Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: use dev_warn_once() in CS parsers

Older GPUs did not support memory protection, so the kernel
driver would validate the command submissions (CS) from userspace
to avoid the GPU accessing any memory it shouldn't.

Change any error messages in that validation to dev_warn_once() to
avoid spamming the kernel log in the event of a bad CS. If users
see any of these messages, they should report them to the user space
component, which in most cases is Mesa
(https://gitlab.freedesktop.org/mesa/mesa/-/issues).

Cc: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20250829171655.GBaLHgh3VOvuM1UfJg@fat_crate.local
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

+650 -635
+264 -259
drivers/gpu/drm/radeon/evergreen_cs.c
··· 951 951 u64 offset = (u64)track->vgt_strmout_bo_offset[i] + 952 952 (u64)track->vgt_strmout_size[i]; 953 953 if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { 954 - DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", 955 - i, offset, 956 - radeon_bo_size(track->vgt_strmout_bo[i])); 954 + dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n", 955 + i, offset, 956 + radeon_bo_size(track->vgt_strmout_bo[i])); 957 957 return -EINVAL; 958 958 } 959 959 } else { 960 - dev_warn(p->dev, "No buffer for streamout %d\n", i); 960 + dev_warn_once(p->dev, "No buffer for streamout %d\n", i); 961 961 return -EINVAL; 962 962 } 963 963 } ··· 979 979 (tmp >> (i * 4)) & 0xF) { 980 980 /* at least one component is enabled */ 981 981 if (track->cb_color_bo[i] == NULL) { 982 - dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 983 - __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 982 + dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 983 + __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 984 984 return -EINVAL; 985 985 } 986 986 /* check cb */ ··· 1056 1056 case EVERGREEN_VLINE_START_END: 1057 1057 r = evergreen_cs_packet_parse_vline(p); 1058 1058 if (r) { 1059 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1060 - idx, reg); 1059 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1060 + idx, reg); 1061 1061 return r; 1062 1062 } 1063 1063 break; ··· 1143 1143 case SQ_VSTMP_RING_BASE: 1144 1144 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1145 1145 if (r) { 1146 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1147 - "0x%04X\n", reg); 1146 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1147 + "0x%04X\n", reg); 1148 1148 return -EINVAL; 1149 1149 } 1150 1150 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1155 1155 break; 1156 1156 case CAYMAN_DB_EQAA: 1157 1157 if (p->rdev->family < CHIP_CAYMAN) { 1158 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1159 - "0x%04X\n", reg); 1158 + 
dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1159 + "0x%04X\n", reg); 1160 1160 return -EINVAL; 1161 1161 } 1162 1162 break; 1163 1163 case CAYMAN_DB_DEPTH_INFO: 1164 1164 if (p->rdev->family < CHIP_CAYMAN) { 1165 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1166 - "0x%04X\n", reg); 1165 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1166 + "0x%04X\n", reg); 1167 1167 return -EINVAL; 1168 1168 } 1169 1169 break; ··· 1172 1172 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1173 1173 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1174 1174 if (r) { 1175 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1176 - "0x%04X\n", reg); 1175 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1176 + "0x%04X\n", reg); 1177 1177 return -EINVAL; 1178 1178 } 1179 1179 ib[idx] &= ~Z_ARRAY_MODE(0xf); ··· 1214 1214 case DB_Z_READ_BASE: 1215 1215 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1216 1216 if (r) { 1217 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1218 - "0x%04X\n", reg); 1217 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1218 + "0x%04X\n", reg); 1219 1219 return -EINVAL; 1220 1220 } 1221 1221 track->db_z_read_offset = radeon_get_ib_value(p, idx); ··· 1226 1226 case DB_Z_WRITE_BASE: 1227 1227 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1228 1228 if (r) { 1229 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1230 - "0x%04X\n", reg); 1229 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1230 + "0x%04X\n", reg); 1231 1231 return -EINVAL; 1232 1232 } 1233 1233 track->db_z_write_offset = radeon_get_ib_value(p, idx); ··· 1238 1238 case DB_STENCIL_READ_BASE: 1239 1239 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1240 1240 if (r) { 1241 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1242 - "0x%04X\n", reg); 1241 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1242 + "0x%04X\n", reg); 1243 1243 return -EINVAL; 1244 1244 } 1245 1245 track->db_s_read_offset = radeon_get_ib_value(p, idx); ··· 1250 1250 case DB_STENCIL_WRITE_BASE: 1251 1251 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1252 1252 if (r) { 
1253 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1254 - "0x%04X\n", reg); 1253 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1254 + "0x%04X\n", reg); 1255 1255 return -EINVAL; 1256 1256 } 1257 1257 track->db_s_write_offset = radeon_get_ib_value(p, idx); ··· 1273 1273 case VGT_STRMOUT_BUFFER_BASE_3: 1274 1274 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1275 1275 if (r) { 1276 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1277 - "0x%04X\n", reg); 1276 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1277 + "0x%04X\n", reg); 1278 1278 return -EINVAL; 1279 1279 } 1280 1280 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; ··· 1295 1295 case CP_COHER_BASE: 1296 1296 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1297 1297 if (r) { 1298 - dev_warn(p->dev, "missing reloc for CP_COHER_BASE " 1299 - "0x%04X\n", reg); 1298 + dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE " 1299 + "0x%04X\n", reg); 1300 1300 return -EINVAL; 1301 1301 } 1302 1302 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1311 1311 break; 1312 1312 case PA_SC_AA_CONFIG: 1313 1313 if (p->rdev->family >= CHIP_CAYMAN) { 1314 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1315 - "0x%04X\n", reg); 1314 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1315 + "0x%04X\n", reg); 1316 1316 return -EINVAL; 1317 1317 } 1318 1318 tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; ··· 1320 1320 break; 1321 1321 case CAYMAN_PA_SC_AA_CONFIG: 1322 1322 if (p->rdev->family < CHIP_CAYMAN) { 1323 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1324 - "0x%04X\n", reg); 1323 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1324 + "0x%04X\n", reg); 1325 1325 return -EINVAL; 1326 1326 } 1327 1327 tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK; ··· 1360 1360 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1361 1361 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1362 1362 if (r) { 1363 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1364 - "0x%04X\n", reg); 1363 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 
1364 + "0x%04X\n", reg); 1365 1365 return -EINVAL; 1366 1366 } 1367 1367 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); ··· 1378 1378 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1379 1379 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1380 1380 if (r) { 1381 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1382 - "0x%04X\n", reg); 1381 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1382 + "0x%04X\n", reg); 1383 1383 return -EINVAL; 1384 1384 } 1385 1385 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); ··· 1439 1439 case CB_COLOR7_ATTRIB: 1440 1440 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1441 1441 if (r) { 1442 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1443 - "0x%04X\n", reg); 1442 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1443 + "0x%04X\n", reg); 1444 1444 return -EINVAL; 1445 1445 } 1446 1446 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { ··· 1467 1467 case CB_COLOR11_ATTRIB: 1468 1468 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1469 1469 if (r) { 1470 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1471 - "0x%04X\n", reg); 1470 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1471 + "0x%04X\n", reg); 1472 1472 return -EINVAL; 1473 1473 } 1474 1474 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { ··· 1555 1555 case CB_COLOR7_BASE: 1556 1556 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1557 1557 if (r) { 1558 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1559 - "0x%04X\n", reg); 1558 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1559 + "0x%04X\n", reg); 1560 1560 return -EINVAL; 1561 1561 } 1562 1562 tmp = (reg - CB_COLOR0_BASE) / 0x3c; ··· 1571 1571 case CB_COLOR11_BASE: 1572 1572 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1573 1573 if (r) { 1574 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1575 - "0x%04X\n", reg); 1574 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1575 + "0x%04X\n", reg); 1576 1576 return -EINVAL; 1577 1577 } 1578 1578 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; ··· 1584 1584 
case DB_HTILE_DATA_BASE: 1585 1585 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1586 1586 if (r) { 1587 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1588 - "0x%04X\n", reg); 1587 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1588 + "0x%04X\n", reg); 1589 1589 return -EINVAL; 1590 1590 } 1591 1591 track->htile_offset = radeon_get_ib_value(p, idx); ··· 1702 1702 case SQ_ALU_CONST_CACHE_LS_15: 1703 1703 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1704 1704 if (r) { 1705 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1706 - "0x%04X\n", reg); 1705 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1706 + "0x%04X\n", reg); 1707 1707 return -EINVAL; 1708 1708 } 1709 1709 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); 1710 1710 break; 1711 1711 case SX_MEMORY_EXPORT_BASE: 1712 1712 if (p->rdev->family >= CHIP_CAYMAN) { 1713 - dev_warn(p->dev, "bad SET_CONFIG_REG " 1714 - "0x%04X\n", reg); 1713 + dev_warn_once(p->dev, "bad SET_CONFIG_REG " 1714 + "0x%04X\n", reg); 1715 1715 return -EINVAL; 1716 1716 } 1717 1717 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1718 1718 if (r) { 1719 - dev_warn(p->dev, "bad SET_CONFIG_REG " 1720 - "0x%04X\n", reg); 1719 + dev_warn_once(p->dev, "bad SET_CONFIG_REG " 1720 + "0x%04X\n", reg); 1721 1721 return -EINVAL; 1722 1722 } 1723 1723 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); 1724 1724 break; 1725 1725 case CAYMAN_SX_SCATTER_EXPORT_BASE: 1726 1726 if (p->rdev->family < CHIP_CAYMAN) { 1727 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1728 - "0x%04X\n", reg); 1727 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1728 + "0x%04X\n", reg); 1729 1729 return -EINVAL; 1730 1730 } 1731 1731 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1732 1732 if (r) { 1733 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1734 - "0x%04X\n", reg); 1733 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1734 + "0x%04X\n", reg); 1735 1735 return -EINVAL; 1736 1736 } 1737 1737 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1740 1740 
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; 1741 1741 break; 1742 1742 default: 1743 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1743 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1744 1744 return -EINVAL; 1745 1745 } 1746 1746 return 0; ··· 1795 1795 uint64_t offset; 1796 1796 1797 1797 if (pkt->count != 1) { 1798 - DRM_ERROR("bad SET PREDICATION\n"); 1798 + dev_warn_once(p->dev, "bad SET PREDICATION\n"); 1799 1799 return -EINVAL; 1800 1800 } 1801 1801 ··· 1807 1807 return 0; 1808 1808 1809 1809 if (pred_op > 2) { 1810 - DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); 1810 + dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op); 1811 1811 return -EINVAL; 1812 1812 } 1813 1813 1814 1814 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1815 1815 if (r) { 1816 - DRM_ERROR("bad SET PREDICATION\n"); 1816 + dev_warn_once(p->dev, "bad SET PREDICATION\n"); 1817 1817 return -EINVAL; 1818 1818 } 1819 1819 ··· 1827 1827 break; 1828 1828 case PACKET3_CONTEXT_CONTROL: 1829 1829 if (pkt->count != 1) { 1830 - DRM_ERROR("bad CONTEXT_CONTROL\n"); 1830 + dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n"); 1831 1831 return -EINVAL; 1832 1832 } 1833 1833 break; ··· 1835 1835 case PACKET3_NUM_INSTANCES: 1836 1836 case PACKET3_CLEAR_STATE: 1837 1837 if (pkt->count) { 1838 - DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); 1838 + dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); 1839 1839 return -EINVAL; 1840 1840 } 1841 1841 break; 1842 1842 case CAYMAN_PACKET3_DEALLOC_STATE: 1843 1843 if (p->rdev->family < CHIP_CAYMAN) { 1844 - DRM_ERROR("bad PACKET3_DEALLOC_STATE\n"); 1844 + dev_warn_once(p->dev, "bad PACKET3_DEALLOC_STATE\n"); 1845 1845 return -EINVAL; 1846 1846 } 1847 1847 if (pkt->count) { 1848 - DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); 1848 + dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); 1849 1849 return -EINVAL; 1850 
1850 } 1851 1851 break; ··· 1854 1854 uint64_t offset; 1855 1855 1856 1856 if (pkt->count != 1) { 1857 - DRM_ERROR("bad INDEX_BASE\n"); 1857 + dev_warn_once(p->dev, "bad INDEX_BASE\n"); 1858 1858 return -EINVAL; 1859 1859 } 1860 1860 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1861 1861 if (r) { 1862 - DRM_ERROR("bad INDEX_BASE\n"); 1862 + dev_warn_once(p->dev, "bad INDEX_BASE\n"); 1863 1863 return -EINVAL; 1864 1864 } 1865 1865 ··· 1872 1872 1873 1873 r = evergreen_cs_track_check(p); 1874 1874 if (r) { 1875 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1875 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1876 1876 return r; 1877 1877 } 1878 1878 break; ··· 1880 1880 case PACKET3_INDEX_BUFFER_SIZE: 1881 1881 { 1882 1882 if (pkt->count != 0) { 1883 - DRM_ERROR("bad INDEX_BUFFER_SIZE\n"); 1883 + dev_warn_once(p->dev, "bad INDEX_BUFFER_SIZE\n"); 1884 1884 return -EINVAL; 1885 1885 } 1886 1886 break; ··· 1889 1889 { 1890 1890 uint64_t offset; 1891 1891 if (pkt->count != 3) { 1892 - DRM_ERROR("bad DRAW_INDEX\n"); 1892 + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); 1893 1893 return -EINVAL; 1894 1894 } 1895 1895 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1896 1896 if (r) { 1897 - DRM_ERROR("bad DRAW_INDEX\n"); 1897 + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); 1898 1898 return -EINVAL; 1899 1899 } 1900 1900 ··· 1907 1907 1908 1908 r = evergreen_cs_track_check(p); 1909 1909 if (r) { 1910 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1910 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1911 1911 return r; 1912 1912 } 1913 1913 break; ··· 1917 1917 uint64_t offset; 1918 1918 1919 1919 if (pkt->count != 4) { 1920 - DRM_ERROR("bad DRAW_INDEX_2\n"); 1920 + dev_warn_once(p->dev, "bad DRAW_INDEX_2\n"); 1921 1921 return -EINVAL; 1922 1922 } 1923 1923 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1924 1924 if (r) { 1925 - DRM_ERROR("bad DRAW_INDEX_2\n"); 1925 + 
dev_warn_once(p->dev, "bad DRAW_INDEX_2\n"); 1926 1926 return -EINVAL; 1927 1927 } 1928 1928 ··· 1935 1935 1936 1936 r = evergreen_cs_track_check(p); 1937 1937 if (r) { 1938 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1938 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1939 1939 return r; 1940 1940 } 1941 1941 break; 1942 1942 } 1943 1943 case PACKET3_DRAW_INDEX_AUTO: 1944 1944 if (pkt->count != 1) { 1945 - DRM_ERROR("bad DRAW_INDEX_AUTO\n"); 1945 + dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n"); 1946 1946 return -EINVAL; 1947 1947 } 1948 1948 r = evergreen_cs_track_check(p); 1949 1949 if (r) { 1950 - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 1950 + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 1951 1951 return r; 1952 1952 } 1953 1953 break; 1954 1954 case PACKET3_DRAW_INDEX_MULTI_AUTO: 1955 1955 if (pkt->count != 2) { 1956 - DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); 1956 + dev_warn_once(p->dev, "bad DRAW_INDEX_MULTI_AUTO\n"); 1957 1957 return -EINVAL; 1958 1958 } 1959 1959 r = evergreen_cs_track_check(p); 1960 1960 if (r) { 1961 - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 1961 + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 1962 1962 return r; 1963 1963 } 1964 1964 break; 1965 1965 case PACKET3_DRAW_INDEX_IMMD: 1966 1966 if (pkt->count < 2) { 1967 - DRM_ERROR("bad DRAW_INDEX_IMMD\n"); 1967 + dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n"); 1968 1968 return -EINVAL; 1969 1969 } 1970 1970 r = evergreen_cs_track_check(p); 1971 1971 if (r) { 1972 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1972 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1973 1973 return r; 1974 1974 } 1975 1975 break; 1976 1976 case PACKET3_DRAW_INDEX_OFFSET: 1977 1977 if (pkt->count != 2) { 1978 - DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); 1978 + 
dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET\n"); 1979 1979 return -EINVAL; 1980 1980 } 1981 1981 r = evergreen_cs_track_check(p); 1982 1982 if (r) { 1983 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1983 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1984 1984 return r; 1985 1985 } 1986 1986 break; 1987 1987 case PACKET3_DRAW_INDEX_OFFSET_2: 1988 1988 if (pkt->count != 3) { 1989 - DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); 1989 + dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET_2\n"); 1990 1990 return -EINVAL; 1991 1991 } 1992 1992 r = evergreen_cs_track_check(p); 1993 1993 if (r) { 1994 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1994 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1995 1995 return r; 1996 1996 } 1997 1997 break; ··· 2005 2005 4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32] 2006 2006 */ 2007 2007 if (pkt->count != 2) { 2008 - DRM_ERROR("bad SET_BASE\n"); 2008 + dev_warn_once(p->dev, "bad SET_BASE\n"); 2009 2009 return -EINVAL; 2010 2010 } 2011 2011 2012 2012 /* currently only supporting setting indirect draw buffer base address */ 2013 2013 if (idx_value != 1) { 2014 - DRM_ERROR("bad SET_BASE\n"); 2014 + dev_warn_once(p->dev, "bad SET_BASE\n"); 2015 2015 return -EINVAL; 2016 2016 } 2017 2017 2018 2018 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2019 2019 if (r) { 2020 - DRM_ERROR("bad SET_BASE\n"); 2020 + dev_warn_once(p->dev, "bad SET_BASE\n"); 2021 2021 return -EINVAL; 2022 2022 } 2023 2023 ··· 2039 2039 3 DRAW_INITIATOR Draw Initiator Register. 
Written to the VGT_DRAW_INITIATOR register for the assigned context 2040 2040 */ 2041 2041 if (pkt->count != 1) { 2042 - DRM_ERROR("bad DRAW_INDIRECT\n"); 2042 + dev_warn_once(p->dev, "bad DRAW_INDIRECT\n"); 2043 2043 return -EINVAL; 2044 2044 } 2045 2045 2046 2046 if (idx_value + size > track->indirect_draw_buffer_size) { 2047 - dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n", 2048 - idx_value, size, track->indirect_draw_buffer_size); 2047 + dev_warn_once(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n", 2048 + idx_value, size, track->indirect_draw_buffer_size); 2049 2049 return -EINVAL; 2050 2050 } 2051 2051 2052 2052 r = evergreen_cs_track_check(p); 2053 2053 if (r) { 2054 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 2054 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 2055 2055 return r; 2056 2056 } 2057 2057 break; 2058 2058 } 2059 2059 case PACKET3_DISPATCH_DIRECT: 2060 2060 if (pkt->count != 3) { 2061 - DRM_ERROR("bad DISPATCH_DIRECT\n"); 2061 + dev_warn_once(p->dev, "bad DISPATCH_DIRECT\n"); 2062 2062 return -EINVAL; 2063 2063 } 2064 2064 r = evergreen_cs_track_check(p); 2065 2065 if (r) { 2066 - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 2066 + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 2067 2067 return r; 2068 2068 } 2069 2069 break; 2070 2070 case PACKET3_DISPATCH_INDIRECT: 2071 2071 if (pkt->count != 1) { 2072 - DRM_ERROR("bad DISPATCH_INDIRECT\n"); 2072 + dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n"); 2073 2073 return -EINVAL; 2074 2074 } 2075 2075 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2076 2076 if (r) { 2077 - DRM_ERROR("bad DISPATCH_INDIRECT\n"); 2077 + dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n"); 2078 2078 return -EINVAL; 2079 2079 } 2080 2080 ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff); 2081 2081 r = evergreen_cs_track_check(p); 2082 2082 if (r) { 2083 - 
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 2083 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 2084 2084 return r; 2085 2085 } 2086 2086 break; 2087 2087 case PACKET3_WAIT_REG_MEM: 2088 2088 if (pkt->count != 5) { 2089 - DRM_ERROR("bad WAIT_REG_MEM\n"); 2089 + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); 2090 2090 return -EINVAL; 2091 2091 } 2092 2092 /* bit 4 is reg (0) or mem (1) */ ··· 2095 2095 2096 2096 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2097 2097 if (r) { 2098 - DRM_ERROR("bad WAIT_REG_MEM\n"); 2098 + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); 2099 2099 return -EINVAL; 2100 2100 } 2101 2101 ··· 2106 2106 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc); 2107 2107 ib[idx+2] = upper_32_bits(offset) & 0xff; 2108 2108 } else if (idx_value & 0x100) { 2109 - DRM_ERROR("cannot use PFP on REG wait\n"); 2109 + dev_warn_once(p->dev, "cannot use PFP on REG wait\n"); 2110 2110 return -EINVAL; 2111 2111 } 2112 2112 break; ··· 2115 2115 u32 command, size, info; 2116 2116 u64 offset, tmp; 2117 2117 if (pkt->count != 4) { 2118 - DRM_ERROR("bad CP DMA\n"); 2118 + dev_warn_once(p->dev, "bad CP DMA\n"); 2119 2119 return -EINVAL; 2120 2120 } 2121 2121 command = radeon_get_ib_value(p, idx+4); ··· 2129 2129 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */ 2130 2130 /* non mem to mem copies requires dw aligned count */ 2131 2131 if (size % 4) { 2132 - DRM_ERROR("CP DMA command requires dw count alignment\n"); 2132 + dev_warn_once(p->dev, "CP DMA command requires dw count alignment\n"); 2133 2133 return -EINVAL; 2134 2134 } 2135 2135 } ··· 2137 2137 /* src address space is register */ 2138 2138 /* GDS is ok */ 2139 2139 if (((info & 0x60000000) >> 29) != 1) { 2140 - DRM_ERROR("CP DMA SAS not supported\n"); 2140 + dev_warn_once(p->dev, "CP DMA SAS not supported\n"); 2141 2141 return -EINVAL; 2142 2142 } 2143 2143 } else { 2144 2144 if (command & PACKET3_CP_DMA_CMD_SAIC) { 2145 - DRM_ERROR("CP DMA SAIC 
only supported for registers\n"); 2145 + dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n"); 2146 2146 return -EINVAL; 2147 2147 } 2148 2148 /* src address space is memory */ 2149 2149 if (((info & 0x60000000) >> 29) == 0) { 2150 2150 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2151 2151 if (r) { 2152 - DRM_ERROR("bad CP DMA SRC\n"); 2152 + dev_warn_once(p->dev, "bad CP DMA SRC\n"); 2153 2153 return -EINVAL; 2154 2154 } 2155 2155 ··· 2159 2159 offset = reloc->gpu_offset + tmp; 2160 2160 2161 2161 if ((tmp + size) > radeon_bo_size(reloc->robj)) { 2162 - dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", 2163 - tmp + size, radeon_bo_size(reloc->robj)); 2162 + dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n", 2163 + tmp + size, radeon_bo_size(reloc->robj)); 2164 2164 return -EINVAL; 2165 2165 } 2166 2166 2167 2167 ib[idx] = offset; 2168 2168 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); 2169 2169 } else if (((info & 0x60000000) >> 29) != 2) { 2170 - DRM_ERROR("bad CP DMA SRC_SEL\n"); 2170 + dev_warn_once(p->dev, "bad CP DMA SRC_SEL\n"); 2171 2171 return -EINVAL; 2172 2172 } 2173 2173 } ··· 2175 2175 /* dst address space is register */ 2176 2176 /* GDS is ok */ 2177 2177 if (((info & 0x00300000) >> 20) != 1) { 2178 - DRM_ERROR("CP DMA DAS not supported\n"); 2178 + dev_warn_once(p->dev, "CP DMA DAS not supported\n"); 2179 2179 return -EINVAL; 2180 2180 } 2181 2181 } else { 2182 2182 /* dst address space is memory */ 2183 2183 if (command & PACKET3_CP_DMA_CMD_DAIC) { 2184 - DRM_ERROR("CP DMA DAIC only supported for registers\n"); 2184 + dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n"); 2185 2185 return -EINVAL; 2186 2186 } 2187 2187 if (((info & 0x00300000) >> 20) == 0) { 2188 2188 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2189 2189 if (r) { 2190 - DRM_ERROR("bad CP DMA DST\n"); 2190 + dev_warn_once(p->dev, "bad CP DMA DST\n"); 2191 2191 return -EINVAL; 2192 2192 } 2193 2193 
··· 2197 2197 offset = reloc->gpu_offset + tmp; 2198 2198 2199 2199 if ((tmp + size) > radeon_bo_size(reloc->robj)) { 2200 - dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", 2201 - tmp + size, radeon_bo_size(reloc->robj)); 2200 + dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", 2201 + tmp + size, radeon_bo_size(reloc->robj)); 2202 2202 return -EINVAL; 2203 2203 } 2204 2204 2205 2205 ib[idx+2] = offset; 2206 2206 ib[idx+3] = upper_32_bits(offset) & 0xff; 2207 2207 } else { 2208 - DRM_ERROR("bad CP DMA DST_SEL\n"); 2208 + dev_warn_once(p->dev, "bad CP DMA DST_SEL\n"); 2209 2209 return -EINVAL; 2210 2210 } 2211 2211 } ··· 2213 2213 } 2214 2214 case PACKET3_PFP_SYNC_ME: 2215 2215 if (pkt->count) { 2216 - DRM_ERROR("bad PFP_SYNC_ME\n"); 2216 + dev_warn_once(p->dev, "bad PFP_SYNC_ME\n"); 2217 2217 return -EINVAL; 2218 2218 } 2219 2219 break; 2220 2220 case PACKET3_SURFACE_SYNC: 2221 2221 if (pkt->count != 3) { 2222 - DRM_ERROR("bad SURFACE_SYNC\n"); 2222 + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); 2223 2223 return -EINVAL; 2224 2224 } 2225 2225 /* 0xffffffff/0x0 is flush all cache flag */ ··· 2227 2227 radeon_get_ib_value(p, idx + 2) != 0) { 2228 2228 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2229 2229 if (r) { 2230 - DRM_ERROR("bad SURFACE_SYNC\n"); 2230 + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); 2231 2231 return -EINVAL; 2232 2232 } 2233 2233 ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 2235 2235 break; 2236 2236 case PACKET3_EVENT_WRITE: 2237 2237 if (pkt->count != 2 && pkt->count != 0) { 2238 - DRM_ERROR("bad EVENT_WRITE\n"); 2238 + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); 2239 2239 return -EINVAL; 2240 2240 } 2241 2241 if (pkt->count) { ··· 2243 2243 2244 2244 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2245 2245 if (r) { 2246 - DRM_ERROR("bad EVENT_WRITE\n"); 2246 + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); 2247 2247 return -EINVAL; 2248 2248 } 2249 2249 offset = reloc->gpu_offset + ··· 2259 2259 
uint64_t offset; 2260 2260 2261 2261 if (pkt->count != 4) { 2262 - DRM_ERROR("bad EVENT_WRITE_EOP\n"); 2262 + dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n"); 2263 2263 return -EINVAL; 2264 2264 } 2265 2265 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2266 2266 if (r) { 2267 - DRM_ERROR("bad EVENT_WRITE_EOP\n"); 2267 + dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n"); 2268 2268 return -EINVAL; 2269 2269 } 2270 2270 ··· 2281 2281 uint64_t offset; 2282 2282 2283 2283 if (pkt->count != 3) { 2284 - DRM_ERROR("bad EVENT_WRITE_EOS\n"); 2284 + dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n"); 2285 2285 return -EINVAL; 2286 2286 } 2287 2287 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2288 2288 if (r) { 2289 - DRM_ERROR("bad EVENT_WRITE_EOS\n"); 2289 + dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n"); 2290 2290 return -EINVAL; 2291 2291 } 2292 2292 ··· 2304 2304 if ((start_reg < PACKET3_SET_CONFIG_REG_START) || 2305 2305 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 2306 2306 (end_reg >= PACKET3_SET_CONFIG_REG_END)) { 2307 - DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); 2307 + dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n"); 2308 2308 return -EINVAL; 2309 2309 } 2310 2310 for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) { ··· 2321 2321 if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || 2322 2322 (start_reg >= PACKET3_SET_CONTEXT_REG_END) || 2323 2323 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { 2324 - DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); 2324 + dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n"); 2325 2325 return -EINVAL; 2326 2326 } 2327 2327 for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) { ··· 2334 2334 break; 2335 2335 case PACKET3_SET_RESOURCE: 2336 2336 if (pkt->count % 8) { 2337 - DRM_ERROR("bad SET_RESOURCE\n"); 2337 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 2338 2338 return -EINVAL; 2339 2339 } 2340 2340 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; ··· 2342 2342 if ((start_reg < PACKET3_SET_RESOURCE_START) || 
2343 2343 (start_reg >= PACKET3_SET_RESOURCE_END) || 2344 2344 (end_reg >= PACKET3_SET_RESOURCE_END)) { 2345 - DRM_ERROR("bad SET_RESOURCE\n"); 2345 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 2346 2346 return -EINVAL; 2347 2347 } 2348 2348 for (i = 0; i < (pkt->count / 8); i++) { ··· 2355 2355 /* tex base */ 2356 2356 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2357 2357 if (r) { 2358 - DRM_ERROR("bad SET_RESOURCE (tex)\n"); 2358 + dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n"); 2359 2359 return -EINVAL; 2360 2360 } 2361 2361 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { ··· 2392 2392 } else { 2393 2393 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2394 2394 if (r) { 2395 - DRM_ERROR("bad SET_RESOURCE (tex)\n"); 2395 + dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n"); 2396 2396 return -EINVAL; 2397 2397 } 2398 2398 moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 2411 2411 /* vtx base */ 2412 2412 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2413 2413 if (r) { 2414 - DRM_ERROR("bad SET_RESOURCE (vtx)\n"); 2414 + dev_warn_once(p->dev, "bad SET_RESOURCE (vtx)\n"); 2415 2415 return -EINVAL; 2416 2416 } 2417 2417 offset = radeon_get_ib_value(p, idx+1+(i*8)+0); 2418 2418 size = radeon_get_ib_value(p, idx+1+(i*8)+1); 2419 2419 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { 2420 2420 /* force size to size of the buffer */ 2421 - dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n"); 2421 + dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", 2422 + size + offset, radeon_bo_size(reloc->robj)); 2422 2423 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; 2423 2424 } 2424 2425 ··· 2432 2431 case SQ_TEX_VTX_INVALID_TEXTURE: 2433 2432 case SQ_TEX_VTX_INVALID_BUFFER: 2434 2433 default: 2435 - DRM_ERROR("bad SET_RESOURCE\n"); 2434 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 2436 2435 return -EINVAL; 2437 2436 } 2438 2437 } ··· 2446 2445 if ((start_reg < 
PACKET3_SET_BOOL_CONST_START) || 2447 2446 (start_reg >= PACKET3_SET_BOOL_CONST_END) || 2448 2447 (end_reg >= PACKET3_SET_BOOL_CONST_END)) { 2449 - DRM_ERROR("bad SET_BOOL_CONST\n"); 2448 + dev_warn_once(p->dev, "bad SET_BOOL_CONST\n"); 2450 2449 return -EINVAL; 2451 2450 } 2452 2451 break; ··· 2456 2455 if ((start_reg < PACKET3_SET_LOOP_CONST_START) || 2457 2456 (start_reg >= PACKET3_SET_LOOP_CONST_END) || 2458 2457 (end_reg >= PACKET3_SET_LOOP_CONST_END)) { 2459 - DRM_ERROR("bad SET_LOOP_CONST\n"); 2458 + dev_warn_once(p->dev, "bad SET_LOOP_CONST\n"); 2460 2459 return -EINVAL; 2461 2460 } 2462 2461 break; ··· 2466 2465 if ((start_reg < PACKET3_SET_CTL_CONST_START) || 2467 2466 (start_reg >= PACKET3_SET_CTL_CONST_END) || 2468 2467 (end_reg >= PACKET3_SET_CTL_CONST_END)) { 2469 - DRM_ERROR("bad SET_CTL_CONST\n"); 2468 + dev_warn_once(p->dev, "bad SET_CTL_CONST\n"); 2470 2469 return -EINVAL; 2471 2470 } 2472 2471 break; 2473 2472 case PACKET3_SET_SAMPLER: 2474 2473 if (pkt->count % 3) { 2475 - DRM_ERROR("bad SET_SAMPLER\n"); 2474 + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); 2476 2475 return -EINVAL; 2477 2476 } 2478 2477 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START; ··· 2480 2479 if ((start_reg < PACKET3_SET_SAMPLER_START) || 2481 2480 (start_reg >= PACKET3_SET_SAMPLER_END) || 2482 2481 (end_reg >= PACKET3_SET_SAMPLER_END)) { 2483 - DRM_ERROR("bad SET_SAMPLER\n"); 2482 + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); 2484 2483 return -EINVAL; 2485 2484 } 2486 2485 break; 2487 2486 case PACKET3_STRMOUT_BUFFER_UPDATE: 2488 2487 if (pkt->count != 4) { 2489 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); 2488 + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); 2490 2489 return -EINVAL; 2491 2490 } 2492 2491 /* Updating memory at DST_ADDRESS. 
*/ ··· 2494 2493 u64 offset; 2495 2494 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2496 2495 if (r) { 2497 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); 2496 + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); 2498 2497 return -EINVAL; 2499 2498 } 2500 2499 offset = radeon_get_ib_value(p, idx+1); 2501 2500 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2502 2501 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2503 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", 2504 - offset + 4, radeon_bo_size(reloc->robj)); 2502 + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", 2503 + offset + 4, radeon_bo_size(reloc->robj)); 2505 2504 return -EINVAL; 2506 2505 } 2507 2506 offset += reloc->gpu_offset; ··· 2513 2512 u64 offset; 2514 2513 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2515 2514 if (r) { 2516 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); 2515 + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); 2517 2516 return -EINVAL; 2518 2517 } 2519 2518 offset = radeon_get_ib_value(p, idx+3); 2520 2519 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2521 2520 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2522 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", 2523 - offset + 4, radeon_bo_size(reloc->robj)); 2521 + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", 2522 + offset + 4, radeon_bo_size(reloc->robj)); 2524 2523 return -EINVAL; 2525 2524 } 2526 2525 offset += reloc->gpu_offset; ··· 2533 2532 u64 offset; 2534 2533 2535 2534 if (pkt->count != 3) { 2536 - DRM_ERROR("bad MEM_WRITE (invalid count)\n"); 2535 + dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n"); 2537 2536 return -EINVAL; 2538 2537 } 2539 2538 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2540 2539 if (r) { 2541 - DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); 
2540 + dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n"); 2542 2541 return -EINVAL; 2543 2542 } 2544 2543 offset = radeon_get_ib_value(p, idx+0); 2545 2544 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; 2546 2545 if (offset & 0x7) { 2547 - DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); 2546 + dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n"); 2548 2547 return -EINVAL; 2549 2548 } 2550 2549 if ((offset + 8) > radeon_bo_size(reloc->robj)) { 2551 - DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", 2552 - offset + 8, radeon_bo_size(reloc->robj)); 2550 + dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", 2551 + offset + 8, radeon_bo_size(reloc->robj)); 2553 2552 return -EINVAL; 2554 2553 } 2555 2554 offset += reloc->gpu_offset; ··· 2559 2558 } 2560 2559 case PACKET3_COPY_DW: 2561 2560 if (pkt->count != 4) { 2562 - DRM_ERROR("bad COPY_DW (invalid count)\n"); 2561 + dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n"); 2563 2562 return -EINVAL; 2564 2563 } 2565 2564 if (idx_value & 0x1) { ··· 2567 2566 /* SRC is memory. */ 2568 2567 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2569 2568 if (r) { 2570 - DRM_ERROR("bad COPY_DW (missing src reloc)\n"); 2569 + dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n"); 2571 2570 return -EINVAL; 2572 2571 } 2573 2572 offset = radeon_get_ib_value(p, idx+1); 2574 2573 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2575 2574 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2576 - DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", 2577 - offset + 4, radeon_bo_size(reloc->robj)); 2575 + dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", 2576 + offset + 4, radeon_bo_size(reloc->robj)); 2578 2577 return -EINVAL; 2579 2578 } 2580 2579 offset += reloc->gpu_offset; ··· 2584 2583 /* SRC is a reg. 
*/ 2585 2584 reg = radeon_get_ib_value(p, idx+1) << 2; 2586 2585 if (!evergreen_is_safe_reg(p, reg)) { 2587 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", 2588 - reg, idx + 1); 2586 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", 2587 + reg, idx + 1); 2589 2588 return -EINVAL; 2590 2589 } 2591 2590 } ··· 2594 2593 /* DST is memory. */ 2595 2594 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2596 2595 if (r) { 2597 - DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); 2596 + dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n"); 2598 2597 return -EINVAL; 2599 2598 } 2600 2599 offset = radeon_get_ib_value(p, idx+3); 2601 2600 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2602 2601 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2603 - DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", 2604 - offset + 4, radeon_bo_size(reloc->robj)); 2602 + dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", 2603 + offset + 4, radeon_bo_size(reloc->robj)); 2605 2604 return -EINVAL; 2606 2605 } 2607 2606 offset += reloc->gpu_offset; ··· 2611 2610 /* DST is a reg. 
*/ 2612 2611 reg = radeon_get_ib_value(p, idx+3) << 2; 2613 2612 if (!evergreen_is_safe_reg(p, reg)) { 2614 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", 2615 - reg, idx + 3); 2613 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", 2614 + reg, idx + 3); 2616 2615 return -EINVAL; 2617 2616 } 2618 2617 } ··· 2623 2622 uint32_t allowed_reg_base; 2624 2623 uint32_t source_sel; 2625 2624 if (pkt->count != 2) { 2626 - DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n"); 2625 + dev_warn_once(p->dev, "bad SET_APPEND_CNT (invalid count)\n"); 2627 2626 return -EINVAL; 2628 2627 } 2629 2628 ··· 2633 2632 2634 2633 areg = idx_value >> 16; 2635 2634 if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) { 2636 - dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n", 2637 - areg, idx); 2635 + dev_warn_once(p->dev, "forbidden register for append cnt 0x%08x at %d\n", 2636 + areg, idx); 2638 2637 return -EINVAL; 2639 2638 } 2640 2639 ··· 2644 2643 uint32_t swap; 2645 2644 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2646 2645 if (r) { 2647 - DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n"); 2646 + dev_warn_once(p->dev, "bad SET_APPEND_CNT (missing reloc)\n"); 2648 2647 return -EINVAL; 2649 2648 } 2650 2649 offset = radeon_get_ib_value(p, idx + 1); ··· 2657 2656 ib[idx+1] = (offset & 0xfffffffc) | swap; 2658 2657 ib[idx+2] = upper_32_bits(offset) & 0xff; 2659 2658 } else { 2660 - DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n"); 2659 + dev_warn_once(p->dev, "bad SET_APPEND_CNT (unsupported operation)\n"); 2661 2660 return -EINVAL; 2662 2661 } 2663 2662 break; ··· 2667 2666 u64 offset; 2668 2667 2669 2668 if (pkt->count != 2) { 2670 - DRM_ERROR("bad COND_EXEC (invalid count)\n"); 2669 + dev_warn_once(p->dev, "bad COND_EXEC (invalid count)\n"); 2671 2670 return -EINVAL; 2672 2671 } 2673 2672 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2674 2673 if (r) { 2675 - DRM_ERROR("bad COND_EXEC (missing reloc)\n"); 2674 + 
dev_warn_once(p->dev, "bad COND_EXEC (missing reloc)\n"); 2676 2675 return -EINVAL; 2677 2676 } 2678 2677 offset = radeon_get_ib_value(p, idx + 0); 2679 2678 offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL; 2680 2679 if (offset & 0x7) { 2681 - DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n"); 2680 + dev_warn_once(p->dev, "bad COND_EXEC (address not qwords aligned)\n"); 2682 2681 return -EINVAL; 2683 2682 } 2684 2683 if ((offset + 8) > radeon_bo_size(reloc->robj)) { 2685 - DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n", 2686 - offset + 8, radeon_bo_size(reloc->robj)); 2684 + dev_warn_once(p->dev, "bad COND_EXEC bo too small: 0x%llx, 0x%lx\n", 2685 + offset + 8, radeon_bo_size(reloc->robj)); 2687 2686 return -EINVAL; 2688 2687 } 2689 2688 offset += reloc->gpu_offset; ··· 2693 2692 } 2694 2693 case PACKET3_COND_WRITE: 2695 2694 if (pkt->count != 7) { 2696 - DRM_ERROR("bad COND_WRITE (invalid count)\n"); 2695 + dev_warn_once(p->dev, "bad COND_WRITE (invalid count)\n"); 2697 2696 return -EINVAL; 2698 2697 } 2699 2698 if (idx_value & 0x10) { ··· 2701 2700 /* POLL is memory. */ 2702 2701 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2703 2702 if (r) { 2704 - DRM_ERROR("bad COND_WRITE (missing src reloc)\n"); 2703 + dev_warn_once(p->dev, "bad COND_WRITE (missing src reloc)\n"); 2705 2704 return -EINVAL; 2706 2705 } 2707 2706 offset = radeon_get_ib_value(p, idx + 1); 2708 2707 offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32; 2709 2708 if ((offset + 8) > radeon_bo_size(reloc->robj)) { 2710 - DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n", 2711 - offset + 8, radeon_bo_size(reloc->robj)); 2709 + dev_warn_once(p->dev, "bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n", 2710 + offset + 8, radeon_bo_size(reloc->robj)); 2712 2711 return -EINVAL; 2713 2712 } 2714 2713 offset += reloc->gpu_offset; ··· 2718 2717 /* POLL is a reg. 
*/ 2719 2718 reg = radeon_get_ib_value(p, idx + 1) << 2; 2720 2719 if (!evergreen_is_safe_reg(p, reg)) { 2721 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", 2722 - reg, idx + 1); 2720 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", 2721 + reg, idx + 1); 2723 2722 return -EINVAL; 2724 2723 } 2725 2724 } ··· 2728 2727 /* WRITE is memory. */ 2729 2728 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 2730 2729 if (r) { 2731 - DRM_ERROR("bad COND_WRITE (missing dst reloc)\n"); 2730 + dev_warn_once(p->dev, "bad COND_WRITE (missing dst reloc)\n"); 2732 2731 return -EINVAL; 2733 2732 } 2734 2733 offset = radeon_get_ib_value(p, idx + 5); 2735 2734 offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32; 2736 2735 if ((offset + 8) > radeon_bo_size(reloc->robj)) { 2737 - DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n", 2738 - offset + 8, radeon_bo_size(reloc->robj)); 2736 + dev_warn_once(p->dev, "bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n", 2737 + offset + 8, radeon_bo_size(reloc->robj)); 2739 2738 return -EINVAL; 2740 2739 } 2741 2740 offset += reloc->gpu_offset; ··· 2745 2744 /* WRITE is a reg. 
*/ 2746 2745 reg = radeon_get_ib_value(p, idx + 5) << 2; 2747 2746 if (!evergreen_is_safe_reg(p, reg)) { 2748 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", 2749 - reg, idx + 5); 2747 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", 2748 + reg, idx + 5); 2750 2749 return -EINVAL; 2751 2750 } 2752 2751 } ··· 2754 2753 case PACKET3_NOP: 2755 2754 break; 2756 2755 default: 2757 - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2756 + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); 2758 2757 return -EINVAL; 2759 2758 } 2760 2759 return 0; ··· 2854 2853 r = evergreen_packet3_check(p, &pkt); 2855 2854 break; 2856 2855 default: 2857 - DRM_ERROR("Unknown packet type %d !\n", pkt.type); 2856 + dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type); 2858 2857 kfree(p->track); 2859 2858 p->track = NULL; 2860 2859 return -EINVAL; ··· 2897 2896 2898 2897 do { 2899 2898 if (p->idx >= ib_chunk->length_dw) { 2900 - DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 2901 - p->idx, ib_chunk->length_dw); 2899 + dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n", 2900 + p->idx, ib_chunk->length_dw); 2902 2901 return -EINVAL; 2903 2902 } 2904 2903 idx = p->idx; ··· 2911 2910 case DMA_PACKET_WRITE: 2912 2911 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2913 2912 if (r) { 2914 - DRM_ERROR("bad DMA_PACKET_WRITE\n"); 2913 + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n"); 2915 2914 return -EINVAL; 2916 2915 } 2917 2916 switch (sub_cmd) { ··· 2933 2932 p->idx += count + 3; 2934 2933 break; 2935 2934 default: 2936 - DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header); 2935 + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header); 2937 2936 return -EINVAL; 2938 2937 } 2939 2938 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2940 - dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n", 2941 - dst_offset, 
radeon_bo_size(dst_reloc->robj)); 2939 + dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n", 2940 + dst_offset, radeon_bo_size(dst_reloc->robj)); 2942 2941 return -EINVAL; 2943 2942 } 2944 2943 break; 2945 2944 case DMA_PACKET_COPY: 2946 2945 r = r600_dma_cs_next_reloc(p, &src_reloc); 2947 2946 if (r) { 2948 - DRM_ERROR("bad DMA_PACKET_COPY\n"); 2947 + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); 2949 2948 return -EINVAL; 2950 2949 } 2951 2950 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2952 2951 if (r) { 2953 - DRM_ERROR("bad DMA_PACKET_COPY\n"); 2952 + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); 2954 2953 return -EINVAL; 2955 2954 } 2956 2955 switch (sub_cmd) { ··· 2962 2961 dst_offset = radeon_get_ib_value(p, idx+1); 2963 2962 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; 2964 2963 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 2965 - dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", 2966 - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 2964 + dev_warn_once(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", 2965 + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 2967 2966 return -EINVAL; 2968 2967 } 2969 2968 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2970 - dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n", 2971 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2969 + dev_warn_once(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n", 2970 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2972 2971 return -EINVAL; 2973 2972 } 2974 2973 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ··· 3002 3001 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); 3003 3002 } 3004 3003 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3005 - dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", 3006 - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3004 + 
dev_warn_once(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", 3005 + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3007 3006 return -EINVAL; 3008 3007 } 3009 3008 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 3010 - dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n", 3011 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3009 + dev_warn_once(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n", 3010 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3012 3011 return -EINVAL; 3013 3012 } 3014 3013 p->idx += 9; ··· 3021 3020 dst_offset = radeon_get_ib_value(p, idx+1); 3022 3021 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; 3023 3022 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { 3024 - dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", 3025 - src_offset + count, radeon_bo_size(src_reloc->robj)); 3023 + dev_warn_once(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", 3024 + src_offset + count, radeon_bo_size(src_reloc->robj)); 3026 3025 return -EINVAL; 3027 3026 } 3028 3027 if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) { 3029 - dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n", 3030 - dst_offset + count, radeon_bo_size(dst_reloc->robj)); 3028 + dev_warn_once(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n", 3029 + dst_offset + count, radeon_bo_size(dst_reloc->robj)); 3031 3030 return -EINVAL; 3032 3031 } 3033 3032 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff); ··· 3040 3039 case 0x41: 3041 3040 /* L2L, partial */ 3042 3041 if (p->family < CHIP_CAYMAN) { 3043 - DRM_ERROR("L2L Partial is cayman only !\n"); 3042 + dev_warn_once(p->dev, "L2L Partial is cayman only !\n"); 3044 3043 return -EINVAL; 3045 3044 } 3046 3045 ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff); ··· 3055 3054 /* L2L, dw, broadcast */ 3056 3055 r = r600_dma_cs_next_reloc(p, &dst2_reloc); 3057 3056 if (r) { 3058 - 
DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); 3057 + dev_warn_once(p->dev, "bad L2L, dw, broadcast DMA_PACKET_COPY\n"); 3059 3058 return -EINVAL; 3060 3059 } 3061 3060 dst_offset = radeon_get_ib_value(p, idx+1); ··· 3065 3064 src_offset = radeon_get_ib_value(p, idx+3); 3066 3065 src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; 3067 3066 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3068 - dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", 3069 - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3067 + dev_warn_once(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", 3068 + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3070 3069 return -EINVAL; 3071 3070 } 3072 3071 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 3073 - dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n", 3074 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3072 + dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n", 3073 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3075 3074 return -EINVAL; 3076 3075 } 3077 3076 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { 3078 - dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n", 3079 - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 3077 + dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n", 3078 + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 3080 3079 return -EINVAL; 3081 3080 } 3082 3081 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ··· 3090 3089 /* Copy L2T Frame to Field */ 3091 3090 case 0x48: 3092 3091 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { 3093 - DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); 3092 + dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n"); 3094 3093 return -EINVAL; 3095 
3094 } 3096 3095 r = r600_dma_cs_next_reloc(p, &dst2_reloc); 3097 3096 if (r) { 3098 - DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); 3097 + dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n"); 3099 3098 return -EINVAL; 3100 3099 } 3101 3100 dst_offset = radeon_get_ib_value(p, idx+1); ··· 3105 3104 src_offset = radeon_get_ib_value(p, idx+8); 3106 3105 src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; 3107 3106 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3108 - dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", 3109 - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3107 + dev_warn_once(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", 3108 + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3110 3109 return -EINVAL; 3111 3110 } 3112 3111 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 3113 - dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", 3114 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3112 + dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", 3113 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3115 3114 return -EINVAL; 3116 3115 } 3117 3116 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { 3118 - dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", 3119 - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 3117 + dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", 3118 + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 3120 3119 return -EINVAL; 3121 3120 } 3122 3121 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); ··· 3129 3128 case 0x49: 3130 3129 /* L2T, T2L partial */ 3131 3130 if (p->family < CHIP_CAYMAN) { 3132 - DRM_ERROR("L2T, T2L Partial is cayman only !\n"); 3131 + dev_warn_once(p->dev, "L2T, T2L Partial is cayman only 
!\n"); 3133 3132 return -EINVAL; 3134 3133 } 3135 3134 /* detile bit */ ··· 3152 3151 case 0x4b: 3153 3152 /* L2T, broadcast */ 3154 3153 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { 3155 - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); 3154 + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); 3156 3155 return -EINVAL; 3157 3156 } 3158 3157 r = r600_dma_cs_next_reloc(p, &dst2_reloc); 3159 3158 if (r) { 3160 - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); 3159 + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); 3161 3160 return -EINVAL; 3162 3161 } 3163 3162 dst_offset = radeon_get_ib_value(p, idx+1); ··· 3167 3166 src_offset = radeon_get_ib_value(p, idx+8); 3168 3167 src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; 3169 3168 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3170 - dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", 3171 - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3169 + dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", 3170 + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3172 3171 return -EINVAL; 3173 3172 } 3174 3173 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 3175 - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", 3176 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3174 + dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", 3175 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3177 3176 return -EINVAL; 3178 3177 } 3179 3178 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { 3180 - dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", 3181 - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 3179 + dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", 3180 + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 
3182 3181 return -EINVAL; 3183 3182 } 3184 3183 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); ··· 3213 3212 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); 3214 3213 } 3215 3214 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3216 - dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", 3217 - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3215 + dev_warn_once(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", 3216 + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3218 3217 return -EINVAL; 3219 3218 } 3220 3219 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 3221 - dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n", 3222 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3220 + dev_warn_once(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n", 3221 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3223 3222 return -EINVAL; 3224 3223 } 3225 3224 p->idx += 9; ··· 3228 3227 case 0x4d: 3229 3228 /* T2T partial */ 3230 3229 if (p->family < CHIP_CAYMAN) { 3231 - DRM_ERROR("L2T, T2L Partial is cayman only !\n"); 3230 + dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n"); 3232 3231 return -EINVAL; 3233 3232 } 3234 3233 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); ··· 3239 3238 case 0x4f: 3240 3239 /* L2T, broadcast */ 3241 3240 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { 3242 - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); 3241 + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); 3243 3242 return -EINVAL; 3244 3243 } 3245 3244 r = r600_dma_cs_next_reloc(p, &dst2_reloc); 3246 3245 if (r) { 3247 - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); 3246 + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); 3248 3247 return -EINVAL; 3249 3248 } 3250 3249 dst_offset = radeon_get_ib_value(p, idx+1); ··· 3254 3253 src_offset = radeon_get_ib_value(p, idx+8); 3255 3254 src_offset |= 
((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; 3256 3255 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 3257 - dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", 3258 - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3256 + dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", 3257 + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 3259 3258 return -EINVAL; 3260 3259 } 3261 3260 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 3262 - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", 3263 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3261 + dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", 3262 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 3264 3263 return -EINVAL; 3265 3264 } 3266 3265 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { 3267 - dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", 3268 - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 3266 + dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", 3267 + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); 3269 3268 return -EINVAL; 3270 3269 } 3271 3270 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); ··· 3275 3274 p->idx += 10; 3276 3275 break; 3277 3276 default: 3278 - DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header); 3277 + dev_warn_once(p->dev, "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header); 3279 3278 return -EINVAL; 3280 3279 } 3281 3280 break; 3282 3281 case DMA_PACKET_CONSTANT_FILL: 3283 3282 r = r600_dma_cs_next_reloc(p, &dst_reloc); 3284 3283 if (r) { 3285 - DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); 3284 + dev_warn_once(p->dev, "bad DMA_PACKET_CONSTANT_FILL\n"); 3286 3285 return -EINVAL; 3287 3286 } 3288 3287 dst_offset = radeon_get_ib_value(p, idx+1); 3289 3288 
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; 3290 3289 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 3291 - dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", 3292 - dst_offset, radeon_bo_size(dst_reloc->robj)); 3290 + dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", 3291 + dst_offset, radeon_bo_size(dst_reloc->robj)); 3293 3292 return -EINVAL; 3294 3293 } 3295 3294 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ··· 3300 3299 p->idx += 1; 3301 3300 break; 3302 3301 default: 3303 - DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 3302 + dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx); 3304 3303 return -EINVAL; 3305 3304 } 3306 3305 } while (p->idx < p->chunk_ib->length_dw); ··· 3431 3430 case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: 3432 3431 return true; 3433 3432 default: 3434 - DRM_ERROR("Invalid register 0x%x in CS\n", reg); 3433 + DRM_DEBUG("Invalid register 0x%x in CS\n", reg); 3435 3434 return false; 3436 3435 } 3437 3436 } ··· 3449 3448 break; 3450 3449 case PACKET3_SET_BASE: 3451 3450 if (idx_value != 1) { 3452 - DRM_ERROR("bad SET_BASE"); 3451 + dev_warn_once(rdev->dev, "bad SET_BASE"); 3453 3452 return -EINVAL; 3454 3453 } 3455 3454 break; ··· 3520 3519 if ((start_reg < PACKET3_SET_CONFIG_REG_START) || 3521 3520 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 3522 3521 (end_reg >= PACKET3_SET_CONFIG_REG_END)) { 3523 - DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); 3522 + dev_warn_once(rdev->dev, "bad PACKET3_SET_CONFIG_REG\n"); 3524 3523 return -EINVAL; 3525 3524 } 3526 3525 for (i = 0; i < pkt->count; i++) { ··· 3540 3539 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */ 3541 3540 /* non mem to mem copies requires dw aligned count */ 3542 3541 if ((command & 0x1fffff) % 4) { 3543 - DRM_ERROR("CP DMA command requires dw count alignment\n"); 3542 + dev_warn_once(rdev->dev, "CP DMA command requires dw count alignment\n"); 3544 3543 
return -EINVAL; 3545 3544 } 3546 3545 } ··· 3551 3550 if (command & PACKET3_CP_DMA_CMD_SAIC) { 3552 3551 reg = start_reg; 3553 3552 if (!evergreen_vm_reg_valid(reg)) { 3554 - DRM_ERROR("CP DMA Bad SRC register\n"); 3553 + dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n"); 3555 3554 return -EINVAL; 3556 3555 } 3557 3556 } else { 3558 3557 for (i = 0; i < (command & 0x1fffff); i++) { 3559 3558 reg = start_reg + (4 * i); 3560 3559 if (!evergreen_vm_reg_valid(reg)) { 3561 - DRM_ERROR("CP DMA Bad SRC register\n"); 3560 + dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n"); 3562 3561 return -EINVAL; 3563 3562 } 3564 3563 } ··· 3572 3571 if (command & PACKET3_CP_DMA_CMD_DAIC) { 3573 3572 reg = start_reg; 3574 3573 if (!evergreen_vm_reg_valid(reg)) { 3575 - DRM_ERROR("CP DMA Bad DST register\n"); 3574 + dev_warn_once(rdev->dev, "CP DMA Bad DST register\n"); 3576 3575 return -EINVAL; 3577 3576 } 3578 3577 } else { 3579 3578 for (i = 0; i < (command & 0x1fffff); i++) { 3580 3579 reg = start_reg + (4 * i); 3581 3580 if (!evergreen_vm_reg_valid(reg)) { 3582 - DRM_ERROR("CP DMA Bad DST register\n"); 3581 + dev_warn_once(rdev->dev, "CP DMA Bad DST register\n"); 3583 3582 return -EINVAL; 3584 3583 } 3585 3584 } ··· 3592 3591 uint32_t allowed_reg_base; 3593 3592 3594 3593 if (pkt->count != 2) { 3595 - DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n"); 3594 + dev_warn_once(rdev->dev, "bad SET_APPEND_CNT (invalid count)\n"); 3596 3595 return -EINVAL; 3597 3596 } 3598 3597 ··· 3602 3601 3603 3602 areg = idx_value >> 16; 3604 3603 if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) { 3605 - DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n", 3606 - areg, idx); 3604 + dev_warn_once(rdev->dev, "forbidden register for append cnt 0x%08x at %d\n", 3605 + areg, idx); 3607 3606 return -EINVAL; 3608 3607 } 3609 3608 break; ··· 3682 3681 idx += count + 3; 3683 3682 break; 3684 3683 default: 3685 - DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", 
idx, ib->ptr[idx]); 3684 + dev_warn_once(rdev->dev, 3685 + "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", 3686 + idx, ib->ptr[idx]); 3686 3687 return -EINVAL; 3687 3688 } 3688 3689 break; ··· 3735 3732 idx += 10; 3736 3733 break; 3737 3734 default: 3738 - DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]); 3735 + dev_warn_once(rdev->dev, 3736 + "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", 3737 + idx, ib->ptr[idx]); 3739 3738 return -EINVAL; 3740 3739 } 3741 3740 break; ··· 3748 3743 idx += 1; 3749 3744 break; 3750 3745 default: 3751 - DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 3746 + dev_warn_once(rdev->dev, "Unknown packet type %d at %d !\n", cmd, idx); 3752 3747 return -EINVAL; 3753 3748 } 3754 3749 } while (idx < ib->length_dw);
+108 -107
drivers/gpu/drm/radeon/r100.c
··· 1298 1298 1299 1299 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1300 1300 if (r) { 1301 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1302 - idx, reg); 1301 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1302 + idx, reg); 1303 1303 radeon_cs_dump_packet(p, pkt); 1304 1304 return r; 1305 1305 } ··· 1313 1313 tile_flags |= RADEON_DST_TILE_MACRO; 1314 1314 if (reloc->tiling_flags & RADEON_TILING_MICRO) { 1315 1315 if (reg == RADEON_SRC_PITCH_OFFSET) { 1316 - DRM_ERROR("Cannot src blit from microtiled surface\n"); 1316 + dev_warn_once(p->dev, "Cannot src blit from microtiled surface\n"); 1317 1317 radeon_cs_dump_packet(p, pkt); 1318 1318 return -EINVAL; 1319 1319 } ··· 1342 1342 track = (struct r100_cs_track *)p->track; 1343 1343 c = radeon_get_ib_value(p, idx++) & 0x1F; 1344 1344 if (c > 16) { 1345 - DRM_ERROR("Only 16 vertex buffers are allowed %d\n", 1346 - pkt->opcode); 1345 + dev_warn_once(p->dev, "Only 16 vertex buffers are allowed %d\n", 1346 + pkt->opcode); 1347 1347 radeon_cs_dump_packet(p, pkt); 1348 1348 return -EINVAL; 1349 1349 } ··· 1351 1351 for (i = 0; i < (c - 1); i += 2, idx += 3) { 1352 1352 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1353 1353 if (r) { 1354 - DRM_ERROR("No reloc for packet3 %d\n", 1355 - pkt->opcode); 1354 + dev_warn_once(p->dev, "No reloc for packet3 %d\n", 1355 + pkt->opcode); 1356 1356 radeon_cs_dump_packet(p, pkt); 1357 1357 return r; 1358 1358 } ··· 1364 1364 track->arrays[i + 0].esize &= 0x7F; 1365 1365 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1366 1366 if (r) { 1367 - DRM_ERROR("No reloc for packet3 %d\n", 1368 - pkt->opcode); 1367 + dev_warn_once(p->dev, "No reloc for packet3 %d\n", 1368 + pkt->opcode); 1369 1369 radeon_cs_dump_packet(p, pkt); 1370 1370 return r; 1371 1371 } ··· 1377 1377 if (c & 1) { 1378 1378 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1379 1379 if (r) { 1380 - DRM_ERROR("No reloc for packet3 %d\n", 1381 - pkt->opcode); 1380 + dev_warn_once(p->dev, "No reloc for packet3 %d\n", 
1381 + pkt->opcode); 1382 1382 radeon_cs_dump_packet(p, pkt); 1383 1383 return r; 1384 1384 } ··· 1470 1470 /* check its a wait until and only 1 count */ 1471 1471 if (waitreloc.reg != RADEON_WAIT_UNTIL || 1472 1472 waitreloc.count != 0) { 1473 - DRM_ERROR("vline wait had illegal wait until segment\n"); 1473 + dev_warn_once(p->dev, "vline wait had illegal wait until segment\n"); 1474 1474 return -EINVAL; 1475 1475 } 1476 1476 1477 1477 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) { 1478 - DRM_ERROR("vline wait had illegal wait until\n"); 1478 + dev_warn_once(p->dev, "vline wait had illegal wait until\n"); 1479 1479 return -EINVAL; 1480 1480 } 1481 1481 ··· 1493 1493 reg = R100_CP_PACKET0_GET_REG(header); 1494 1494 crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id); 1495 1495 if (!crtc) { 1496 - DRM_ERROR("cannot find crtc %d\n", crtc_id); 1496 + dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id); 1497 1497 return -ENOENT; 1498 1498 } 1499 1499 radeon_crtc = to_radeon_crtc(crtc); ··· 1514 1514 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; 1515 1515 break; 1516 1516 default: 1517 - DRM_ERROR("unknown crtc reloc\n"); 1517 + dev_warn_once(p->dev, "unknown crtc reloc\n"); 1518 1518 return -EINVAL; 1519 1519 } 1520 1520 ib[h_idx] = header; ··· 1599 1599 case RADEON_CRTC_GUI_TRIG_VLINE: 1600 1600 r = r100_cs_packet_parse_vline(p); 1601 1601 if (r) { 1602 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1602 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1603 1603 idx, reg); 1604 1604 radeon_cs_dump_packet(p, pkt); 1605 1605 return r; ··· 1616 1616 case RADEON_RB3D_DEPTHOFFSET: 1617 1617 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1618 1618 if (r) { 1619 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1620 - idx, reg); 1619 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1620 + idx, reg); 1621 1621 radeon_cs_dump_packet(p, pkt); 1622 1622 return r; 1623 1623 } ··· 1629 1629 case RADEON_RB3D_COLOROFFSET: 1630 1630 r = 
radeon_cs_packet_next_reloc(p, &reloc, 0); 1631 1631 if (r) { 1632 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1633 - idx, reg); 1632 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1633 + idx, reg); 1634 1634 radeon_cs_dump_packet(p, pkt); 1635 1635 return r; 1636 1636 } ··· 1645 1645 i = (reg - RADEON_PP_TXOFFSET_0) / 24; 1646 1646 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1647 1647 if (r) { 1648 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1649 - idx, reg); 1648 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1649 + idx, reg); 1650 1650 radeon_cs_dump_packet(p, pkt); 1651 1651 return r; 1652 1652 } ··· 1672 1672 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; 1673 1673 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1674 1674 if (r) { 1675 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1676 - idx, reg); 1675 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1676 + idx, reg); 1677 1677 radeon_cs_dump_packet(p, pkt); 1678 1678 return r; 1679 1679 } ··· 1690 1690 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; 1691 1691 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1692 1692 if (r) { 1693 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1694 - idx, reg); 1693 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1694 + idx, reg); 1695 1695 radeon_cs_dump_packet(p, pkt); 1696 1696 return r; 1697 1697 } ··· 1708 1708 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; 1709 1709 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1710 1710 if (r) { 1711 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1712 - idx, reg); 1711 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1712 + idx, reg); 1713 1713 radeon_cs_dump_packet(p, pkt); 1714 1714 return r; 1715 1715 } ··· 1726 1726 case RADEON_RB3D_COLORPITCH: 1727 1727 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1728 1728 if (r) { 1729 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1730 - idx, reg); 1729 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1730 + idx, reg); 1731 1731 radeon_cs_dump_packet(p, pkt); 
1732 1732 return r; 1733 1733 } ··· 1768 1768 track->cb[0].cpp = 4; 1769 1769 break; 1770 1770 default: 1771 - DRM_ERROR("Invalid color buffer format (%d) !\n", 1772 - ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1771 + dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n", 1772 + ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1773 1773 return -EINVAL; 1774 1774 } 1775 1775 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); ··· 1797 1797 case RADEON_RB3D_ZPASS_ADDR: 1798 1798 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1799 1799 if (r) { 1800 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1801 - idx, reg); 1800 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1801 + idx, reg); 1802 1802 radeon_cs_dump_packet(p, pkt); 1803 1803 return r; 1804 1804 } ··· 1927 1927 idx = pkt->idx + 1; 1928 1928 value = radeon_get_ib_value(p, idx + 2); 1929 1929 if ((value + 1) > radeon_bo_size(robj)) { 1930 - DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1931 - "(need %u have %lu) !\n", 1932 - value + 1, 1933 - radeon_bo_size(robj)); 1930 + dev_warn_once(p->dev, "[drm] Buffer too small for PACKET3 INDX_BUFFER " 1931 + "(need %u have %lu) !\n", 1932 + value + 1, 1933 + radeon_bo_size(robj)); 1934 1934 return -EINVAL; 1935 1935 } 1936 1936 return 0; ··· 1957 1957 case PACKET3_INDX_BUFFER: 1958 1958 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1959 1959 if (r) { 1960 - DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1960 + dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode); 1961 1961 radeon_cs_dump_packet(p, pkt); 1962 1962 return r; 1963 1963 } ··· 1971 1971 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1972 1972 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1973 1973 if (r) { 1974 - DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1974 + dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode); 1975 1975 radeon_cs_dump_packet(p, pkt); 1976 1976 return r; 1977 1977 } ··· 1992 1992 break; 1993 1993 case 
PACKET3_3D_DRAW_IMMD: 1994 1994 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 1995 - DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1995 + dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n"); 1996 1996 return -EINVAL; 1997 1997 } 1998 1998 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); ··· 2005 2005 /* triggers drawing using in-packet vertex data */ 2006 2006 case PACKET3_3D_DRAW_IMMD_2: 2007 2007 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 2008 - DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 2008 + dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n"); 2009 2009 return -EINVAL; 2010 2010 } 2011 2011 track->vap_vf_cntl = radeon_get_ib_value(p, idx); ··· 2051 2051 case PACKET3_NOP: 2052 2052 break; 2053 2053 default: 2054 - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2054 + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); 2055 2055 return -EINVAL; 2056 2056 } 2057 2057 return 0; ··· 2093 2093 r = r100_packet3_check(p, &pkt); 2094 2094 break; 2095 2095 default: 2096 - DRM_ERROR("Unknown packet type %d !\n", 2097 - pkt.type); 2096 + dev_warn_once(p->dev, "Unknown packet type %d !\n", 2097 + pkt.type); 2098 2098 return -EINVAL; 2099 2099 } 2100 2100 if (r) ··· 2105 2105 2106 2106 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) 2107 2107 { 2108 - DRM_ERROR("pitch %d\n", t->pitch); 2109 - DRM_ERROR("use_pitch %d\n", t->use_pitch); 2110 - DRM_ERROR("width %d\n", t->width); 2111 - DRM_ERROR("width_11 %d\n", t->width_11); 2112 - DRM_ERROR("height %d\n", t->height); 2113 - DRM_ERROR("height_11 %d\n", t->height_11); 2114 - DRM_ERROR("num levels %d\n", t->num_levels); 2115 - DRM_ERROR("depth %d\n", t->txdepth); 2116 - DRM_ERROR("bpp %d\n", t->cpp); 2117 - DRM_ERROR("coordinate type %d\n", t->tex_coord_type); 2118 - DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); 2119 - DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); 2120 
- DRM_ERROR("compress format %d\n", t->compress_format); 2108 + DRM_DEBUG("pitch %d\n", t->pitch); 2109 + DRM_DEBUG("use_pitch %d\n", t->use_pitch); 2110 + DRM_DEBUG("width %d\n", t->width); 2111 + DRM_DEBUG("width_11 %d\n", t->width_11); 2112 + DRM_DEBUG("height %d\n", t->height); 2113 + DRM_DEBUG("height_11 %d\n", t->height_11); 2114 + DRM_DEBUG("num levels %d\n", t->num_levels); 2115 + DRM_DEBUG("depth %d\n", t->txdepth); 2116 + DRM_DEBUG("bpp %d\n", t->cpp); 2117 + DRM_DEBUG("coordinate type %d\n", t->tex_coord_type); 2118 + DRM_DEBUG("width round to power of 2 %d\n", t->roundup_w); 2119 + DRM_DEBUG("height round to power of 2 %d\n", t->roundup_h); 2120 + DRM_DEBUG("compress format %d\n", t->compress_format); 2121 2121 } 2122 2122 2123 2123 static int r100_track_compress_size(int compress_format, int w, int h) ··· 2172 2172 size += track->textures[idx].cube_info[face].offset; 2173 2173 2174 2174 if (size > radeon_bo_size(cube_robj)) { 2175 - DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2176 - size, radeon_bo_size(cube_robj)); 2175 + dev_warn_once(rdev->dev, 2176 + "Cube texture offset greater than object size %lu %lu\n", 2177 + size, radeon_bo_size(cube_robj)); 2177 2178 r100_cs_track_texture_print(&track->textures[idx]); 2178 2179 return -1; 2179 2180 } ··· 2197 2196 continue; 2198 2197 robj = track->textures[u].robj; 2199 2198 if (robj == NULL) { 2200 - DRM_ERROR("No texture bound to unit %u\n", u); 2199 + dev_warn_once(rdev->dev, "No texture bound to unit %u\n", u); 2201 2200 return -EINVAL; 2202 2201 } 2203 2202 size = 0; ··· 2250 2249 size *= 6; 2251 2250 break; 2252 2251 default: 2253 - DRM_ERROR("Invalid texture coordinate type %u for unit " 2254 - "%u\n", track->textures[u].tex_coord_type, u); 2252 + dev_warn_once(rdev->dev, "Invalid texture coordinate type %u for unit " 2253 + "%u\n", track->textures[u].tex_coord_type, u); 2255 2254 return -EINVAL; 2256 2255 } 2257 2256 if (size > radeon_bo_size(robj)) { 2258 - DRM_ERROR("Texture 
of unit %u needs %lu bytes but is " 2259 - "%lu\n", u, size, radeon_bo_size(robj)); 2257 + dev_warn_once(rdev->dev, "Texture of unit %u needs %lu bytes but is " 2258 + "%lu\n", u, size, radeon_bo_size(robj)); 2260 2259 r100_cs_track_texture_print(&track->textures[u]); 2261 2260 return -EINVAL; 2262 2261 } ··· 2278 2277 2279 2278 for (i = 0; i < num_cb; i++) { 2280 2279 if (track->cb[i].robj == NULL) { 2281 - DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2280 + dev_warn_once(rdev->dev, "[drm] No buffer for color buffer %d !\n", i); 2282 2281 return -EINVAL; 2283 2282 } 2284 2283 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2285 2284 size += track->cb[i].offset; 2286 2285 if (size > radeon_bo_size(track->cb[i].robj)) { 2287 - DRM_ERROR("[drm] Buffer too small for color buffer %d " 2288 - "(need %lu have %lu) !\n", i, size, 2289 - radeon_bo_size(track->cb[i].robj)); 2290 - DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2291 - i, track->cb[i].pitch, track->cb[i].cpp, 2292 - track->cb[i].offset, track->maxy); 2286 + dev_warn_once(rdev->dev, "[drm] Buffer too small for color buffer %d " 2287 + "(need %lu have %lu) !\n", i, size, 2288 + radeon_bo_size(track->cb[i].robj)); 2289 + dev_warn_once(rdev->dev, "[drm] color buffer %d (%u %u %u %u)\n", 2290 + i, track->cb[i].pitch, track->cb[i].cpp, 2291 + track->cb[i].offset, track->maxy); 2293 2292 return -EINVAL; 2294 2293 } 2295 2294 } ··· 2297 2296 2298 2297 if (track->zb_dirty && track->z_enabled) { 2299 2298 if (track->zb.robj == NULL) { 2300 - DRM_ERROR("[drm] No buffer for z buffer !\n"); 2299 + dev_warn_once(rdev->dev, "[drm] No buffer for z buffer !\n"); 2301 2300 return -EINVAL; 2302 2301 } 2303 2302 size = track->zb.pitch * track->zb.cpp * track->maxy; 2304 2303 size += track->zb.offset; 2305 2304 if (size > radeon_bo_size(track->zb.robj)) { 2306 - DRM_ERROR("[drm] Buffer too small for z buffer " 2307 - "(need %lu have %lu) !\n", size, 2308 - radeon_bo_size(track->zb.robj)); 2309 - 
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2310 - track->zb.pitch, track->zb.cpp, 2311 - track->zb.offset, track->maxy); 2305 + dev_warn_once(rdev->dev, "[drm] Buffer too small for z buffer " 2306 + "(need %lu have %lu) !\n", size, 2307 + radeon_bo_size(track->zb.robj)); 2308 + dev_warn_once(rdev->dev, "[drm] zbuffer (%u %u %u %u)\n", 2309 + track->zb.pitch, track->zb.cpp, 2310 + track->zb.offset, track->maxy); 2312 2311 return -EINVAL; 2313 2312 } 2314 2313 } ··· 2316 2315 2317 2316 if (track->aa_dirty && track->aaresolve) { 2318 2317 if (track->aa.robj == NULL) { 2319 - DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); 2318 + dev_warn_once(rdev->dev, "[drm] No buffer for AA resolve buffer %d !\n", i); 2320 2319 return -EINVAL; 2321 2320 } 2322 2321 /* I believe the format comes from colorbuffer0. */ 2323 2322 size = track->aa.pitch * track->cb[0].cpp * track->maxy; 2324 2323 size += track->aa.offset; 2325 2324 if (size > radeon_bo_size(track->aa.robj)) { 2326 - DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " 2327 - "(need %lu have %lu) !\n", i, size, 2328 - radeon_bo_size(track->aa.robj)); 2329 - DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", 2330 - i, track->aa.pitch, track->cb[0].cpp, 2331 - track->aa.offset, track->maxy); 2325 + dev_warn_once(rdev->dev, "[drm] Buffer too small for AA resolve buffer %d " 2326 + "(need %lu have %lu) !\n", i, size, 2327 + radeon_bo_size(track->aa.robj)); 2328 + dev_warn_once(rdev->dev, "[drm] AA resolve buffer %d (%u %u %u %u)\n", 2329 + i, track->aa.pitch, track->cb[0].cpp, 2330 + track->aa.offset, track->maxy); 2332 2331 return -EINVAL; 2333 2332 } 2334 2333 } ··· 2345 2344 for (i = 0; i < track->num_arrays; i++) { 2346 2345 size = track->arrays[i].esize * track->max_indx * 4UL; 2347 2346 if (track->arrays[i].robj == NULL) { 2348 - DRM_ERROR("(PW %u) Vertex array %u no buffer " 2349 - "bound\n", prim_walk, i); 2347 + dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer " 2348 + 
"bound\n", prim_walk, i); 2350 2349 return -EINVAL; 2351 2350 } 2352 2351 if (size > radeon_bo_size(track->arrays[i].robj)) { 2353 - dev_err(rdev->dev, "(PW %u) Vertex array %u " 2354 - "need %lu dwords have %lu dwords\n", 2355 - prim_walk, i, size >> 2, 2356 - radeon_bo_size(track->arrays[i].robj) 2357 - >> 2); 2358 - DRM_ERROR("Max indices %u\n", track->max_indx); 2352 + dev_warn_once(rdev->dev, "(PW %u) Vertex array %u " 2353 + "need %lu dwords have %lu dwords\n", 2354 + prim_walk, i, size >> 2, 2355 + radeon_bo_size(track->arrays[i].robj) 2356 + >> 2); 2357 + dev_warn_once(rdev->dev, "Max indices %u\n", track->max_indx); 2359 2358 return -EINVAL; 2360 2359 } 2361 2360 } ··· 2364 2363 for (i = 0; i < track->num_arrays; i++) { 2365 2364 size = track->arrays[i].esize * (nverts - 1) * 4UL; 2366 2365 if (track->arrays[i].robj == NULL) { 2367 - DRM_ERROR("(PW %u) Vertex array %u no buffer " 2368 - "bound\n", prim_walk, i); 2366 + dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer " 2367 + "bound\n", prim_walk, i); 2369 2368 return -EINVAL; 2370 2369 } 2371 2370 if (size > radeon_bo_size(track->arrays[i].robj)) { 2372 - dev_err(rdev->dev, "(PW %u) Vertex array %u " 2373 - "need %lu dwords have %lu dwords\n", 2374 - prim_walk, i, size >> 2, 2375 - radeon_bo_size(track->arrays[i].robj) 2376 - >> 2); 2371 + dev_warn_once(rdev->dev, "(PW %u) Vertex array %u " 2372 + "need %lu dwords have %lu dwords\n", 2373 + prim_walk, i, size >> 2, 2374 + radeon_bo_size(track->arrays[i].robj) 2375 + >> 2); 2377 2376 return -EINVAL; 2378 2377 } 2379 2378 } ··· 2381 2380 case 3: 2382 2381 size = track->vtx_size * nverts; 2383 2382 if (size != track->immd_dwords) { 2384 - DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", 2385 - track->immd_dwords, size); 2386 - DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", 2387 - nverts, track->vtx_size); 2383 + dev_warn_once(rdev->dev, "IMMD draw %u dwors but needs %lu dwords\n", 2384 + track->immd_dwords, size); 2385 + 
dev_warn_once(rdev->dev, "VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", 2386 + nverts, track->vtx_size); 2388 2387 return -EINVAL; 2389 2388 } 2390 2389 break; 2391 2390 default: 2392 - DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", 2393 - prim_walk); 2391 + dev_warn_once(rdev->dev, "[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", 2392 + prim_walk); 2394 2393 return -EINVAL; 2395 2394 } 2396 2395
+17 -17
drivers/gpu/drm/radeon/r200.c
··· 163 163 case RADEON_CRTC_GUI_TRIG_VLINE: 164 164 r = r100_cs_packet_parse_vline(p); 165 165 if (r) { 166 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 167 - idx, reg); 166 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 167 + idx, reg); 168 168 radeon_cs_dump_packet(p, pkt); 169 169 return r; 170 170 } ··· 180 180 case RADEON_RB3D_DEPTHOFFSET: 181 181 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 182 182 if (r) { 183 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 184 - idx, reg); 183 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 184 + idx, reg); 185 185 radeon_cs_dump_packet(p, pkt); 186 186 return r; 187 187 } ··· 193 193 case RADEON_RB3D_COLOROFFSET: 194 194 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 195 195 if (r) { 196 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 197 - idx, reg); 196 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 197 + idx, reg); 198 198 radeon_cs_dump_packet(p, pkt); 199 199 return r; 200 200 } ··· 212 212 i = (reg - R200_PP_TXOFFSET_0) / 24; 213 213 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 214 214 if (r) { 215 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 216 - idx, reg); 215 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 216 + idx, reg); 217 217 radeon_cs_dump_packet(p, pkt); 218 218 return r; 219 219 } ··· 265 265 face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4; 266 266 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 267 267 if (r) { 268 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 269 - idx, reg); 268 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 269 + idx, reg); 270 270 radeon_cs_dump_packet(p, pkt); 271 271 return r; 272 272 } ··· 283 283 case RADEON_RB3D_COLORPITCH: 284 284 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 285 285 if (r) { 286 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 287 - idx, reg); 286 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 287 + idx, reg); 288 288 radeon_cs_dump_packet(p, pkt); 289 289 return r; 290 290 } ··· 326 326 track->cb[0].cpp = 4; 327 327 
break; 328 328 default: 329 - DRM_ERROR("Invalid color buffer format (%d) !\n", 330 - ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 329 + dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n", 330 + ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 331 331 return -EINVAL; 332 332 } 333 333 if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) { 334 - DRM_ERROR("No support for depth xy offset in kms\n"); 334 + dev_warn_once(p->dev, "No support for depth xy offset in kms\n"); 335 335 return -EINVAL; 336 336 } 337 337 ··· 360 360 case RADEON_RB3D_ZPASS_ADDR: 361 361 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 362 362 if (r) { 363 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 364 - idx, reg); 363 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 364 + idx, reg); 365 365 radeon_cs_dump_packet(p, pkt); 366 366 return r; 367 367 }
+33 -33
drivers/gpu/drm/radeon/r300.c
··· 645 645 case RADEON_CRTC_GUI_TRIG_VLINE: 646 646 r = r100_cs_packet_parse_vline(p); 647 647 if (r) { 648 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 649 - idx, reg); 648 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 649 + idx, reg); 650 650 radeon_cs_dump_packet(p, pkt); 651 651 return r; 652 652 } ··· 664 664 i = (reg - R300_RB3D_COLOROFFSET0) >> 2; 665 665 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 666 666 if (r) { 667 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 668 - idx, reg); 667 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 668 + idx, reg); 669 669 radeon_cs_dump_packet(p, pkt); 670 670 return r; 671 671 } ··· 677 677 case R300_ZB_DEPTHOFFSET: 678 678 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 679 679 if (r) { 680 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 681 - idx, reg); 680 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 681 + idx, reg); 682 682 radeon_cs_dump_packet(p, pkt); 683 683 return r; 684 684 } ··· 706 706 i = (reg - R300_TX_OFFSET_0) >> 2; 707 707 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 708 708 if (r) { 709 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 710 - idx, reg); 709 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 710 + idx, reg); 711 711 radeon_cs_dump_packet(p, pkt); 712 712 return r; 713 713 } ··· 762 762 /* RB3D_CCTL */ 763 763 if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */ 764 764 p->rdev->cmask_filp != p->filp) { 765 - DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n"); 765 + dev_warn_once(p->dev, "Invalid RB3D_CCTL: Cannot enable CMASK.\n"); 766 766 return -EINVAL; 767 767 } 768 768 track->num_cb = ((idx_value >> 5) & 0x3) + 1; ··· 779 779 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 780 780 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 781 781 if (r) { 782 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 783 - idx, reg); 782 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 783 + idx, reg); 784 784 radeon_cs_dump_packet(p, pkt); 785 785 return r; 786 786 } ··· 812 
812 break; 813 813 case 5: 814 814 if (p->rdev->family < CHIP_RV515) { 815 - DRM_ERROR("Invalid color buffer format (%d)!\n", 816 - ((idx_value >> 21) & 0xF)); 815 + dev_warn_once(p->dev, "Invalid color buffer format (%d)!\n", 816 + ((idx_value >> 21) & 0xF)); 817 817 return -EINVAL; 818 818 } 819 819 fallthrough; ··· 827 827 track->cb[i].cpp = 16; 828 828 break; 829 829 default: 830 - DRM_ERROR("Invalid color buffer format (%d) !\n", 831 - ((idx_value >> 21) & 0xF)); 830 + dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n", 831 + ((idx_value >> 21) & 0xF)); 832 832 return -EINVAL; 833 833 } 834 834 track->cb_dirty = true; ··· 853 853 track->zb.cpp = 4; 854 854 break; 855 855 default: 856 - DRM_ERROR("Invalid z buffer format (%d) !\n", 857 - (idx_value & 0xF)); 856 + dev_warn_once(p->dev, "Invalid z buffer format (%d) !\n", 857 + (idx_value & 0xF)); 858 858 return -EINVAL; 859 859 } 860 860 track->zb_dirty = true; ··· 864 864 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 865 865 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 866 866 if (r) { 867 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 868 - idx, reg); 867 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 868 + idx, reg); 869 869 radeon_cs_dump_packet(p, pkt); 870 870 return r; 871 871 } ··· 962 962 break; 963 963 case R300_TX_FORMAT_ATI2N: 964 964 if (p->rdev->family < CHIP_R420) { 965 - DRM_ERROR("Invalid texture format %u\n", 966 - (idx_value & 0x1F)); 965 + dev_warn_once(p->dev, "Invalid texture format %u\n", 966 + (idx_value & 0x1F)); 967 967 return -EINVAL; 968 968 } 969 969 /* The same rules apply as for DXT3/5. 
*/ ··· 974 974 track->textures[i].compress_format = R100_TRACK_COMP_DXT35; 975 975 break; 976 976 default: 977 - DRM_ERROR("Invalid texture format %u\n", 978 - (idx_value & 0x1F)); 977 + dev_warn_once(p->dev, "Invalid texture format %u\n", 978 + (idx_value & 0x1F)); 979 979 return -EINVAL; 980 980 } 981 981 track->tex_dirty = true; ··· 1041 1041 R100_TRACK_COMP_DXT1; 1042 1042 } 1043 1043 } else if (idx_value & (1 << 14)) { 1044 - DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); 1044 + dev_warn_once(p->dev, "Forbidden bit TXFORMAT_MSB\n"); 1045 1045 return -EINVAL; 1046 1046 } 1047 1047 track->tex_dirty = true; ··· 1079 1079 case R300_ZB_ZPASS_ADDR: 1080 1080 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1081 1081 if (r) { 1082 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1083 - idx, reg); 1082 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1083 + idx, reg); 1084 1084 radeon_cs_dump_packet(p, pkt); 1085 1085 return r; 1086 1086 } ··· 1121 1121 case R300_RB3D_AARESOLVE_OFFSET: 1122 1122 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1123 1123 if (r) { 1124 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1125 - idx, reg); 1124 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 1125 + idx, reg); 1126 1126 radeon_cs_dump_packet(p, pkt); 1127 1127 return r; 1128 1128 } ··· 1191 1191 case PACKET3_INDX_BUFFER: 1192 1192 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1193 1193 if (r) { 1194 - DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1194 + dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode); 1195 1195 radeon_cs_dump_packet(p, pkt); 1196 1196 return r; 1197 1197 } ··· 1207 1207 * PRIM_WALK must be equal to 3 vertex data in embedded 1208 1208 * in cmd stream */ 1209 1209 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) { 1210 - DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1210 + dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n"); 1211 1211 return -EINVAL; 1212 1212 } 1213 1213 track->vap_vf_cntl = radeon_get_ib_value(p, idx 
+ 1); ··· 1222 1222 * PRIM_WALK must be equal to 3 vertex data in embedded 1223 1223 * in cmd stream */ 1224 1224 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) { 1225 - DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1225 + dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n"); 1226 1226 return -EINVAL; 1227 1227 } 1228 1228 track->vap_vf_cntl = radeon_get_ib_value(p, idx); ··· 1272 1272 case PACKET3_NOP: 1273 1273 break; 1274 1274 default: 1275 - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 1275 + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); 1276 1276 return -EINVAL; 1277 1277 } 1278 1278 return 0; ··· 1308 1308 r = r300_packet3_check(p, &pkt); 1309 1309 break; 1310 1310 default: 1311 - DRM_ERROR("Unknown packet type %d !\n", pkt.type); 1311 + dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type); 1312 1312 return -EINVAL; 1313 1313 } 1314 1314 if (r) {
+227 -218
drivers/gpu/drm/radeon/r600_cs.c
··· 361 361 362 362 format = G_0280A0_FORMAT(track->cb_color_info[i]); 363 363 if (!r600_fmt_is_valid_color(format)) { 364 - dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", 365 - __func__, __LINE__, format, 366 - i, track->cb_color_info[i]); 364 + dev_warn_once(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", 365 + __func__, __LINE__, format, 366 + i, track->cb_color_info[i]); 367 367 return -EINVAL; 368 368 } 369 369 /* pitch in pixels */ ··· 384 384 array_check.blocksize = r600_fmt_get_blocksize(format); 385 385 if (r600_get_array_mode_alignment(&array_check, 386 386 &pitch_align, &height_align, &depth_align, &base_align)) { 387 - dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 388 - G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 389 - track->cb_color_info[i]); 387 + dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 388 + G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 389 + track->cb_color_info[i]); 390 390 return -EINVAL; 391 391 } 392 392 switch (array_mode) { ··· 402 402 case V_0280A0_ARRAY_2D_TILED_THIN1: 403 403 break; 404 404 default: 405 - dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 406 - G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 407 - track->cb_color_info[i]); 405 + dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 406 + G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 407 + track->cb_color_info[i]); 408 408 return -EINVAL; 409 409 } 410 410 411 411 if (!IS_ALIGNED(pitch, pitch_align)) { 412 - dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", 413 - __func__, __LINE__, pitch, pitch_align, array_mode); 412 + dev_warn_once(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", 413 + __func__, __LINE__, pitch, pitch_align, array_mode); 414 414 return -EINVAL; 415 415 } 416 416 if (!IS_ALIGNED(height, height_align)) { 417 - dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", 418 - __func__, __LINE__, height, 
height_align, array_mode); 417 + dev_warn_once(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", 418 + __func__, __LINE__, height, height_align, array_mode); 419 419 return -EINVAL; 420 420 } 421 421 if (!IS_ALIGNED(base_offset, base_align)) { 422 - dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, 423 - base_offset, base_align, array_mode); 422 + dev_warn_once(p->dev, 423 + "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, 424 + base_offset, base_align, array_mode); 424 425 return -EINVAL; 425 426 } 426 427 ··· 448 447 * broken userspace. 449 448 */ 450 449 } else { 451 - dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n", 452 - __func__, i, array_mode, 453 - track->cb_color_bo_offset[i], tmp, 454 - radeon_bo_size(track->cb_color_bo[i]), 455 - pitch, height, r600_fmt_get_nblocksx(format, pitch), 456 - r600_fmt_get_nblocksy(format, height), 457 - r600_fmt_get_blocksize(format)); 450 + dev_warn_once(p->dev, 451 + "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n", 452 + __func__, i, array_mode, 453 + track->cb_color_bo_offset[i], tmp, 454 + radeon_bo_size(track->cb_color_bo[i]), 455 + pitch, height, r600_fmt_get_nblocksx(format, pitch), 456 + r600_fmt_get_nblocksy(format, height), 457 + r600_fmt_get_blocksize(format)); 458 458 return -EINVAL; 459 459 } 460 460 } ··· 480 478 481 479 if (bytes + track->cb_color_frag_offset[i] > 482 480 radeon_bo_size(track->cb_color_frag_bo[i])) { 483 - dev_warn(p->dev, "%s FMASK_TILE_MAX too large " 484 - "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", 485 - __func__, tile_max, bytes, 486 - track->cb_color_frag_offset[i], 487 - radeon_bo_size(track->cb_color_frag_bo[i])); 481 + dev_warn_once(p->dev, "%s FMASK_TILE_MAX too large " 482 + "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", 483 + __func__, tile_max, bytes, 484 + track->cb_color_frag_offset[i], 485 + radeon_bo_size(track->cb_color_frag_bo[i])); 488 486 return -EINVAL; 489 487 } 490 488 
} ··· 498 496 499 497 if (bytes + track->cb_color_tile_offset[i] > 500 498 radeon_bo_size(track->cb_color_tile_bo[i])) { 501 - dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " 502 - "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", 503 - __func__, block_max, bytes, 504 - track->cb_color_tile_offset[i], 505 - radeon_bo_size(track->cb_color_tile_bo[i])); 499 + dev_warn_once(p->dev, "%s CMASK_BLOCK_MAX too large " 500 + "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", 501 + __func__, block_max, bytes, 502 + track->cb_color_tile_offset[i], 503 + radeon_bo_size(track->cb_color_tile_bo[i])); 506 504 return -EINVAL; 507 505 } 508 506 break; 509 507 } 510 508 default: 511 - dev_warn(p->dev, "%s invalid tile mode\n", __func__); 509 + dev_warn_once(p->dev, "%s invalid tile mode\n", __func__); 512 510 return -EINVAL; 513 511 } 514 512 return 0; ··· 528 526 529 527 530 528 if (track->db_bo == NULL) { 531 - dev_warn(p->dev, "z/stencil with no depth buffer\n"); 529 + dev_warn_once(p->dev, "z/stencil with no depth buffer\n"); 532 530 return -EINVAL; 533 531 } 534 532 switch (G_028010_FORMAT(track->db_depth_info)) { ··· 546 544 bpe = 8; 547 545 break; 548 546 default: 549 - dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); 547 + dev_warn_once(p->dev, 548 + "z/stencil with invalid format %d\n", 549 + G_028010_FORMAT(track->db_depth_info)); 550 550 return -EINVAL; 551 551 } 552 552 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 553 553 if (!track->db_depth_size_idx) { 554 - dev_warn(p->dev, "z/stencil buffer size not set\n"); 554 + dev_warn_once(p->dev, "z/stencil buffer size not set\n"); 555 555 return -EINVAL; 556 556 } 557 557 tmp = radeon_bo_size(track->db_bo) - track->db_offset; 558 558 tmp = (tmp / bpe) >> 6; 559 559 if (!tmp) { 560 - dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", 561 - track->db_depth_size, bpe, track->db_offset, 562 - radeon_bo_size(track->db_bo)); 560 + 
dev_warn_once(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", 561 + track->db_depth_size, bpe, track->db_offset, 562 + radeon_bo_size(track->db_bo)); 563 563 return -EINVAL; 564 564 } 565 565 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); ··· 583 579 array_check.blocksize = bpe; 584 580 if (r600_get_array_mode_alignment(&array_check, 585 581 &pitch_align, &height_align, &depth_align, &base_align)) { 586 - dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 587 - G_028010_ARRAY_MODE(track->db_depth_info), 588 - track->db_depth_info); 582 + dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 583 + G_028010_ARRAY_MODE(track->db_depth_info), 584 + track->db_depth_info); 589 585 return -EINVAL; 590 586 } 591 587 switch (array_mode) { ··· 596 592 case V_028010_ARRAY_2D_TILED_THIN1: 597 593 break; 598 594 default: 599 - dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 600 - G_028010_ARRAY_MODE(track->db_depth_info), 601 - track->db_depth_info); 595 + dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 596 + G_028010_ARRAY_MODE(track->db_depth_info), 597 + track->db_depth_info); 602 598 return -EINVAL; 603 599 } 604 600 605 601 if (!IS_ALIGNED(pitch, pitch_align)) { 606 - dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", 602 + dev_warn_once(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", 607 603 __func__, __LINE__, pitch, pitch_align, array_mode); 608 604 return -EINVAL; 609 605 } 610 606 if (!IS_ALIGNED(height, height_align)) { 611 - dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", 607 + dev_warn_once(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", 612 608 __func__, __LINE__, height, height_align, array_mode); 613 609 return -EINVAL; 614 610 } 615 611 if (!IS_ALIGNED(base_offset, base_align)) { 616 - dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__, 612 + dev_warn_once(p->dev, "%s offset 0x%llx, 0x%llx, %d 
not aligned\n", __func__, 617 613 base_offset, base_align, array_mode); 618 614 return -EINVAL; 619 615 } ··· 622 618 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 623 619 tmp = ntiles * bpe * 64 * nviews * track->nsamples; 624 620 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 625 - dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 626 - array_mode, 627 - track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 628 - radeon_bo_size(track->db_bo)); 621 + dev_warn_once(p->dev, 622 + "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 623 + array_mode, 624 + track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 625 + radeon_bo_size(track->db_bo)); 629 626 return -EINVAL; 630 627 } 631 628 } ··· 637 632 unsigned nbx, nby; 638 633 639 634 if (track->htile_bo == NULL) { 640 - dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", 641 - __func__, __LINE__, track->db_depth_info); 635 + dev_warn_once(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", 636 + __func__, __LINE__, track->db_depth_info); 642 637 return -EINVAL; 643 638 } 644 639 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 645 - dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", 646 - __func__, __LINE__, track->db_depth_size); 640 + dev_warn_once(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", 641 + __func__, __LINE__, track->db_depth_size); 647 642 return -EINVAL; 648 643 } 649 644 ··· 681 676 nby = round_up(nby, 16 * 8); 682 677 break; 683 678 default: 684 - dev_warn(p->dev, "%s:%d invalid num pipes %d\n", 685 - __func__, __LINE__, track->npipes); 679 + dev_warn_once(p->dev, "%s:%d invalid num pipes %d\n", 680 + __func__, __LINE__, track->npipes); 686 681 return -EINVAL; 687 682 } 688 683 } ··· 694 689 size += track->htile_offset; 695 690 696 691 if (size > radeon_bo_size(track->htile_bo)) { 697 - dev_warn(p->dev, 
"%s:%d htile surface too small %ld for %ld (%d %d)\n", 698 - __func__, __LINE__, radeon_bo_size(track->htile_bo), 699 - size, nbx, nby); 692 + dev_warn_once(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", 693 + __func__, __LINE__, radeon_bo_size(track->htile_bo), 694 + size, nbx, nby); 700 695 return -EINVAL; 701 696 } 702 697 } ··· 723 718 u64 offset = (u64)track->vgt_strmout_bo_offset[i] + 724 719 (u64)track->vgt_strmout_size[i]; 725 720 if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { 726 - DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", 727 - i, offset, 728 - radeon_bo_size(track->vgt_strmout_bo[i])); 721 + dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n", 722 + i, offset, 723 + radeon_bo_size(track->vgt_strmout_bo[i])); 729 724 return -EINVAL; 730 725 } 731 726 } else { 732 - dev_warn(p->dev, "No buffer for streamout %d\n", i); 727 + dev_warn_once(p->dev, "No buffer for streamout %d\n", i); 733 728 return -EINVAL; 734 729 } 735 730 } ··· 758 753 (tmp >> (i * 4)) & 0xF) { 759 754 /* at least one component is enabled */ 760 755 if (track->cb_color_bo[i] == NULL) { 761 - dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 762 - __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 756 + dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 757 + __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 763 758 return -EINVAL; 764 759 } 765 760 /* perform rewrite of CB_COLOR[0-7]_SIZE */ ··· 846 841 /* check its a WAIT_REG_MEM */ 847 842 if (wait_reg_mem.type != RADEON_PACKET_TYPE3 || 848 843 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { 849 - DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); 844 + dev_warn_once(p->dev, "vline wait missing WAIT_REG_MEM segment\n"); 850 845 return -EINVAL; 851 846 } 852 847 853 848 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); 854 849 /* bit 4 is reg (0) or mem (1) */ 855 850 if (wait_reg_mem_info & 0x10) { 
856 - DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n"); 851 + dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on MEM instead of REG\n"); 857 852 return -EINVAL; 858 853 } 859 854 /* bit 8 is me (0) or pfp (1) */ 860 855 if (wait_reg_mem_info & 0x100) { 861 - DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n"); 856 + dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on PFP instead of ME\n"); 862 857 return -EINVAL; 863 858 } 864 859 /* waiting for value to be equal */ 865 860 if ((wait_reg_mem_info & 0x7) != 0x3) { 866 - DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); 861 + dev_warn_once(p->dev, "vline WAIT_REG_MEM function not equal\n"); 867 862 return -EINVAL; 868 863 } 869 864 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) { 870 - DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); 865 + dev_warn_once(p->dev, "vline WAIT_REG_MEM bad reg\n"); 871 866 return -EINVAL; 872 867 } 873 868 874 869 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) { 875 - DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); 870 + dev_warn_once(p->dev, "vline WAIT_REG_MEM bad bit mask\n"); 876 871 return -EINVAL; 877 872 } 878 873 ··· 891 886 892 887 crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id); 893 888 if (!crtc) { 894 - DRM_ERROR("cannot find crtc %d\n", crtc_id); 889 + dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id); 895 890 return -ENOENT; 896 891 } 897 892 radeon_crtc = to_radeon_crtc(crtc); ··· 912 907 ib[h_idx] = header; 913 908 ib[h_idx + 4] = vline_status[crtc_id] >> 2; 914 909 } else { 915 - DRM_ERROR("unknown crtc reloc\n"); 910 + dev_warn_once(p->dev, "unknown crtc reloc\n"); 916 911 return -EINVAL; 917 912 } 918 913 return 0; ··· 928 923 case AVIVO_D1MODE_VLINE_START_END: 929 924 r = r600_cs_packet_parse_vline(p); 930 925 if (r) { 931 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 932 - idx, reg); 926 + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", 927 + idx, reg); 933 928 
return r; 934 929 } 935 930 break; ··· 977 972 978 973 i = (reg >> 7); 979 974 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { 980 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 975 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 981 976 return -EINVAL; 982 977 } 983 978 m = 1 << ((reg >> 2) & 31); ··· 1018 1013 case SQ_VSTMP_RING_BASE: 1019 1014 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1020 1015 if (r) { 1021 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1022 - "0x%04X\n", reg); 1016 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1017 + "0x%04X\n", reg); 1023 1018 return -EINVAL; 1024 1019 } 1025 1020 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1036 1031 radeon_cs_packet_next_is_pkt3_nop(p)) { 1037 1032 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1038 1033 if (r) { 1039 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1040 - "0x%04X\n", reg); 1034 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1035 + "0x%04X\n", reg); 1041 1036 return -EINVAL; 1042 1037 } 1043 1038 track->db_depth_info = radeon_get_ib_value(p, idx); ··· 1078 1073 case VGT_STRMOUT_BUFFER_BASE_3: 1079 1074 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1080 1075 if (r) { 1081 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1082 - "0x%04X\n", reg); 1076 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1077 + "0x%04X\n", reg); 1083 1078 return -EINVAL; 1084 1079 } 1085 1080 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; ··· 1101 1096 case CP_COHER_BASE: 1102 1097 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1103 1098 if (r) { 1104 - dev_warn(p->dev, "missing reloc for CP_COHER_BASE " 1105 - "0x%04X\n", reg); 1099 + dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE " 1100 + "0x%04X\n", reg); 1106 1101 return -EINVAL; 1107 1102 } 1108 1103 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1275 1270 case CB_COLOR7_BASE: 1276 1271 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1277 1272 if (r) { 1278 - 
dev_warn(p->dev, "bad SET_CONTEXT_REG " 1279 - "0x%04X\n", reg); 1273 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1274 + "0x%04X\n", reg); 1280 1275 return -EINVAL; 1281 1276 } 1282 1277 tmp = (reg - CB_COLOR0_BASE) / 4; ··· 1290 1285 case DB_DEPTH_BASE: 1291 1286 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1292 1287 if (r) { 1293 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1294 - "0x%04X\n", reg); 1288 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1289 + "0x%04X\n", reg); 1295 1290 return -EINVAL; 1296 1291 } 1297 1292 track->db_offset = radeon_get_ib_value(p, idx) << 8; ··· 1303 1298 case DB_HTILE_DATA_BASE: 1304 1299 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1305 1300 if (r) { 1306 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1307 - "0x%04X\n", reg); 1301 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1302 + "0x%04X\n", reg); 1308 1303 return -EINVAL; 1309 1304 } 1310 1305 track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8; ··· 1373 1368 case SQ_ALU_CONST_CACHE_VS_15: 1374 1369 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1375 1370 if (r) { 1376 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 1377 - "0x%04X\n", reg); 1371 + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " 1372 + "0x%04X\n", reg); 1378 1373 return -EINVAL; 1379 1374 } 1380 1375 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1382 1377 case SX_MEMORY_EXPORT_BASE: 1383 1378 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1384 1379 if (r) { 1385 - dev_warn(p->dev, "bad SET_CONFIG_REG " 1386 - "0x%04X\n", reg); 1380 + dev_warn_once(p->dev, "bad SET_CONFIG_REG " 1381 + "0x%04X\n", reg); 1387 1382 return -EINVAL; 1388 1383 } 1389 1384 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1392 1387 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; 1393 1388 break; 1394 1389 default: 1395 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1390 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", 
reg, idx); 1396 1391 return -EINVAL; 1397 1392 } 1398 1393 return 0; ··· 1548 1543 llevel = 0; 1549 1544 break; 1550 1545 default: 1551 - dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); 1546 + dev_warn_once(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); 1552 1547 return -EINVAL; 1553 1548 } 1554 1549 if (!r600_fmt_is_valid_texture(format, p->family)) { 1555 - dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1556 - __func__, __LINE__, format); 1550 + dev_warn_once(p->dev, "%s:%d texture invalid format %d\n", 1551 + __func__, __LINE__, format); 1557 1552 return -EINVAL; 1558 1553 } 1559 1554 1560 1555 if (r600_get_array_mode_alignment(&array_check, 1561 1556 &pitch_align, &height_align, &depth_align, &base_align)) { 1562 - dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", 1563 - __func__, __LINE__, G_038000_TILE_MODE(word0)); 1557 + dev_warn_once(p->dev, "%s:%d tex array mode (%d) invalid\n", 1558 + __func__, __LINE__, G_038000_TILE_MODE(word0)); 1564 1559 return -EINVAL; 1565 1560 } 1566 1561 1567 1562 /* XXX check height as well... 
*/ 1568 1563 1569 1564 if (!IS_ALIGNED(pitch, pitch_align)) { 1570 - dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", 1571 - __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); 1565 + dev_warn_once(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", 1566 + __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); 1572 1567 return -EINVAL; 1573 1568 } 1574 1569 if (!IS_ALIGNED(base_offset, base_align)) { 1575 - dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", 1576 - __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); 1570 + dev_warn_once(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", 1571 + __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); 1577 1572 return -EINVAL; 1578 1573 } 1579 1574 if (!IS_ALIGNED(mip_offset, base_align)) { 1580 - dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", 1581 - __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); 1575 + dev_warn_once(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", 1576 + __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); 1582 1577 return -EINVAL; 1583 1578 } 1584 1579 1585 1580 if (blevel > llevel) { 1586 - dev_warn(p->dev, "texture blevel %d > llevel %d\n", 1587 - blevel, llevel); 1581 + dev_warn_once(p->dev, "texture blevel %d > llevel %d\n", 1582 + blevel, llevel); 1588 1583 } 1589 1584 if (is_array) { 1590 1585 barray = G_038014_BASE_ARRAY(word5); ··· 1597 1592 &l0_size, &mipmap_size); 1598 1593 /* using get ib will give us the offset into the texture bo */ 1599 1594 if ((l0_size + word2) > radeon_bo_size(texture)) { 1600 - dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", 1601 - w0, h0, pitch_align, height_align, 1602 - array_check.array_mode, format, word2, 1603 - l0_size, radeon_bo_size(texture)); 1604 - dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, 
height_align, base_align); 1595 + dev_warn_once(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", 1596 + w0, h0, pitch_align, height_align, 1597 + array_check.array_mode, format, word2, 1598 + l0_size, radeon_bo_size(texture)); 1599 + dev_warn_once(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); 1605 1600 return -EINVAL; 1606 1601 } 1607 1602 /* using get ib will give us the offset into the mipmap bo */ 1608 1603 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { 1609 - /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1604 + /*dev_warn_once(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1610 1605 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ 1611 1606 } 1612 1607 return 0; ··· 1618 1613 1619 1614 i = (reg >> 7); 1620 1615 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { 1621 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1616 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1622 1617 return false; 1623 1618 } 1624 1619 m = 1 << ((reg >> 2) & 31); 1625 1620 if (!(r600_reg_safe_bm[i] & m)) 1626 1621 return true; 1627 - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1622 + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1628 1623 return false; 1629 1624 } 1630 1625 ··· 1653 1648 uint64_t offset; 1654 1649 1655 1650 if (pkt->count != 1) { 1656 - DRM_ERROR("bad SET PREDICATION\n"); 1651 + dev_warn_once(p->dev, "bad SET PREDICATION\n"); 1657 1652 return -EINVAL; 1658 1653 } 1659 1654 ··· 1665 1660 return 0; 1666 1661 1667 1662 if (pred_op > 2) { 1668 - DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); 1663 + dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op); 1669 1664 return -EINVAL; 1670 1665 } 1671 1666 1672 1667 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1673 1668 if (r) { 1674 - DRM_ERROR("bad SET 
PREDICATION\n"); 1669 + dev_warn_once(p->dev, "bad SET PREDICATION\n"); 1675 1670 return -EINVAL; 1676 1671 } 1677 1672 ··· 1686 1681 1687 1682 case PACKET3_START_3D_CMDBUF: 1688 1683 if (p->family >= CHIP_RV770 || pkt->count) { 1689 - DRM_ERROR("bad START_3D\n"); 1684 + dev_warn_once(p->dev, "bad START_3D\n"); 1690 1685 return -EINVAL; 1691 1686 } 1692 1687 break; 1693 1688 case PACKET3_CONTEXT_CONTROL: 1694 1689 if (pkt->count != 1) { 1695 - DRM_ERROR("bad CONTEXT_CONTROL\n"); 1690 + dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n"); 1696 1691 return -EINVAL; 1697 1692 } 1698 1693 break; 1699 1694 case PACKET3_INDEX_TYPE: 1700 1695 case PACKET3_NUM_INSTANCES: 1701 1696 if (pkt->count) { 1702 - DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n"); 1697 + dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES\n"); 1703 1698 return -EINVAL; 1704 1699 } 1705 1700 break; ··· 1707 1702 { 1708 1703 uint64_t offset; 1709 1704 if (pkt->count != 3) { 1710 - DRM_ERROR("bad DRAW_INDEX\n"); 1705 + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); 1711 1706 return -EINVAL; 1712 1707 } 1713 1708 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1714 1709 if (r) { 1715 - DRM_ERROR("bad DRAW_INDEX\n"); 1710 + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); 1716 1711 return -EINVAL; 1717 1712 } 1718 1713 ··· 1725 1720 1726 1721 r = r600_cs_track_check(p); 1727 1722 if (r) { 1728 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1723 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1729 1724 return r; 1730 1725 } 1731 1726 break; 1732 1727 } 1733 1728 case PACKET3_DRAW_INDEX_AUTO: 1734 1729 if (pkt->count != 1) { 1735 - DRM_ERROR("bad DRAW_INDEX_AUTO\n"); 1730 + dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n"); 1736 1731 return -EINVAL; 1737 1732 } 1738 1733 r = r600_cs_track_check(p); 1739 1734 if (r) { 1740 - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); 1735 + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, 
__LINE__, idx); 1741 1736 return r; 1742 1737 } 1743 1738 break; 1744 1739 case PACKET3_DRAW_INDEX_IMMD_BE: 1745 1740 case PACKET3_DRAW_INDEX_IMMD: 1746 1741 if (pkt->count < 2) { 1747 - DRM_ERROR("bad DRAW_INDEX_IMMD\n"); 1742 + dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n"); 1748 1743 return -EINVAL; 1749 1744 } 1750 1745 r = r600_cs_track_check(p); 1751 1746 if (r) { 1752 - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1747 + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); 1753 1748 return r; 1754 1749 } 1755 1750 break; 1756 1751 case PACKET3_WAIT_REG_MEM: 1757 1752 if (pkt->count != 5) { 1758 - DRM_ERROR("bad WAIT_REG_MEM\n"); 1753 + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); 1759 1754 return -EINVAL; 1760 1755 } 1761 1756 /* bit 4 is reg (0) or mem (1) */ ··· 1764 1759 1765 1760 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1766 1761 if (r) { 1767 - DRM_ERROR("bad WAIT_REG_MEM\n"); 1762 + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); 1768 1763 return -EINVAL; 1769 1764 } 1770 1765 ··· 1775 1770 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); 1776 1771 ib[idx+2] = upper_32_bits(offset) & 0xff; 1777 1772 } else if (idx_value & 0x100) { 1778 - DRM_ERROR("cannot use PFP on REG wait\n"); 1773 + dev_warn_once(p->dev, "cannot use PFP on REG wait\n"); 1779 1774 return -EINVAL; 1780 1775 } 1781 1776 break; ··· 1784 1779 u32 command, size; 1785 1780 u64 offset, tmp; 1786 1781 if (pkt->count != 4) { 1787 - DRM_ERROR("bad CP DMA\n"); 1782 + dev_warn_once(p->dev, "bad CP DMA\n"); 1788 1783 return -EINVAL; 1789 1784 } 1790 1785 command = radeon_get_ib_value(p, idx+4); 1791 1786 size = command & 0x1fffff; 1792 1787 if (command & PACKET3_CP_DMA_CMD_SAS) { 1793 1788 /* src address space is register */ 1794 - DRM_ERROR("CP DMA SAS not supported\n"); 1789 + dev_warn_once(p->dev, "CP DMA SAS not supported\n"); 1795 1790 return -EINVAL; 1796 1791 } else { 1797 1792 if (command & PACKET3_CP_DMA_CMD_SAIC) { 1798 - 
DRM_ERROR("CP DMA SAIC only supported for registers\n"); 1793 + dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n"); 1799 1794 return -EINVAL; 1800 1795 } 1801 1796 /* src address space is memory */ 1802 1797 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1803 1798 if (r) { 1804 - DRM_ERROR("bad CP DMA SRC\n"); 1799 + dev_warn_once(p->dev, "bad CP DMA SRC\n"); 1805 1800 return -EINVAL; 1806 1801 } 1807 1802 ··· 1811 1806 offset = reloc->gpu_offset + tmp; 1812 1807 1813 1808 if ((tmp + size) > radeon_bo_size(reloc->robj)) { 1814 - dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", 1815 - tmp + size, radeon_bo_size(reloc->robj)); 1809 + dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n", 1810 + tmp + size, radeon_bo_size(reloc->robj)); 1816 1811 return -EINVAL; 1817 1812 } 1818 1813 ··· 1821 1816 } 1822 1817 if (command & PACKET3_CP_DMA_CMD_DAS) { 1823 1818 /* dst address space is register */ 1824 - DRM_ERROR("CP DMA DAS not supported\n"); 1819 + dev_warn_once(p->dev, "CP DMA DAS not supported\n"); 1825 1820 return -EINVAL; 1826 1821 } else { 1827 1822 /* dst address space is memory */ 1828 1823 if (command & PACKET3_CP_DMA_CMD_DAIC) { 1829 - DRM_ERROR("CP DMA DAIC only supported for registers\n"); 1824 + dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n"); 1830 1825 return -EINVAL; 1831 1826 } 1832 1827 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1833 1828 if (r) { 1834 - DRM_ERROR("bad CP DMA DST\n"); 1829 + dev_warn_once(p->dev, "bad CP DMA DST\n"); 1835 1830 return -EINVAL; 1836 1831 } 1837 1832 ··· 1841 1836 offset = reloc->gpu_offset + tmp; 1842 1837 1843 1838 if ((tmp + size) > radeon_bo_size(reloc->robj)) { 1844 - dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", 1845 - tmp + size, radeon_bo_size(reloc->robj)); 1839 + dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", 1840 + tmp + size, radeon_bo_size(reloc->robj)); 1846 1841 return -EINVAL; 1847 1842 } 1848 
1843 ··· 1853 1848 } 1854 1849 case PACKET3_SURFACE_SYNC: 1855 1850 if (pkt->count != 3) { 1856 - DRM_ERROR("bad SURFACE_SYNC\n"); 1851 + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); 1857 1852 return -EINVAL; 1858 1853 } 1859 1854 /* 0xffffffff/0x0 is flush all cache flag */ ··· 1861 1856 radeon_get_ib_value(p, idx + 2) != 0) { 1862 1857 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1863 1858 if (r) { 1864 - DRM_ERROR("bad SURFACE_SYNC\n"); 1859 + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); 1865 1860 return -EINVAL; 1866 1861 } 1867 1862 ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1869 1864 break; 1870 1865 case PACKET3_EVENT_WRITE: 1871 1866 if (pkt->count != 2 && pkt->count != 0) { 1872 - DRM_ERROR("bad EVENT_WRITE\n"); 1867 + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); 1873 1868 return -EINVAL; 1874 1869 } 1875 1870 if (pkt->count) { ··· 1877 1872 1878 1873 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1879 1874 if (r) { 1880 - DRM_ERROR("bad EVENT_WRITE\n"); 1875 + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); 1881 1876 return -EINVAL; 1882 1877 } 1883 1878 offset = reloc->gpu_offset + ··· 1893 1888 uint64_t offset; 1894 1889 1895 1890 if (pkt->count != 4) { 1896 - DRM_ERROR("bad EVENT_WRITE_EOP\n"); 1891 + dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n"); 1897 1892 return -EINVAL; 1898 1893 } 1899 1894 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1900 1895 if (r) { 1901 - DRM_ERROR("bad EVENT_WRITE\n"); 1896 + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); 1902 1897 return -EINVAL; 1903 1898 } 1904 1899 ··· 1916 1911 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || 1917 1912 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 1918 1913 (end_reg >= PACKET3_SET_CONFIG_REG_END)) { 1919 - DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); 1914 + dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n"); 1920 1915 return -EINVAL; 1921 1916 } 1922 1917 for (i = 0; i < pkt->count; i++) { ··· 1932 1927 if ((start_reg < 
PACKET3_SET_CONTEXT_REG_OFFSET) || 1933 1928 (start_reg >= PACKET3_SET_CONTEXT_REG_END) || 1934 1929 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { 1935 - DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); 1930 + dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n"); 1936 1931 return -EINVAL; 1937 1932 } 1938 1933 for (i = 0; i < pkt->count; i++) { ··· 1944 1939 break; 1945 1940 case PACKET3_SET_RESOURCE: 1946 1941 if (pkt->count % 7) { 1947 - DRM_ERROR("bad SET_RESOURCE\n"); 1942 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 1948 1943 return -EINVAL; 1949 1944 } 1950 1945 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET; ··· 1952 1947 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || 1953 1948 (start_reg >= PACKET3_SET_RESOURCE_END) || 1954 1949 (end_reg >= PACKET3_SET_RESOURCE_END)) { 1955 - DRM_ERROR("bad SET_RESOURCE\n"); 1950 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 1956 1951 return -EINVAL; 1957 1952 } 1958 1953 for (i = 0; i < (pkt->count / 7); i++) { ··· 1964 1959 /* tex base */ 1965 1960 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1966 1961 if (r) { 1967 - DRM_ERROR("bad SET_RESOURCE\n"); 1962 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 1968 1963 return -EINVAL; 1969 1964 } 1970 1965 base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1978 1973 /* tex mip base */ 1979 1974 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 1980 1975 if (r) { 1981 - DRM_ERROR("bad SET_RESOURCE\n"); 1976 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 1982 1977 return -EINVAL; 1983 1978 } 1984 1979 mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 1999 1994 /* vtx base */ 2000 1995 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 2001 1996 if (r) { 2002 - DRM_ERROR("bad SET_RESOURCE\n"); 1997 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 2003 1998 return -EINVAL; 2004 1999 } 2005 2000 offset = radeon_get_ib_value(p, idx+1+(i*7)+0); 2006 2001 size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1; 2007 2002 if (p->rdev && 
(size + offset) > radeon_bo_size(reloc->robj)) { 2008 2003 /* force size to size of the buffer */ 2009 - dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", 2010 - size + offset, radeon_bo_size(reloc->robj)); 2004 + dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", 2005 + size + offset, radeon_bo_size(reloc->robj)); 2011 2006 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; 2012 2007 } 2013 2008 ··· 2020 2015 case SQ_TEX_VTX_INVALID_TEXTURE: 2021 2016 case SQ_TEX_VTX_INVALID_BUFFER: 2022 2017 default: 2023 - DRM_ERROR("bad SET_RESOURCE\n"); 2018 + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); 2024 2019 return -EINVAL; 2025 2020 } 2026 2021 } ··· 2032 2027 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 2033 2028 (start_reg >= PACKET3_SET_ALU_CONST_END) || 2034 2029 (end_reg >= PACKET3_SET_ALU_CONST_END)) { 2035 - DRM_ERROR("bad SET_ALU_CONST\n"); 2030 + dev_warn_once(p->dev, "bad SET_ALU_CONST\n"); 2036 2031 return -EINVAL; 2037 2032 } 2038 2033 } ··· 2043 2038 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || 2044 2039 (start_reg >= PACKET3_SET_BOOL_CONST_END) || 2045 2040 (end_reg >= PACKET3_SET_BOOL_CONST_END)) { 2046 - DRM_ERROR("bad SET_BOOL_CONST\n"); 2041 + dev_warn_once(p->dev, "bad SET_BOOL_CONST\n"); 2047 2042 return -EINVAL; 2048 2043 } 2049 2044 break; ··· 2053 2048 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || 2054 2049 (start_reg >= PACKET3_SET_LOOP_CONST_END) || 2055 2050 (end_reg >= PACKET3_SET_LOOP_CONST_END)) { 2056 - DRM_ERROR("bad SET_LOOP_CONST\n"); 2051 + dev_warn_once(p->dev, "bad SET_LOOP_CONST\n"); 2057 2052 return -EINVAL; 2058 2053 } 2059 2054 break; ··· 2063 2058 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || 2064 2059 (start_reg >= PACKET3_SET_CTL_CONST_END) || 2065 2060 (end_reg >= PACKET3_SET_CTL_CONST_END)) { 2066 - DRM_ERROR("bad SET_CTL_CONST\n"); 2061 + dev_warn_once(p->dev, "bad SET_CTL_CONST\n"); 2067 2062 return -EINVAL; 2068 2063 } 2069 2064 break; 2070 2065 case 
PACKET3_SET_SAMPLER: 2071 2066 if (pkt->count % 3) { 2072 - DRM_ERROR("bad SET_SAMPLER\n"); 2067 + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); 2073 2068 return -EINVAL; 2074 2069 } 2075 2070 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET; ··· 2077 2072 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || 2078 2073 (start_reg >= PACKET3_SET_SAMPLER_END) || 2079 2074 (end_reg >= PACKET3_SET_SAMPLER_END)) { 2080 - DRM_ERROR("bad SET_SAMPLER\n"); 2075 + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); 2081 2076 return -EINVAL; 2082 2077 } 2083 2078 break; 2084 2079 case PACKET3_STRMOUT_BASE_UPDATE: 2085 2080 /* RS780 and RS880 also need this */ 2086 2081 if (p->family < CHIP_RS780) { 2087 - DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); 2082 + dev_warn_once(p->dev, "STRMOUT_BASE_UPDATE only supported on 7xx\n"); 2088 2083 return -EINVAL; 2089 2084 } 2090 2085 if (pkt->count != 1) { 2091 - DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n"); 2086 + dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE packet count\n"); 2092 2087 return -EINVAL; 2093 2088 } 2094 2089 if (idx_value > 3) { 2095 - DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n"); 2090 + dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE index\n"); 2096 2091 return -EINVAL; 2097 2092 } 2098 2093 { ··· 2100 2095 2101 2096 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 2102 2097 if (r) { 2103 - DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); 2098 + dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE reloc\n"); 2104 2099 return -EINVAL; 2105 2100 } 2106 2101 2107 2102 if (reloc->robj != track->vgt_strmout_bo[idx_value]) { 2108 - DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); 2103 + dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE, bo does not match\n"); 2109 2104 return -EINVAL; 2110 2105 } 2111 2106 2112 2107 offset = (u64)radeon_get_ib_value(p, idx+1) << 8; 2113 2108 if (offset != track->vgt_strmout_bo_offset[idx_value]) { 2114 - DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 
0x%x\n", 2115 - offset, track->vgt_strmout_bo_offset[idx_value]); 2109 + dev_warn_once(p->dev, 2110 + "bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", 2111 + offset, track->vgt_strmout_bo_offset[idx_value]); 2116 2112 return -EINVAL; 2117 2113 } 2118 2114 2119 2115 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2120 - DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n", 2121 - offset + 4, radeon_bo_size(reloc->robj)); 2116 + dev_warn_once(p->dev, 2117 + "bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n", 2118 + offset + 4, radeon_bo_size(reloc->robj)); 2122 2119 return -EINVAL; 2123 2120 } 2124 2121 ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ··· 2128 2121 break; 2129 2122 case PACKET3_SURFACE_BASE_UPDATE: 2130 2123 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { 2131 - DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 2124 + dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n"); 2132 2125 return -EINVAL; 2133 2126 } 2134 2127 if (pkt->count) { 2135 - DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 2128 + dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n"); 2136 2129 return -EINVAL; 2137 2130 } 2138 2131 break; 2139 2132 case PACKET3_STRMOUT_BUFFER_UPDATE: 2140 2133 if (pkt->count != 4) { 2141 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); 2134 + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); 2142 2135 return -EINVAL; 2143 2136 } 2144 2137 /* Updating memory at DST_ADDRESS. 
*/ ··· 2146 2139 u64 offset; 2147 2140 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 2148 2141 if (r) { 2149 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); 2142 + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); 2150 2143 return -EINVAL; 2151 2144 } 2152 2145 offset = radeon_get_ib_value(p, idx+1); 2153 2146 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2154 2147 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2155 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", 2156 - offset + 4, radeon_bo_size(reloc->robj)); 2148 + dev_warn_once(p->dev, 2149 + "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", 2150 + offset + 4, radeon_bo_size(reloc->robj)); 2157 2151 return -EINVAL; 2158 2152 } 2159 2153 offset += reloc->gpu_offset; ··· 2166 2158 u64 offset; 2167 2159 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 2168 2160 if (r) { 2169 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); 2161 + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); 2170 2162 return -EINVAL; 2171 2163 } 2172 2164 offset = radeon_get_ib_value(p, idx+3); 2173 2165 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2174 2166 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2175 - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", 2176 - offset + 4, radeon_bo_size(reloc->robj)); 2167 + dev_warn_once(p->dev, 2168 + "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", 2169 + offset + 4, radeon_bo_size(reloc->robj)); 2177 2170 return -EINVAL; 2178 2171 } 2179 2172 offset += reloc->gpu_offset; ··· 2187 2178 u64 offset; 2188 2179 2189 2180 if (pkt->count != 3) { 2190 - DRM_ERROR("bad MEM_WRITE (invalid count)\n"); 2181 + dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n"); 2191 2182 return -EINVAL; 2192 2183 } 2193 2184 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 2194 2185 if (r) { 2195 - 
DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); 2186 + dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n"); 2196 2187 return -EINVAL; 2197 2188 } 2198 2189 offset = radeon_get_ib_value(p, idx+0); 2199 2190 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; 2200 2191 if (offset & 0x7) { 2201 - DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); 2192 + dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n"); 2202 2193 return -EINVAL; 2203 2194 } 2204 2195 if ((offset + 8) > radeon_bo_size(reloc->robj)) { 2205 - DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", 2206 - offset + 8, radeon_bo_size(reloc->robj)); 2196 + dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", 2197 + offset + 8, radeon_bo_size(reloc->robj)); 2207 2198 return -EINVAL; 2208 2199 } 2209 2200 offset += reloc->gpu_offset; ··· 2213 2204 } 2214 2205 case PACKET3_COPY_DW: 2215 2206 if (pkt->count != 4) { 2216 - DRM_ERROR("bad COPY_DW (invalid count)\n"); 2207 + dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n"); 2217 2208 return -EINVAL; 2218 2209 } 2219 2210 if (idx_value & 0x1) { ··· 2221 2212 /* SRC is memory. */ 2222 2213 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 2223 2214 if (r) { 2224 - DRM_ERROR("bad COPY_DW (missing src reloc)\n"); 2215 + dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n"); 2225 2216 return -EINVAL; 2226 2217 } 2227 2218 offset = radeon_get_ib_value(p, idx+1); 2228 2219 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; 2229 2220 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2230 - DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", 2231 - offset + 4, radeon_bo_size(reloc->robj)); 2221 + dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", 2222 + offset + 4, radeon_bo_size(reloc->robj)); 2232 2223 return -EINVAL; 2233 2224 } 2234 2225 offset += reloc->gpu_offset; ··· 2245 2236 /* DST is memory. 
*/ 2246 2237 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); 2247 2238 if (r) { 2248 - DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); 2239 + dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n"); 2249 2240 return -EINVAL; 2250 2241 } 2251 2242 offset = radeon_get_ib_value(p, idx+3); 2252 2243 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; 2253 2244 if ((offset + 4) > radeon_bo_size(reloc->robj)) { 2254 - DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", 2255 - offset + 4, radeon_bo_size(reloc->robj)); 2245 + dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", 2246 + offset + 4, radeon_bo_size(reloc->robj)); 2256 2247 return -EINVAL; 2257 2248 } 2258 2249 offset += reloc->gpu_offset; ··· 2268 2259 case PACKET3_NOP: 2269 2260 break; 2270 2261 default: 2271 - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); 2262 + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); 2272 2263 return -EINVAL; 2273 2264 } 2274 2265 return 0; ··· 2315 2306 r = r600_packet3_check(p, &pkt); 2316 2307 break; 2317 2308 default: 2318 - DRM_ERROR("Unknown packet type %d !\n", pkt.type); 2309 + dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type); 2319 2310 kfree(p->track); 2320 2311 p->track = NULL; 2321 2312 return -EINVAL; ··· 2355 2346 2356 2347 *cs_reloc = NULL; 2357 2348 if (p->chunk_relocs == NULL) { 2358 - DRM_ERROR("No relocation chunk !\n"); 2349 + dev_warn_once(p->dev, "No relocation chunk !\n"); 2359 2350 return -EINVAL; 2360 2351 } 2361 2352 idx = p->dma_reloc_idx; 2362 2353 if (idx >= p->nrelocs) { 2363 - DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 2364 - idx, p->nrelocs); 2354 + dev_warn_once(p->dev, "Relocs at %d after relocations chunk end %d !\n", 2355 + idx, p->nrelocs); 2365 2356 return -EINVAL; 2366 2357 } 2367 2358 *cs_reloc = &p->relocs[idx]; ··· 2394 2385 2395 2386 do { 2396 2387 if (p->idx >= ib_chunk->length_dw) { 2397 - DRM_ERROR("Can not parse packet at 
%d after CS end %d !\n", 2398 - p->idx, ib_chunk->length_dw); 2388 + dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n", 2389 + p->idx, ib_chunk->length_dw); 2399 2390 return -EINVAL; 2400 2391 } 2401 2392 idx = p->idx; ··· 2408 2399 case DMA_PACKET_WRITE: 2409 2400 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2410 2401 if (r) { 2411 - DRM_ERROR("bad DMA_PACKET_WRITE\n"); 2402 + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n"); 2412 2403 return -EINVAL; 2413 2404 } 2414 2405 if (tiled) { ··· 2426 2417 p->idx += count + 3; 2427 2418 } 2428 2419 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2429 - dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n", 2430 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2420 + dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n", 2421 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2431 2422 return -EINVAL; 2432 2423 } 2433 2424 break; 2434 2425 case DMA_PACKET_COPY: 2435 2426 r = r600_dma_cs_next_reloc(p, &src_reloc); 2436 2427 if (r) { 2437 - DRM_ERROR("bad DMA_PACKET_COPY\n"); 2428 + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); 2438 2429 return -EINVAL; 2439 2430 } 2440 2431 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2441 2432 if (r) { 2442 - DRM_ERROR("bad DMA_PACKET_COPY\n"); 2433 + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); 2443 2434 return -EINVAL; 2444 2435 } 2445 2436 if (tiled) { ··· 2493 2484 } 2494 2485 } 2495 2486 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { 2496 - dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n", 2497 - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 2487 + dev_warn_once(p->dev, "DMA copy src buffer too small (%llu %lu)\n", 2488 + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); 2498 2489 return -EINVAL; 2499 2490 } 2500 2491 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2501 - dev_warn(p->dev, "DMA write dst buffer too small (%llu 
%lu)\n", 2502 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2492 + dev_warn_once(p->dev, "DMA write dst buffer too small (%llu %lu)\n", 2493 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2503 2494 return -EINVAL; 2504 2495 } 2505 2496 break; 2506 2497 case DMA_PACKET_CONSTANT_FILL: 2507 2498 if (p->family < CHIP_RV770) { 2508 - DRM_ERROR("Constant Fill is 7xx only !\n"); 2499 + dev_warn_once(p->dev, "Constant Fill is 7xx only !\n"); 2509 2500 return -EINVAL; 2510 2501 } 2511 2502 r = r600_dma_cs_next_reloc(p, &dst_reloc); 2512 2503 if (r) { 2513 - DRM_ERROR("bad DMA_PACKET_WRITE\n"); 2504 + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n"); 2514 2505 return -EINVAL; 2515 2506 } 2516 2507 dst_offset = radeon_get_ib_value(p, idx+1); 2517 2508 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; 2518 2509 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { 2519 - dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", 2520 - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2510 + dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", 2511 + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); 2521 2512 return -EINVAL; 2522 2513 } 2523 2514 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); ··· 2528 2519 p->idx += 1; 2529 2520 break; 2530 2521 default: 2531 - DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 2522 + dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx); 2532 2523 return -EINVAL; 2533 2524 } 2534 2525 } while (p->idx < p->chunk_ib->length_dw);
+1 -1
drivers/gpu/drm/radeon/radeon_cs.c
··· 834 834 ib = p->ib.ptr; 835 835 idx = pkt->idx; 836 836 for (i = 0; i <= (pkt->count + 1); i++, idx++) 837 - DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); 837 + dev_dbg(p->dev, "ib[%d]=0x%08X\n", idx, ib[idx]); 838 838 } 839 839 840 840 /**