drm: fix a LOR (lock order reversal) issue on FreeBSD for savage driver

Correct a lock order reversal (LOR) issue on FreeBSD by allocating temporary
kernel space and doing a single DRM_COPY_FROM_USER, rather than a
DRM_VERIFYAREA_READ followed by many DRM_COPY_FROM_USER_UNCHECKED calls.
I don't like the look of the temporary space allocation, but I like the
simplification in the rest of the file. Tested with glxgears, tuxracer,
and q3 on a savage4.

From: Eric Anholt <anholt@freebsd.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>

authored by Dave Airlie and committed by Dave Airlie 3528af1b 952d751a

+171 -176
+7 -16
drivers/char/drm/savage_drv.h
··· 1 - /* savage_drv.h -- Private header for the savage driver 2 - * 3 * Copyright 2004 Felix Kuehling 4 * All Rights Reserved. 5 * ··· 192 /* Err, there is a macro wait_event in include/linux/wait.h. 193 * Avoid unwanted macro expansion. */ 194 void (*emit_clip_rect) (struct drm_savage_private * dev_priv, 195 - drm_clip_rect_t * pbox); 196 void (*dma_flush) (struct drm_savage_private * dev_priv); 197 } drm_savage_private_t; 198 ··· 217 218 /* state functions */ 219 extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 220 - drm_clip_rect_t * pbox); 221 extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 222 - drm_clip_rect_t * pbox); 223 224 #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ 225 #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ ··· 502 503 #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) 504 505 - #define BCI_COPY_FROM_USER(src,n) do { \ 506 - unsigned int i; \ 507 - for (i = 0; i < n; ++i) { \ 508 - uint32_t val; \ 509 - DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \ 510 - BCI_WRITE(val); \ 511 - } \ 512 - } while(0) 513 - 514 /* 515 * command DMA support 516 */ ··· 527 528 #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) 529 530 - #define DMA_COPY_FROM_USER(src,n) do { \ 531 - DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \ 532 dma_ptr += n; \ 533 } while(0) 534
··· 1 + /* savage_drv.h -- Private header for the savage driver */ 2 + /* 3 * Copyright 2004 Felix Kuehling 4 * All Rights Reserved. 5 * ··· 192 /* Err, there is a macro wait_event in include/linux/wait.h. 193 * Avoid unwanted macro expansion. */ 194 void (*emit_clip_rect) (struct drm_savage_private * dev_priv, 195 + const drm_clip_rect_t * pbox); 196 void (*dma_flush) (struct drm_savage_private * dev_priv); 197 } drm_savage_private_t; 198 ··· 217 218 /* state functions */ 219 extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 220 + const drm_clip_rect_t * pbox); 221 extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 222 + const drm_clip_rect_t * pbox); 223 224 #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ 225 #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ ··· 502 503 #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) 504 505 /* 506 * command DMA support 507 */ ··· 536 537 #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) 538 539 + #define DMA_COPY(src, n) do { \ 540 + memcpy(dma_ptr, (src), (n)*4); \ 541 dma_ptr += n; \ 542 } while(0) 543
+164 -160
drivers/char/drm/savage_state.c
··· 27 #include "savage_drv.h" 28 29 void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 30 - drm_clip_rect_t * pbox) 31 { 32 uint32_t scstart = dev_priv->state.s3d.new_scstart; 33 uint32_t scend = dev_priv->state.s3d.new_scend; ··· 53 } 54 55 void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 56 - drm_clip_rect_t * pbox) 57 { 58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; 59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; ··· 115 116 #define SAVE_STATE(reg,where) \ 117 if(start <= reg && start+count > reg) \ 118 - DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start]) 119 #define SAVE_STATE_MASK(reg,where,mask) do { \ 120 if(start <= reg && start+count > reg) { \ 121 uint32_t tmp; \ 122 - DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \ 123 dev_priv->state.where = (tmp & (mask)) | \ 124 (dev_priv->state.where & ~(mask)); \ 125 } \ 126 } while (0) 127 static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, 128 unsigned int start, unsigned int count, 129 - const uint32_t __user * regs) 130 { 131 if (start < SAVAGE_TEXPALADDR_S3D || 132 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { ··· 149 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); 150 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) 151 return savage_verify_texaddr(dev_priv, 0, 152 - dev_priv->state.s3d. 153 - texaddr); 154 } 155 156 return 0; ··· 157 158 static int savage_verify_state_s4(drm_savage_private_t * dev_priv, 159 unsigned int start, unsigned int count, 160 - const uint32_t __user * regs) 161 { 162 int ret = 0; 163 ··· 174 ~SAVAGE_SCISSOR_MASK_S4); 175 176 /* if any texture regs were changed ... */ 177 - if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) { 178 /* ... 
check texture state */ 179 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); 180 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); 181 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); 182 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) 183 - ret |= 184 - savage_verify_texaddr(dev_priv, 0, 185 - dev_priv->state.s4.texaddr0); 186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) 187 - ret |= 188 - savage_verify_texaddr(dev_priv, 1, 189 - dev_priv->state.s4.texaddr1); 190 } 191 192 return ret; ··· 196 197 static int savage_dispatch_state(drm_savage_private_t * dev_priv, 198 const drm_savage_cmd_header_t * cmd_header, 199 - const uint32_t __user * regs) 200 { 201 unsigned int count = cmd_header->state.count; 202 unsigned int start = cmd_header->state.start; ··· 207 208 if (!count) 209 return 0; 210 - 211 - if (DRM_VERIFYAREA_READ(regs, count * 4)) 212 - return DRM_ERR(EFAULT); 213 214 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 215 ret = savage_verify_state_s3d(dev_priv, start, count, regs); ··· 232 /* scissor regs are emitted in savage_dispatch_draw */ 233 if (start < SAVAGE_DRAWCTRL0_S4) { 234 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) 235 - count2 = 236 - count - (SAVAGE_DRAWCTRL1_S4 + 1 - start); 237 if (start + count > SAVAGE_DRAWCTRL0_S4) 238 count = SAVAGE_DRAWCTRL0_S4 - start; 239 } else if (start <= SAVAGE_DRAWCTRL1_S4) { ··· 259 while (count > 0) { 260 unsigned int n = count < 255 ? 
count : 255; 261 DMA_SET_REGISTERS(start, n); 262 - DMA_COPY_FROM_USER(regs, n); 263 count -= n; 264 start += n; 265 regs += n; ··· 417 418 static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, 419 const drm_savage_cmd_header_t * cmd_header, 420 - const uint32_t __user * vtxbuf, 421 - unsigned int vb_size, unsigned int vb_stride) 422 { 423 unsigned char reorder = 0; 424 unsigned int prim = cmd_header->prim.prim; ··· 503 504 for (i = start; i < start + count; ++i) { 505 unsigned int j = i + reorder[i % 3]; 506 - DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 507 - vtx_size); 508 } 509 510 DMA_COMMIT(); ··· 512 DMA_DRAW_PRIMITIVE(count, prim, skip); 513 514 if (vb_stride == vtx_size) { 515 - DMA_COPY_FROM_USER(&vtxbuf[vb_stride * start], 516 - vtx_size * count); 517 } else { 518 for (i = start; i < start + count; ++i) { 519 - DMA_COPY_FROM_USER(&vtxbuf 520 - [vb_stride * i], 521 - vtx_size); 522 } 523 } 524 ··· 535 536 static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, 537 const drm_savage_cmd_header_t * cmd_header, 538 - const uint16_t __user * usr_idx, 539 const drm_buf_t * dmabuf) 540 { 541 unsigned char reorder = 0; ··· 622 while (n != 0) { 623 /* Can emit up to 255 indices (85 triangles) at once. */ 624 unsigned int count = n > 255 ? 255 : n; 625 - /* Is it ok to allocate 510 bytes on the stack in an ioctl? 
*/ 626 - uint16_t idx[255]; 627 628 - /* Copy and check indices */ 629 - DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2); 630 for (i = 0; i < count; ++i) { 631 if (idx[i] > dmabuf->total / 32) { 632 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ··· 643 644 for (i = 1; i + 1 < count; i += 2) 645 BCI_WRITE(idx[i + reorder[i % 3]] | 646 - (idx[i + 1 + reorder[(i + 1) % 3]] << 647 - 16)); 648 if (i < count) 649 BCI_WRITE(idx[i + reorder[i % 3]]); 650 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ··· 665 BCI_WRITE(idx[i]); 666 } 667 668 - usr_idx += count; 669 n -= count; 670 671 prim |= BCI_CMD_DRAW_CONT; ··· 676 677 static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, 678 const drm_savage_cmd_header_t * cmd_header, 679 - const uint16_t __user * usr_idx, 680 - const uint32_t __user * vtxbuf, 681 unsigned int vb_size, unsigned int vb_stride) 682 { 683 unsigned char reorder = 0; ··· 742 while (n != 0) { 743 /* Can emit up to 255 vertices (85 triangles) at once. */ 744 unsigned int count = n > 255 ? 255 : n; 745 - /* Is it ok to allocate 510 bytes on the stack in an ioctl? 
*/ 746 - uint16_t idx[255]; 747 - 748 - /* Copy and check indices */ 749 - DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2); 750 for (i = 0; i < count; ++i) { 751 if (idx[i] > vb_size / (vb_stride * 4)) { 752 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ··· 763 764 for (i = 0; i < count; ++i) { 765 unsigned int j = idx[i + reorder[i % 3]]; 766 - DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 767 - vtx_size); 768 } 769 770 DMA_COMMIT(); ··· 773 774 for (i = 0; i < count; ++i) { 775 unsigned int j = idx[i]; 776 - DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 777 - vtx_size); 778 } 779 780 DMA_COMMIT(); 781 } 782 783 - usr_idx += count; 784 n -= count; 785 786 prim |= BCI_CMD_DRAW_CONT; ··· 790 791 static int savage_dispatch_clear(drm_savage_private_t * dev_priv, 792 const drm_savage_cmd_header_t * cmd_header, 793 - const drm_savage_cmd_header_t __user * data, 794 unsigned int nbox, 795 - const drm_clip_rect_t __user * usr_boxes) 796 { 797 - unsigned int flags = cmd_header->clear0.flags, mask, value; 798 unsigned int clear_cmd; 799 unsigned int i, nbufs; 800 DMA_LOCALS; 801 802 if (nbox == 0) 803 return 0; 804 - 805 - DRM_GET_USER_UNCHECKED(mask, &data->clear1.mask); 806 - DRM_GET_USER_UNCHECKED(value, &data->clear1.value); 807 808 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | 809 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; ··· 811 if (nbufs == 0) 812 return 0; 813 814 - if (mask != 0xffffffff) { 815 /* set mask */ 816 BEGIN_DMA(2); 817 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); 818 - DMA_WRITE(mask); 819 DMA_COMMIT(); 820 } 821 for (i = 0; i < nbox; ++i) { 822 - drm_clip_rect_t box; 823 unsigned int x, y, w, h; 824 unsigned int buf; 825 - DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); 826 - x = box.x1, y = box.y1; 827 - w = box.x2 - box.x1; 828 - h = box.y2 - box.y1; 829 BEGIN_DMA(nbufs * 6); 830 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { 831 if (!(flags & buf)) ··· 843 DMA_WRITE(dev_priv->depth_bd); 844 break; 845 } 
846 - DMA_WRITE(value); 847 DMA_WRITE(BCI_X_Y(x, y)); 848 DMA_WRITE(BCI_W_H(w, h)); 849 } 850 DMA_COMMIT(); 851 } 852 - if (mask != 0xffffffff) { 853 /* reset mask */ 854 BEGIN_DMA(2); 855 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); ··· 861 } 862 863 static int savage_dispatch_swap(drm_savage_private_t * dev_priv, 864 - unsigned int nbox, 865 - const drm_clip_rect_t __user * usr_boxes) 866 { 867 unsigned int swap_cmd; 868 unsigned int i; ··· 875 BCI_CMD_SET_ROP(swap_cmd, 0xCC); 876 877 for (i = 0; i < nbox; ++i) { 878 - drm_clip_rect_t box; 879 - DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); 880 - 881 BEGIN_DMA(6); 882 DMA_WRITE(swap_cmd); 883 DMA_WRITE(dev_priv->back_offset); 884 DMA_WRITE(dev_priv->back_bd); 885 - DMA_WRITE(BCI_X_Y(box.x1, box.y1)); 886 - DMA_WRITE(BCI_X_Y(box.x1, box.y1)); 887 - DMA_WRITE(BCI_W_H(box.x2 - box.x1, box.y2 - box.y1)); 888 DMA_COMMIT(); 889 } 890 ··· 890 } 891 892 static int savage_dispatch_draw(drm_savage_private_t * dev_priv, 893 - const drm_savage_cmd_header_t __user * start, 894 - const drm_savage_cmd_header_t __user * end, 895 const drm_buf_t * dmabuf, 896 - const unsigned int __user * usr_vtxbuf, 897 unsigned int vb_size, unsigned int vb_stride, 898 unsigned int nbox, 899 - const drm_clip_rect_t __user * usr_boxes) 900 { 901 unsigned int i, j; 902 int ret; 903 904 for (i = 0; i < nbox; ++i) { 905 - drm_clip_rect_t box; 906 - const drm_savage_cmd_header_t __user *usr_cmdbuf; 907 - DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); 908 - dev_priv->emit_clip_rect(dev_priv, &box); 909 910 - usr_cmdbuf = start; 911 - while (usr_cmdbuf < end) { 912 drm_savage_cmd_header_t cmd_header; 913 - DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, 914 - sizeof(cmd_header)); 915 - usr_cmdbuf++; 916 switch (cmd_header.cmd.cmd) { 917 case SAVAGE_CMD_DMA_PRIM: 918 - ret = 919 - savage_dispatch_dma_prim(dev_priv, 920 - &cmd_header, 921 - dmabuf); 922 break; 923 case SAVAGE_CMD_VB_PRIM: 924 - ret = 925 - 
savage_dispatch_vb_prim(dev_priv, 926 - &cmd_header, 927 - (const uint32_t 928 - __user *) 929 - usr_vtxbuf, vb_size, 930 - vb_stride); 931 break; 932 case SAVAGE_CMD_DMA_IDX: 933 j = (cmd_header.idx.count + 3) / 4; 934 /* j was check in savage_bci_cmdbuf */ 935 - ret = 936 - savage_dispatch_dma_idx(dev_priv, 937 - &cmd_header, 938 - (const uint16_t 939 - __user *) 940 - usr_cmdbuf, dmabuf); 941 - usr_cmdbuf += j; 942 break; 943 case SAVAGE_CMD_VB_IDX: 944 j = (cmd_header.idx.count + 3) / 4; 945 /* j was check in savage_bci_cmdbuf */ 946 - ret = 947 - savage_dispatch_vb_idx(dev_priv, 948 - &cmd_header, 949 - (const uint16_t 950 - __user *)usr_cmdbuf, 951 - (const uint32_t 952 - __user *)usr_vtxbuf, 953 - vb_size, vb_stride); 954 - usr_cmdbuf += j; 955 break; 956 default: 957 /* What's the best return code? EFAULT? */ ··· 960 drm_device_dma_t *dma = dev->dma; 961 drm_buf_t *dmabuf; 962 drm_savage_cmdbuf_t cmdbuf; 963 - drm_savage_cmd_header_t __user *usr_cmdbuf; 964 - drm_savage_cmd_header_t __user *first_draw_cmd; 965 - unsigned int __user *usr_vtxbuf; 966 - drm_clip_rect_t __user *usr_boxes; 967 unsigned int i, j; 968 int ret = 0; 969 ··· 986 dmabuf = NULL; 987 } 988 989 - usr_cmdbuf = (drm_savage_cmd_header_t __user *) cmdbuf.cmd_addr; 990 - usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr; 991 - usr_boxes = (drm_clip_rect_t __user *) cmdbuf.box_addr; 992 - if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size * 8)) || 993 - (cmdbuf.vb_size && DRM_VERIFYAREA_READ(usr_vtxbuf, cmdbuf.vb_size)) 994 - || (cmdbuf.nbox 995 - && DRM_VERIFYAREA_READ(usr_boxes, 996 - cmdbuf.nbox * sizeof(drm_clip_rect_t)))) 997 - return DRM_ERR(EFAULT); 998 999 /* Make sure writes to DMA buffers are finished before sending 1000 * DMA commands to the graphics hardware. 
*/ ··· 1046 first_draw_cmd = NULL; 1047 while (i < cmdbuf.size) { 1048 drm_savage_cmd_header_t cmd_header; 1049 - DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, 1050 - sizeof(cmd_header)); 1051 - usr_cmdbuf++; 1052 i++; 1053 1054 /* Group drawing commands with same state to minimize ··· 1067 case SAVAGE_CMD_DMA_PRIM: 1068 case SAVAGE_CMD_VB_PRIM: 1069 if (!first_draw_cmd) 1070 - first_draw_cmd = usr_cmdbuf - 1; 1071 - usr_cmdbuf += j; 1072 i += j; 1073 break; 1074 default: 1075 if (first_draw_cmd) { 1076 - ret = 1077 - savage_dispatch_draw(dev_priv, 1078 - first_draw_cmd, 1079 - usr_cmdbuf - 1, dmabuf, 1080 - usr_vtxbuf, 1081 - cmdbuf.vb_size, 1082 - cmdbuf.vb_stride, 1083 - cmdbuf.nbox, 1084 - usr_boxes); 1085 if (ret != 0) 1086 return ret; 1087 first_draw_cmd = NULL; ··· 1094 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1095 "beyond end of command buffer\n"); 1096 DMA_FLUSH(); 1097 - return DRM_ERR(EINVAL); 1098 } 1099 ret = savage_dispatch_state(dev_priv, &cmd_header, 1100 - (uint32_t __user *) 1101 - usr_cmdbuf); 1102 - usr_cmdbuf += j; 1103 i += j; 1104 break; 1105 case SAVAGE_CMD_CLEAR: ··· 1107 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1108 "beyond end of command buffer\n"); 1109 DMA_FLUSH(); 1110 - return DRM_ERR(EINVAL); 1111 } 1112 ret = savage_dispatch_clear(dev_priv, &cmd_header, 1113 - usr_cmdbuf, 1114 - cmdbuf.nbox, usr_boxes); 1115 - usr_cmdbuf++; 1116 i++; 1117 break; 1118 case SAVAGE_CMD_SWAP: 1119 - ret = savage_dispatch_swap(dev_priv, 1120 - cmdbuf.nbox, usr_boxes); 1121 break; 1122 default: 1123 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1124 DMA_FLUSH(); 1125 - return DRM_ERR(EINVAL); 1126 } 1127 1128 if (ret != 0) { 1129 DMA_FLUSH(); 1130 - return ret; 1131 } 1132 } 1133 1134 if (first_draw_cmd) { 1135 - ret = 1136 - savage_dispatch_draw(dev_priv, first_draw_cmd, usr_cmdbuf, 1137 - dmabuf, usr_vtxbuf, cmdbuf.vb_size, 1138 - cmdbuf.vb_stride, cmdbuf.nbox, 1139 - usr_boxes); 1140 if (ret != 0) { 1141 DMA_FLUSH(); 1142 - 
return ret; 1143 } 1144 } 1145 ··· 1154 savage_freelist_put(dev, dmabuf); 1155 } 1156 1157 - return 0; 1158 }
··· 27 #include "savage_drv.h" 28 29 void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 30 + const drm_clip_rect_t * pbox) 31 { 32 uint32_t scstart = dev_priv->state.s3d.new_scstart; 33 uint32_t scend = dev_priv->state.s3d.new_scend; ··· 53 } 54 55 void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 56 + const drm_clip_rect_t * pbox) 57 { 58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; 59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; ··· 115 116 #define SAVE_STATE(reg,where) \ 117 if(start <= reg && start+count > reg) \ 118 + dev_priv->state.where = regs[reg - start] 119 #define SAVE_STATE_MASK(reg,where,mask) do { \ 120 if(start <= reg && start+count > reg) { \ 121 uint32_t tmp; \ 122 + tmp = regs[reg - start]; \ 123 dev_priv->state.where = (tmp & (mask)) | \ 124 (dev_priv->state.where & ~(mask)); \ 125 } \ 126 } while (0) 127 + 128 static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, 129 unsigned int start, unsigned int count, 130 + const uint32_t *regs) 131 { 132 if (start < SAVAGE_TEXPALADDR_S3D || 133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { ··· 148 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); 149 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) 150 return savage_verify_texaddr(dev_priv, 0, 151 + dev_priv->state.s3d.texaddr); 152 } 153 154 return 0; ··· 157 158 static int savage_verify_state_s4(drm_savage_private_t * dev_priv, 159 unsigned int start, unsigned int count, 160 + const uint32_t *regs) 161 { 162 int ret = 0; 163 ··· 174 ~SAVAGE_SCISSOR_MASK_S4); 175 176 /* if any texture regs were changed ... */ 177 + if (start <= SAVAGE_TEXDESCR_S4 && 178 + start + count > SAVAGE_TEXPALADDR_S4) { 179 /* ... 
check texture state */ 180 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); 181 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); 182 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); 183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) 184 + ret |= savage_verify_texaddr(dev_priv, 0, 185 + dev_priv->state.s4.texaddr0); 186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) 187 + ret |= savage_verify_texaddr(dev_priv, 1, 188 + dev_priv->state.s4.texaddr1); 189 } 190 191 return ret; ··· 197 198 static int savage_dispatch_state(drm_savage_private_t * dev_priv, 199 const drm_savage_cmd_header_t * cmd_header, 200 + const uint32_t *regs) 201 { 202 unsigned int count = cmd_header->state.count; 203 unsigned int start = cmd_header->state.start; ··· 208 209 if (!count) 210 return 0; 211 212 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 213 ret = savage_verify_state_s3d(dev_priv, start, count, regs); ··· 236 /* scissor regs are emitted in savage_dispatch_draw */ 237 if (start < SAVAGE_DRAWCTRL0_S4) { 238 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) 239 + count2 = count - 240 + (SAVAGE_DRAWCTRL1_S4 + 1 - start); 241 if (start + count > SAVAGE_DRAWCTRL0_S4) 242 count = SAVAGE_DRAWCTRL0_S4 - start; 243 } else if (start <= SAVAGE_DRAWCTRL1_S4) { ··· 263 while (count > 0) { 264 unsigned int n = count < 255 ? 
count : 255; 265 DMA_SET_REGISTERS(start, n); 266 + DMA_COPY(regs, n); 267 count -= n; 268 start += n; 269 regs += n; ··· 421 422 static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, 423 const drm_savage_cmd_header_t * cmd_header, 424 + const uint32_t *vtxbuf, unsigned int vb_size, 425 + unsigned int vb_stride) 426 { 427 unsigned char reorder = 0; 428 unsigned int prim = cmd_header->prim.prim; ··· 507 508 for (i = start; i < start + count; ++i) { 509 unsigned int j = i + reorder[i % 3]; 510 + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); 511 } 512 513 DMA_COMMIT(); ··· 517 DMA_DRAW_PRIMITIVE(count, prim, skip); 518 519 if (vb_stride == vtx_size) { 520 + DMA_COPY(&vtxbuf[vb_stride * start], 521 + vtx_size * count); 522 } else { 523 for (i = start; i < start + count; ++i) { 524 + DMA_COPY(&vtxbuf [vb_stride * i], 525 + vtx_size); 526 } 527 } 528 ··· 541 542 static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, 543 const drm_savage_cmd_header_t * cmd_header, 544 + const uint16_t *idx, 545 const drm_buf_t * dmabuf) 546 { 547 unsigned char reorder = 0; ··· 628 while (n != 0) { 629 /* Can emit up to 255 indices (85 triangles) at once. */ 630 unsigned int count = n > 255 ? 
255 : n; 631 632 + /* check indices */ 633 for (i = 0; i < count; ++i) { 634 if (idx[i] > dmabuf->total / 32) { 635 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ··· 652 653 for (i = 1; i + 1 < count; i += 2) 654 BCI_WRITE(idx[i + reorder[i % 3]] | 655 + (idx[i + 1 + 656 + reorder[(i + 1) % 3]] << 16)); 657 if (i < count) 658 BCI_WRITE(idx[i + reorder[i % 3]]); 659 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ··· 674 BCI_WRITE(idx[i]); 675 } 676 677 + idx += count; 678 n -= count; 679 680 prim |= BCI_CMD_DRAW_CONT; ··· 685 686 static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, 687 const drm_savage_cmd_header_t * cmd_header, 688 + const uint16_t *idx, 689 + const uint32_t *vtxbuf, 690 unsigned int vb_size, unsigned int vb_stride) 691 { 692 unsigned char reorder = 0; ··· 751 while (n != 0) { 752 /* Can emit up to 255 vertices (85 triangles) at once. */ 753 unsigned int count = n > 255 ? 255 : n; 754 + 755 + /* Check indices */ 756 for (i = 0; i < count; ++i) { 757 if (idx[i] > vb_size / (vb_stride * 4)) { 758 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ··· 775 776 for (i = 0; i < count; ++i) { 777 unsigned int j = idx[i + reorder[i % 3]]; 778 + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); 779 } 780 781 DMA_COMMIT(); ··· 786 787 for (i = 0; i < count; ++i) { 788 unsigned int j = idx[i]; 789 + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); 790 } 791 792 DMA_COMMIT(); 793 } 794 795 + idx += count; 796 n -= count; 797 798 prim |= BCI_CMD_DRAW_CONT; ··· 804 805 static int savage_dispatch_clear(drm_savage_private_t * dev_priv, 806 const drm_savage_cmd_header_t * cmd_header, 807 + const drm_savage_cmd_header_t *data, 808 unsigned int nbox, 809 + const drm_clip_rect_t *boxes) 810 { 811 + unsigned int flags = cmd_header->clear0.flags; 812 unsigned int clear_cmd; 813 unsigned int i, nbufs; 814 DMA_LOCALS; 815 816 if (nbox == 0) 817 return 0; 818 819 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | 820 BCI_CMD_SEND_COLOR | 
BCI_CMD_DEST_PBD_NEW; ··· 828 if (nbufs == 0) 829 return 0; 830 831 + if (data->clear1.mask != 0xffffffff) { 832 /* set mask */ 833 BEGIN_DMA(2); 834 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); 835 + DMA_WRITE(data->clear1.mask); 836 DMA_COMMIT(); 837 } 838 for (i = 0; i < nbox; ++i) { 839 unsigned int x, y, w, h; 840 unsigned int buf; 841 + x = boxes[i].x1, y = boxes[i].y1; 842 + w = boxes[i].x2 - boxes[i].x1; 843 + h = boxes[i].y2 - boxes[i].y1; 844 BEGIN_DMA(nbufs * 6); 845 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { 846 if (!(flags & buf)) ··· 862 DMA_WRITE(dev_priv->depth_bd); 863 break; 864 } 865 + DMA_WRITE(data->clear1.value); 866 DMA_WRITE(BCI_X_Y(x, y)); 867 DMA_WRITE(BCI_W_H(w, h)); 868 } 869 DMA_COMMIT(); 870 } 871 + if (data->clear1.mask != 0xffffffff) { 872 /* reset mask */ 873 BEGIN_DMA(2); 874 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); ··· 880 } 881 882 static int savage_dispatch_swap(drm_savage_private_t * dev_priv, 883 + unsigned int nbox, const drm_clip_rect_t *boxes) 884 { 885 unsigned int swap_cmd; 886 unsigned int i; ··· 895 BCI_CMD_SET_ROP(swap_cmd, 0xCC); 896 897 for (i = 0; i < nbox; ++i) { 898 BEGIN_DMA(6); 899 DMA_WRITE(swap_cmd); 900 DMA_WRITE(dev_priv->back_offset); 901 DMA_WRITE(dev_priv->back_bd); 902 + DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); 903 + DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); 904 + DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1, 905 + boxes[i].y2 - boxes[i].y1)); 906 DMA_COMMIT(); 907 } 908 ··· 912 } 913 914 static int savage_dispatch_draw(drm_savage_private_t * dev_priv, 915 + const drm_savage_cmd_header_t *start, 916 + const drm_savage_cmd_header_t *end, 917 const drm_buf_t * dmabuf, 918 + const unsigned int *vtxbuf, 919 unsigned int vb_size, unsigned int vb_stride, 920 unsigned int nbox, 921 + const drm_clip_rect_t *boxes) 922 { 923 unsigned int i, j; 924 int ret; 925 926 for (i = 0; i < nbox; ++i) { 927 + const drm_savage_cmd_header_t *cmdbuf; 928 + dev_priv->emit_clip_rect(dev_priv, 
&boxes[i]); 929 930 + cmdbuf = start; 931 + while (cmdbuf < end) { 932 drm_savage_cmd_header_t cmd_header; 933 + cmd_header = *cmdbuf; 934 + cmdbuf++; 935 switch (cmd_header.cmd.cmd) { 936 case SAVAGE_CMD_DMA_PRIM: 937 + ret = savage_dispatch_dma_prim( 938 + dev_priv, &cmd_header, dmabuf); 939 break; 940 case SAVAGE_CMD_VB_PRIM: 941 + ret = savage_dispatch_vb_prim( 942 + dev_priv, &cmd_header, 943 + vtxbuf, vb_size, vb_stride); 944 break; 945 case SAVAGE_CMD_DMA_IDX: 946 j = (cmd_header.idx.count + 3) / 4; 947 /* j was check in savage_bci_cmdbuf */ 948 + ret = savage_dispatch_dma_idx(dev_priv, 949 + &cmd_header, (const uint16_t *)cmdbuf, 950 + dmabuf); 951 + cmdbuf += j; 952 break; 953 case SAVAGE_CMD_VB_IDX: 954 j = (cmd_header.idx.count + 3) / 4; 955 /* j was check in savage_bci_cmdbuf */ 956 + ret = savage_dispatch_vb_idx(dev_priv, 957 + &cmd_header, (const uint16_t *)cmdbuf, 958 + (const uint32_t *)vtxbuf, vb_size, 959 + vb_stride); 960 + cmdbuf += j; 961 break; 962 default: 963 /* What's the best return code? EFAULT? */ ··· 998 drm_device_dma_t *dma = dev->dma; 999 drm_buf_t *dmabuf; 1000 drm_savage_cmdbuf_t cmdbuf; 1001 + drm_savage_cmd_header_t *kcmd_addr = NULL; 1002 + drm_savage_cmd_header_t *first_draw_cmd; 1003 + unsigned int *kvb_addr = NULL; 1004 + drm_clip_rect_t *kbox_addr = NULL; 1005 unsigned int i, j; 1006 int ret = 0; 1007 ··· 1024 dmabuf = NULL; 1025 } 1026 1027 + /* Copy the user buffers into kernel temporary areas. This hasn't been 1028 + * a performance loss compared to VERIFYAREA_READ/ 1029 + * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct 1030 + * for locking on FreeBSD. 
1031 + */ 1032 + if (cmdbuf.size) { 1033 + kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); 1034 + if (kcmd_addr == NULL) 1035 + return ENOMEM; 1036 + 1037 + if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, 1038 + cmdbuf.size * 8)) 1039 + { 1040 + drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1041 + return DRM_ERR(EFAULT); 1042 + } 1043 + cmdbuf.cmd_addr = kcmd_addr; 1044 + } 1045 + if (cmdbuf.vb_size) { 1046 + kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); 1047 + if (kvb_addr == NULL) { 1048 + ret = DRM_ERR(ENOMEM); 1049 + goto done; 1050 + } 1051 + 1052 + if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, 1053 + cmdbuf.vb_size)) { 1054 + ret = DRM_ERR(EFAULT); 1055 + goto done; 1056 + } 1057 + cmdbuf.vb_addr = kvb_addr; 1058 + } 1059 + if (cmdbuf.nbox) { 1060 + kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t), 1061 + DRM_MEM_DRIVER); 1062 + if (kbox_addr == NULL) { 1063 + ret = DRM_ERR(ENOMEM); 1064 + goto done; 1065 + } 1066 + 1067 + if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, 1068 + cmdbuf.nbox * sizeof(drm_clip_rect_t))) { 1069 + ret = DRM_ERR(EFAULT); 1070 + goto done; 1071 + } 1072 + cmdbuf.box_addr = kbox_addr; 1073 + } 1074 1075 /* Make sure writes to DMA buffers are finished before sending 1076 * DMA commands to the graphics hardware. 
*/ ··· 1046 first_draw_cmd = NULL; 1047 while (i < cmdbuf.size) { 1048 drm_savage_cmd_header_t cmd_header; 1049 + cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr; 1050 + cmdbuf.cmd_addr++; 1051 i++; 1052 1053 /* Group drawing commands with same state to minimize ··· 1068 case SAVAGE_CMD_DMA_PRIM: 1069 case SAVAGE_CMD_VB_PRIM: 1070 if (!first_draw_cmd) 1071 + first_draw_cmd = cmdbuf.cmd_addr - 1; 1072 + cmdbuf.cmd_addr += j; 1073 i += j; 1074 break; 1075 default: 1076 if (first_draw_cmd) { 1077 + ret = savage_dispatch_draw( 1078 + dev_priv, first_draw_cmd, 1079 + cmdbuf.cmd_addr - 1, 1080 + dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size, 1081 + cmdbuf.vb_stride, 1082 + cmdbuf.nbox, cmdbuf.box_addr); 1083 if (ret != 0) 1084 return ret; 1085 first_draw_cmd = NULL; ··· 1098 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1099 "beyond end of command buffer\n"); 1100 DMA_FLUSH(); 1101 + ret = DRM_ERR(EINVAL); 1102 + goto done; 1103 } 1104 ret = savage_dispatch_state(dev_priv, &cmd_header, 1105 + (const uint32_t *)cmdbuf.cmd_addr); 1106 + cmdbuf.cmd_addr += j; 1107 i += j; 1108 break; 1109 case SAVAGE_CMD_CLEAR: ··· 1111 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1112 "beyond end of command buffer\n"); 1113 DMA_FLUSH(); 1114 + ret = DRM_ERR(EINVAL); 1115 + goto done; 1116 } 1117 ret = savage_dispatch_clear(dev_priv, &cmd_header, 1118 + cmdbuf.cmd_addr, 1119 + cmdbuf.nbox, cmdbuf.box_addr); 1120 + cmdbuf.cmd_addr++; 1121 i++; 1122 break; 1123 case SAVAGE_CMD_SWAP: 1124 + ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox, 1125 + cmdbuf.box_addr); 1126 break; 1127 default: 1128 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1129 DMA_FLUSH(); 1130 + ret = DRM_ERR(EINVAL); 1131 + goto done; 1132 } 1133 1134 if (ret != 0) { 1135 DMA_FLUSH(); 1136 + goto done; 1137 } 1138 } 1139 1140 if (first_draw_cmd) { 1141 + ret = savage_dispatch_draw ( 1142 + dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf, 1143 + cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride, 1144 + 
cmdbuf.nbox, cmdbuf.box_addr); 1145 if (ret != 0) { 1146 DMA_FLUSH(); 1147 + goto done; 1148 } 1149 } 1150 ··· 1157 savage_freelist_put(dev, dmabuf); 1158 } 1159 1160 + done: 1161 + /* If we didn't need to allocate them, these'll be NULL */ 1162 + drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1163 + drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); 1164 + drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t), 1165 + DRM_MEM_DRIVER); 1166 + 1167 + return ret; 1168 }