Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: check PS, WS index

Theoretically, it would be possible for a buggy or malicious VBIOS to
overwrite past the bounds of the passed parameters (or its own
workspace); add bounds checking to prevent this from happening.

Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3093
Signed-off-by: Alexander Richards <electrodeyt@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Alexander Richards and committed by Alex Deucher
4630d503 a25dea47

+102 -74
+16 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
··· 1018 1018 if (clock_type == COMPUTE_ENGINE_PLL_PARAM) { 1019 1019 args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock); 1020 1020 1021 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1021 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, 1022 + sizeof(args)); 1022 1023 1023 1024 dividers->post_div = args.v3.ucPostDiv; 1024 1025 dividers->enable_post_div = (args.v3.ucCntlFlag & ··· 1039 1038 if (strobe_mode) 1040 1039 args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN; 1041 1040 1042 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1041 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, 1042 + sizeof(args)); 1043 1043 1044 1044 dividers->post_div = args.v5.ucPostDiv; 1045 1045 dividers->enable_post_div = (args.v5.ucCntlFlag & ··· 1058 1056 /* fusion */ 1059 1057 args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */ 1060 1058 1061 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1059 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, 1060 + sizeof(args)); 1062 1061 1063 1062 dividers->post_divider = dividers->post_div = args.v4.ucPostDiv; 1064 1063 dividers->real_clock = le32_to_cpu(args.v4.ulClock); ··· 1070 1067 args.v6_in.ulClock.ulComputeClockFlag = clock_type; 1071 1068 args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */ 1072 1069 1073 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1070 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, 1071 + sizeof(args)); 1074 1072 1075 1073 dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv); 1076 1074 dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac); ··· 1113 1109 if (strobe_mode) 1114 1110 args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN; 1115 1111 1116 - 
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1112 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, 1113 + sizeof(args)); 1117 1114 1118 1115 mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac); 1119 1116 mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv); ··· 1156 1151 if (mem_clock) 1157 1152 args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK); 1158 1153 1159 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1154 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, 1155 + sizeof(args)); 1160 1156 } 1161 1157 1162 1158 void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev, ··· 1211 1205 args.v2.ucVoltageMode = 0; 1212 1206 args.v2.usVoltageLevel = 0; 1213 1207 1214 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1208 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, 1209 + sizeof(args)); 1215 1210 1216 1211 *voltage = le16_to_cpu(args.v2.usVoltageLevel); 1217 1212 break; ··· 1221 1214 args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL; 1222 1215 args.v3.usVoltageLevel = cpu_to_le16(voltage_id); 1223 1216 1224 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1217 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, 1218 + sizeof(args)); 1225 1219 1226 1220 *voltage = le16_to_cpu(args.v3.usVoltageLevel); 1227 1221 break;
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
··· 941 941 return -EINVAL; 942 942 } 943 943 944 - return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1); 944 + return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1, 945 + sizeof(asic_init_ps_v2_1)); 945 946 }
+30 -11
drivers/gpu/drm/amd/amdgpu/atom.c
··· 62 62 typedef struct { 63 63 struct atom_context *ctx; 64 64 uint32_t *ps, *ws; 65 + int ps_size, ws_size; 65 66 int ps_shift; 66 67 uint16_t start; 67 68 unsigned last_jump; ··· 71 70 } atom_exec_context; 72 71 73 72 int amdgpu_atom_debug; 74 - static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params); 75 - int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); 73 + static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size); 74 + int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size); 76 75 77 76 static uint32_t atom_arg_mask[8] = 78 77 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, ··· 224 223 (*ptr)++; 225 224 /* get_unaligned_le32 avoids unaligned accesses from atombios 226 225 * tables, noticed on a DEC Alpha. */ 227 - val = get_unaligned_le32((u32 *)&ctx->ps[idx]); 226 + if (idx < ctx->ps_size) 227 + val = get_unaligned_le32((u32 *)&ctx->ps[idx]); 228 + else 229 + pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size); 228 230 if (print) 229 231 DEBUG("PS[0x%02X,0x%04X]", idx, val); 230 232 break; ··· 265 261 val = gctx->reg_block; 266 262 break; 267 263 default: 268 - val = ctx->ws[idx]; 264 + if (idx < ctx->ws_size) 265 + val = ctx->ws[idx]; 266 + else 267 + pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size); 269 268 } 270 269 break; 271 270 case ATOM_ARG_ID: ··· 502 495 idx = U8(*ptr); 503 496 (*ptr)++; 504 497 DEBUG("PS[0x%02X]", idx); 498 + if (idx >= ctx->ps_size) { 499 + pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size); 500 + return; 501 + } 505 502 ctx->ps[idx] = cpu_to_le32(val); 506 503 break; 507 504 case ATOM_ARG_WS: ··· 538 527 gctx->reg_block = val; 539 528 break; 540 529 default: 530 + if (idx >= ctx->ws_size) { 531 + pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size); 532 + return; 533 + } 541 534 
ctx->ws[idx] = val; 542 535 } 543 536 break; ··· 639 624 else 640 625 SDEBUG(" table: %d\n", idx); 641 626 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) 642 - r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); 627 + r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift); 643 628 if (r) { 644 629 ctx->abort = true; 645 630 } ··· 1218 1203 atom_op_div32, ATOM_ARG_WS}, 1219 1204 }; 1220 1205 1221 - static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params) 1206 + static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size) 1222 1207 { 1223 1208 int base = CU16(ctx->cmd_table + 4 + 2 * index); 1224 1209 int len, ws, ps, ptr; ··· 1240 1225 ectx.ps_shift = ps / 4; 1241 1226 ectx.start = base; 1242 1227 ectx.ps = params; 1228 + ectx.ps_size = params_size; 1243 1229 ectx.abort = false; 1244 1230 ectx.last_jump = 0; 1245 - if (ws) 1231 + if (ws) { 1246 1232 ectx.ws = kcalloc(4, ws, GFP_KERNEL); 1247 - else 1233 + ectx.ws_size = ws; 1234 + } else { 1248 1235 ectx.ws = NULL; 1236 + ectx.ws_size = 0; 1237 + } 1249 1238 1250 1239 debug_depth++; 1251 1240 while (1) { ··· 1283 1264 return ret; 1284 1265 } 1285 1266 1286 - int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params) 1267 + int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size) 1287 1268 { 1288 1269 int r; 1289 1270 ··· 1299 1280 /* reset divmul */ 1300 1281 ctx->divmul[0] = 0; 1301 1282 ctx->divmul[1] = 0; 1302 - r = amdgpu_atom_execute_table_locked(ctx, index, params); 1283 + r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size); 1303 1284 mutex_unlock(&ctx->mutex); 1304 1285 return r; 1305 1286 } ··· 1571 1552 1572 1553 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) 1573 1554 return 1; 1574 - ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps); 1555 
+ ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16); 1575 1556 if (ret) 1576 1557 return ret; 1577 1558
+1 -1
drivers/gpu/drm/amd/amdgpu/atom.h
··· 156 156 extern int amdgpu_atom_debug; 157 157 158 158 struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios); 159 - int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); 159 + int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size); 160 160 int amdgpu_atom_asic_init(struct atom_context *ctx); 161 161 void amdgpu_atom_destroy(struct atom_context *ctx); 162 162 bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+14 -14
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
··· 77 77 args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border); 78 78 break; 79 79 } 80 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 80 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 81 81 } 82 82 83 83 void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc) ··· 106 106 args.ucEnable = ATOM_SCALER_DISABLE; 107 107 break; 108 108 } 109 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 109 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 110 110 } 111 111 112 112 void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock) ··· 123 123 args.ucCRTC = amdgpu_crtc->crtc_id; 124 124 args.ucEnable = lock; 125 125 126 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 126 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 127 127 } 128 128 129 129 void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state) ··· 139 139 args.ucCRTC = amdgpu_crtc->crtc_id; 140 140 args.ucEnable = state; 141 141 142 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 142 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 143 143 } 144 144 145 145 void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state) ··· 155 155 args.ucCRTC = amdgpu_crtc->crtc_id; 156 156 args.ucBlanking = state; 157 157 158 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 158 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 159 159 } 160 160 161 161 void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) ··· 171 171 args.ucDispPipeId = amdgpu_crtc->crtc_id; 172 172 args.ucEnable = state; 173 173 174 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, 
(uint32_t *)&args); 174 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 175 175 } 176 176 177 177 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) ··· 183 183 184 184 args.ucEnable = ATOM_INIT; 185 185 186 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 186 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 187 187 } 188 188 189 189 void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, ··· 228 228 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 229 229 args.ucCRTC = amdgpu_crtc->crtc_id; 230 230 231 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 231 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 232 232 } 233 233 234 234 union atom_enable_ss { ··· 293 293 args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); 294 294 args.v3.ucEnable = enable; 295 295 296 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 296 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 297 297 } 298 298 299 299 union adjust_pixel_clock { ··· 395 395 ADJUST_DISPLAY_CONFIG_SS_ENABLE; 396 396 397 397 amdgpu_atom_execute_table(adev->mode_info.atom_context, 398 - index, (uint32_t *)&args); 398 + index, (uint32_t *)&args, sizeof(args)); 399 399 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; 400 400 break; 401 401 case 3: ··· 428 428 args.v3.sInput.ucExtTransmitterID = 0; 429 429 430 430 amdgpu_atom_execute_table(adev->mode_info.atom_context, 431 - index, (uint32_t *)&args); 431 + index, (uint32_t *)&args, sizeof(args)); 432 432 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; 433 433 if (args.v3.sOutput.ucRefDiv) { 434 434 amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV; ··· 514 514 DRM_ERROR("Unknown table version %d %d\n", frev, 
crev); 515 515 return; 516 516 } 517 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 517 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 518 518 } 519 519 520 520 union set_dce_clock { ··· 544 544 args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */ 545 545 args.v2_1.asParam.ucDCEClkType = clk_type; 546 546 args.v2_1.asParam.ucDCEClkSrc = clk_src; 547 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 547 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 548 548 ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10; 549 549 break; 550 550 default: ··· 740 740 return; 741 741 } 742 742 743 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 743 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 744 744 } 745 745 746 746 int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
+2 -2
drivers/gpu/drm/amd/amdgpu/atombios_dp.c
··· 83 83 args.v2.ucDelay = delay / 10; 84 84 args.v2.ucHPD_ID = chan->rec.hpd; 85 85 86 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 86 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 87 87 88 88 *ack = args.v2.ucReplyStatus; 89 89 ··· 301 301 args.ucLaneNum = lane_num; 302 302 args.ucStatus = 0; 303 303 304 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 304 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 305 305 return args.ucStatus; 306 306 } 307 307
+8 -8
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
··· 335 335 args.ucDacStandard = ATOM_DAC1_PS2; 336 336 args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10); 337 337 338 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 338 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 339 339 340 340 } 341 341 ··· 432 432 break; 433 433 } 434 434 435 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 435 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 436 436 } 437 437 438 438 int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) ··· 732 732 break; 733 733 } 734 734 735 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 735 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 736 736 737 737 } 738 738 ··· 1136 1136 break; 1137 1137 } 1138 1138 1139 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1139 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 1140 1140 } 1141 1141 1142 1142 bool ··· 1164 1164 1165 1165 args.v1.ucAction = action; 1166 1166 1167 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1167 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 1168 1168 1169 1169 /* wait for the panel to power up */ 1170 1170 if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { ··· 1288 1288 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1289 1289 return; 1290 1290 } 1291 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1291 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 1292 1292 } 1293 1293 1294 1294 static void ··· 1633 1633 return; 1634 1634 } 1635 1635 1636 - 
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1636 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 1637 1637 } 1638 1638 1639 1639 /* This only needs to be called once at startup */ ··· 1706 1706 args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; 1707 1707 } 1708 1708 1709 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 1709 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 1710 1710 1711 1711 return true; 1712 1712 } else
+2 -2
drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
··· 86 86 args.ucSlaveAddr = slave_addr << 1; 87 87 args.ucLineNumber = chan->rec.i2c_id; 88 88 89 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 89 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 90 90 91 91 /* error */ 92 92 if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { ··· 172 172 args.ucSlaveAddr = slave_addr; 173 173 args.ucLineNumber = line_number; 174 174 175 - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); 175 + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args)); 176 176 }
+1 -1
drivers/gpu/drm/amd/display/dc/bios/command_table.c
··· 37 37 #define EXEC_BIOS_CMD_TABLE(command, params)\ 38 38 (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ 39 39 GetIndexIntoMasterTable(COMMAND, command), \ 40 - (uint32_t *)&params) == 0) 40 + (uint32_t *)&params, sizeof(params)) == 0) 41 41 42 42 #define BIOS_CMD_TABLE_REVISION(command, frev, crev)\ 43 43 amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
+1 -1
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
··· 49 49 #define EXEC_BIOS_CMD_TABLE(fname, params)\ 50 50 (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ 51 51 GET_INDEX_INTO_MASTER_TABLE(command, fname), \ 52 - (uint32_t *)&params) == 0) 52 + (uint32_t *)&params, sizeof(params)) == 0) 53 53 54 54 #define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\ 55 55 amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
+21 -21
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
··· 226 226 227 227 return amdgpu_atom_execute_table(adev->mode_info.atom_context, 228 228 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), 229 - (uint32_t *)&engine_clock_parameters); 229 + (uint32_t *)&engine_clock_parameters, sizeof(engine_clock_parameters)); 230 230 } 231 231 232 232 /* ··· 297 297 298 298 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 299 299 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), 300 - (uint32_t *)&mpll_parameters); 300 + (uint32_t *)&mpll_parameters, sizeof(mpll_parameters)); 301 301 302 302 if (0 == result) { 303 303 mpll_param->mpll_fb_divider.clk_frac = ··· 345 345 346 346 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 347 347 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), 348 - (uint32_t *)&mpll_parameters); 348 + (uint32_t *)&mpll_parameters, sizeof(mpll_parameters)); 349 349 350 350 if (!result) 351 351 mpll_param->mpll_post_divider = ··· 366 366 367 367 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 368 368 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), 369 - (uint32_t *)&mpll_parameters); 369 + (uint32_t *)&mpll_parameters, sizeof(mpll_parameters)); 370 370 371 371 /* VEGAM's mpll takes sometime to finish computing */ 372 372 udelay(10); ··· 396 396 397 397 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 398 398 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), 399 - (uint32_t *)&pll_parameters); 399 + (uint32_t *)&pll_parameters, sizeof(pll_parameters)); 400 400 401 401 if (0 == result) { 402 402 dividers->pll_post_divider = pll_parameters.ucPostDiv; ··· 420 420 421 421 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 422 422 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), 423 - (uint32_t *)&pll_patameters); 423 + (uint32_t *)&pll_patameters, sizeof(pll_patameters)); 424 424 425 425 if (0 == result) { 426 426 dividers->pll_post_divider = ··· 457 457 458 458 result = 
amdgpu_atom_execute_table(adev->mode_info.atom_context, 459 459 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), 460 - (uint32_t *)&pll_patameters); 460 + (uint32_t *)&pll_patameters, sizeof(pll_patameters)); 461 461 462 462 if (0 == result) { 463 463 dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac); ··· 490 490 491 491 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 492 492 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), 493 - (uint32_t *)&pll_patameters); 493 + (uint32_t *)&pll_patameters, sizeof(pll_patameters)); 494 494 495 495 if (0 == result) { 496 496 dividers->pll_post_divider = ··· 773 773 774 774 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 775 775 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 776 - (uint32_t *)&sOutput_FuseValues); 776 + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); 777 777 778 778 if (result) 779 779 return result; ··· 794 794 795 795 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 796 796 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 797 - (uint32_t *)&sOutput_FuseValues); 797 + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); 798 798 799 799 if (result) 800 800 return result; ··· 814 814 815 815 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 816 816 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 817 - (uint32_t *)&sOutput_FuseValues); 817 + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); 818 818 819 819 if (result) 820 820 return result; ··· 835 835 836 836 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 837 837 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 838 - (uint32_t *)&sOutput_FuseValues); 838 + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); 839 839 840 840 if (result) 841 841 return result; ··· 857 857 858 858 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 859 859 GetIndexIntoMasterTable(COMMAND, 
ReadEfuseValue), 860 - (uint32_t *)&sOutput_FuseValues); 860 + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); 861 861 if (result) 862 862 return result; 863 863 ··· 878 878 879 879 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 880 880 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 881 - (uint32_t *)&sOutput_FuseValues); 881 + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); 882 882 883 883 if (result) 884 884 return result; ··· 909 909 910 910 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 911 911 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 912 - (uint32_t *)&sOutput_FuseValues); 912 + (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues)); 913 913 914 914 if (result) 915 915 return result; ··· 1134 1134 1135 1135 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 1136 1136 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1137 - (uint32_t *)&get_voltage_info_param_space); 1137 + (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space)); 1138 1138 1139 1139 *voltage = result ? 0 : 1140 1140 le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) ··· 1179 1179 1180 1180 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 1181 1181 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1182 - (uint32_t *)&get_voltage_info_param_space); 1182 + (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space)); 1183 1183 1184 1184 if (0 != result) 1185 1185 return result; ··· 1359 1359 1360 1360 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 1361 1361 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 1362 - (uint32_t *)&efuse_param); 1362 + (uint32_t *)&efuse_param, sizeof(efuse_param)); 1363 1363 *efuse = result ? 
0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask; 1364 1364 1365 1365 return result; ··· 1380 1380 1381 1381 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 1382 1382 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), 1383 - (uint32_t *)&memory_clock_parameters); 1383 + (uint32_t *)&memory_clock_parameters, sizeof(memory_clock_parameters)); 1384 1384 1385 1385 return result; 1386 1386 } ··· 1399 1399 1400 1400 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 1401 1401 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1402 - (uint32_t *)&get_voltage_info_param_space); 1402 + (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space)); 1403 1403 1404 1404 *voltage = result ? 0 : 1405 1405 le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); ··· 1526 1526 1527 1527 result = amdgpu_atom_execute_table(adev->mode_info.atom_context, 1528 1528 GetIndexIntoMasterTable(COMMAND, SetVoltage), 1529 - (uint32_t *)voltage_parameters); 1529 + (uint32_t *)voltage_parameters, sizeof(voltage_parameters)); 1530 1530 1531 1531 *virtual_voltage_id = voltage_parameters->usVoltageLevel; 1532 1532
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
··· 258 258 idx = GetIndexIntoMasterCmdTable(computegpuclockparam); 259 259 260 260 if (amdgpu_atom_execute_table( 261 - adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters)) 261 + adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters, sizeof(pll_parameters))) 262 262 return -EINVAL; 263 263 264 264 pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *) ··· 505 505 ix = GetIndexIntoMasterCmdTable(getsmuclockinfo); 506 506 507 507 if (amdgpu_atom_execute_table( 508 - adev->mode_info.atom_context, ix, (uint32_t *)&parameters)) 508 + adev->mode_info.atom_context, ix, (uint32_t *)&parameters, sizeof(parameters))) 509 509 return -EINVAL; 510 510 511 511 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 514 514 getsmuclockinfo); 515 515 516 516 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, 517 - (uint32_t *)&input); 517 + (uint32_t *)&input, sizeof(input)); 518 518 if (ret) 519 519 return -EINVAL; 520 520
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
··· 301 301 getsmuclockinfo); 302 302 303 303 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, 304 - (uint32_t *)&input); 304 + (uint32_t *)&input, sizeof(input)); 305 305 if (ret) 306 306 return -EINVAL; 307 307