
drm/amdgpu: implement the reset MB func for vega10

These mailbox functions were left out during the bring-up stage; we need
them for the GPU reset feature.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
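
The patch rides on the existing VF-to-PF mailbox: xgpu_ai_request_reset() simply sends IDH_REQ_GPU_RESET_ACCESS through xgpu_ai_send_access_requests() and lets the host do the heavy lifting. Below is a minimal userspace model of that request/ack handshake; the IDH_* value, the register stand-ins, and the instant ack are simplified assumptions for illustration, not the driver's real mailbox protocol.

/* Minimal userspace model of the VF->PF mailbox handshake behind
 * xgpu_ai_request_reset(): write one request, poll for the host's
 * ack. The IDH_* value, the register stand-ins, and the instant
 * ack are simplified assumptions, not the driver's real protocol. */
#include <stdio.h>

enum idh_request { IDH_REQ_GPU_RESET_ACCESS = 5 };  /* value assumed */
enum idh_event   { IDH_FAIL = 0, IDH_SUCCESS = 1 };

static int msg_trn;                 /* stands in for the transmit register */
static int msg_rcv = IDH_SUCCESS;   /* stands in for the receive register */

static int poll_ack(void)
{
	/* the driver polls with a timeout; this model answers at once */
	return (msg_rcv == IDH_SUCCESS) ? 0 : -1;
}

static int send_access_request(enum idh_request req)
{
	msg_trn = req;      /* transmit the request to the host ... */
	return poll_ack();  /* ... then wait for its acknowledgement */
}

int main(void)
{
	/* mirrors xgpu_ai_request_reset(): one message, one ack */
	if (send_access_request(IDH_REQ_GPU_RESET_ACCESS) == 0)
		printf("reset request acked by host\n");
	return 0;
}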

Authored by Monk Liu, committed by Alex Deucher (f98b617e, 94b4fd72)

138 insertions(+) across two files

drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c (+133)
···
 #include "vega10/GC/gc_9_0_offset.h"
 #include "vega10/GC/gc_9_0_sh_mask.h"
 #include "soc15.h"
+#include "vega10_ih.h"
 #include "soc15_common.h"
 #include "mxgpu_ai.h"
···
 	return 0;
 }

+static int xgpu_ai_request_reset(struct amdgpu_device *adev)
+{
+	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
 static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
 					   bool init)
 {
···
 	return r;
 }

+static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
+				   struct amdgpu_irq_src *source,
+				   struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("get ack intr and do nothing.\n");
+	return 0;
+}
+
+static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
+			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+	return 0;
+}
+
+static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+{
+	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+	/* wait until RCV_MSG becomes 3 */
+	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
+		pr_err("failed to receive FLR_CMPL\n");
+		return;
+	}
+
+	/* Trigger recovery due to world switch failure */
+	amdgpu_sriov_gpu_reset(adev, false);
+}
+
+static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *src,
+				       unsigned type,
+				       enum amdgpu_interrupt_state state)
+{
+	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
+			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+	return 0;
+}
+
+static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
+				   struct amdgpu_irq_src *source,
+				   struct amdgpu_iv_entry *entry)
+{
+	int r;
+
+	/* see what event we get */
+	r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+
+	/* only handle FLR_NOTIFY now */
+	if (!r)
+		schedule_work(&adev->virt.flr_work);
+
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
+	.set = xgpu_ai_set_mailbox_ack_irq,
+	.process = xgpu_ai_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
+	.set = xgpu_ai_set_mailbox_rcv_irq,
+	.process = xgpu_ai_mailbox_rcv_irq,
+};
+
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->virt.ack_irq.num_types = 1;
+	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
+	adev->virt.rcv_irq.num_types = 1;
+	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+	if (r) {
+		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+		return r;
+	}
+
+	return 0;
+}
+
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+	if (r)
+		return r;
+	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+	if (r) {
+		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+		return r;
+	}
+
+	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+
+	return 0;
+}
+
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
+{
+	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
 const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.req_full_gpu = xgpu_ai_request_full_gpu_access,
 	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
+	.reset_gpu = xgpu_ai_request_reset,
 };
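
A design note on the FLR path above: xgpu_ai_mailbox_rcv_irq() runs in interrupt context, where it cannot sit in xgpu_ai_poll_msg()'s timeout loop, so it only schedules flr_work; the worker then recovers the enclosing amdgpu_device from the embedded work item via container_of() before polling for IDH_FLR_NOTIFICATION_CMPL and triggering the reset. The self-contained sketch below shows just that pointer-recovery pattern; the struct layout and names are simplified stand-ins, not the amdgpu definitions.

/* Self-contained model of the container_of pattern used by
 * xgpu_ai_mailbox_flr_work(): the interrupt handler hands the
 * workqueue only a pointer to the embedded work item, and the
 * worker walks back to the enclosing device structure. The struct
 * layout here is an invented stand-in, not the amdgpu definitions. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { void (*func)(struct work_struct *work); };

struct virt_model { struct work_struct flr_work; };
struct device_model {
	const char *name;
	struct virt_model virt;
};

static void flr_work(struct work_struct *work)
{
	/* two-step walk back, work -> virt -> device, as the driver does */
	struct virt_model *virt = container_of(work, struct virt_model, flr_work);
	struct device_model *adev = container_of(virt, struct device_model, virt);

	printf("FLR work runs for device %s\n", adev->name);
}

int main(void)
{
	struct device_model adev = { .name = "vega10-vf" };

	adev.virt.flr_work.func = flr_work;
	/* schedule_work() would invoke this asynchronously in the kernel */
	adev.virt.flr_work.func(&adev.virt.flr_work);
	return 0;
}

The same two-step container_of walk (work to amdgpu_virt to amdgpu_device) is exactly what the committed xgpu_ai_mailbox_flr_work() performs.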
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h (+5)
···

 extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;

+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
+
 #endif
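
The header now exports four entry points, but the patch does not show their call sites (they get wired into the vega10/soc15 init path separately). The expected ordering is: install the .set/.process tables and register IV source IDs 135/138 early, unmask via xgpu_ai_mailbox_get_irq() once the IH ring is up (which also INIT_WORKs flr_work), and mask again with xgpu_ai_mailbox_put_irq() at teardown. Below is a self-contained sketch of that ordering with stubbed types; every name in it is an illustrative assumption, not amdgpu code.

/* Illustrative userspace model of the expected init/fini ordering for
 * the four exported helpers. All types and functions here are stubs
 * invented for the sketch; none of this is amdgpu code. */
#include <stdio.h>

struct dev_model { int rcv_unmasked, ack_unmasked, flr_work_ready; };

static void mailbox_set_irq_funcs(struct dev_model *d) { (void)d; }           /* install .set/.process */
static int  mailbox_add_irq_id(struct dev_model *d)    { (void)d; return 0; } /* IV IDs 135/138 */

static int mailbox_get_irq(struct dev_model *d)
{
	d->rcv_unmasked = d->ack_unmasked = 1;  /* amdgpu_irq_get() on both sources */
	d->flr_work_ready = 1;                  /* INIT_WORK(&virt.flr_work, ...) */
	return 0;
}

static void mailbox_put_irq(struct dev_model *d)
{
	d->ack_unmasked = d->rcv_unmasked = 0;  /* amdgpu_irq_put() on both sources */
}

int main(void)
{
	struct dev_model d = { 0 };

	mailbox_set_irq_funcs(&d);              /* early: handler tables */
	if (mailbox_add_irq_id(&d))             /* early: route the IV source IDs */
		return 1;
	if (mailbox_get_irq(&d))                /* late: unmask once IH is up */
		return 1;

	printf("running: rcv=%d ack=%d flr_work=%d\n",
	       d.rcv_unmasked, d.ack_unmasked, d.flr_work_ready);

	mailbox_put_irq(&d);                    /* teardown mirrors init */
	printf("after fini: rcv=%d ack=%d\n", d.rcv_unmasked, d.ack_unmasked);
	return 0;
}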