Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu/virt: impl mailbox for ai

Implement mailbox protocol for AI so that guest vf can communicate
with GPU hypervisor.

Signed-off-by: Xiangliang Yu <Xiangliang.Yu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Xiangliang Yu and committed by
Alex Deucher
c9c9de93 ebe0a809

+255 -1
+1 -1
drivers/gpu/drm/amd/amdgpu/Makefile
··· 34 34 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o 35 35 36 36 amdgpu-y += \ 37 - vi.o mxgpu_vi.o nbio_v6_1.o soc15.o 37 + vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o 38 38 39 39 # add GMC block 40 40 amdgpu-y += \
+207
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
··· 1 + /* 2 + * Copyright 2014 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #include "amdgpu.h" 25 + #include "vega10/soc15ip.h" 26 + #include "vega10/NBIO/nbio_6_1_offset.h" 27 + #include "vega10/NBIO/nbio_6_1_sh_mask.h" 28 + #include "vega10/GC/gc_9_0_offset.h" 29 + #include "vega10/GC/gc_9_0_sh_mask.h" 30 + #include "soc15.h" 31 + #include "soc15_common.h" 32 + #include "mxgpu_ai.h" 33 + 34 + static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev) 35 + { 36 + u32 reg; 37 + int timeout = AI_MAILBOX_TIMEDOUT; 38 + u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID); 39 + 40 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 41 + mmBIF_BX_PF0_MAILBOX_CONTROL)); 42 + reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1); 43 + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 44 + mmBIF_BX_PF0_MAILBOX_CONTROL), reg); 45 + 46 + /*Wait for RCV_MSG_VALID to be 0*/ 47 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 48 + mmBIF_BX_PF0_MAILBOX_CONTROL)); 49 + while (reg & mask) { 50 + if (timeout <= 0) { 51 + pr_err("RCV_MSG_VALID is not cleared\n"); 52 + break; 53 + } 54 + mdelay(1); 55 + timeout -=1; 56 + 57 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 58 + mmBIF_BX_PF0_MAILBOX_CONTROL)); 59 + } 60 + } 61 + 62 + static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val) 63 + { 64 + u32 reg; 65 + 66 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 67 + mmBIF_BX_PF0_MAILBOX_CONTROL)); 68 + reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, 69 + TRN_MSG_VALID, val ? 
1 : 0); 70 + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL), 71 + reg); 72 + } 73 + 74 + static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev, 75 + enum idh_request req) 76 + { 77 + u32 reg; 78 + 79 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 80 + mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); 81 + reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0, 82 + MSGBUF_DATA, req); 83 + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0), 84 + reg); 85 + 86 + xgpu_ai_mailbox_set_valid(adev, true); 87 + } 88 + 89 + static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev, 90 + enum idh_event event) 91 + { 92 + u32 reg; 93 + u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID); 94 + 95 + if (event != IDH_FLR_NOTIFICATION_CMPL) { 96 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 97 + mmBIF_BX_PF0_MAILBOX_CONTROL)); 98 + if (!(reg & mask)) 99 + return -ENOENT; 100 + } 101 + 102 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 103 + mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0)); 104 + if (reg != event) 105 + return -ENOENT; 106 + 107 + xgpu_ai_mailbox_send_ack(adev); 108 + 109 + return 0; 110 + } 111 + 112 + static int xgpu_ai_poll_ack(struct amdgpu_device *adev) 113 + { 114 + int r = 0, timeout = AI_MAILBOX_TIMEDOUT; 115 + u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK); 116 + u32 reg; 117 + 118 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 119 + mmBIF_BX_PF0_MAILBOX_CONTROL)); 120 + while (!(reg & mask)) { 121 + if (timeout <= 0) { 122 + pr_err("Doesn't get ack from pf.\n"); 123 + r = -ETIME; 124 + break; 125 + } 126 + msleep(1); 127 + timeout -= 1; 128 + 129 + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 130 + mmBIF_BX_PF0_MAILBOX_CONTROL)); 131 + } 132 + 133 + return r; 134 + } 135 + 136 + static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event) 137 + { 138 + int r = 0, timeout = AI_MAILBOX_TIMEDOUT; 139 + 140 + r = xgpu_ai_mailbox_rcv_msg(adev, event); 141 
+ while (r) { 142 + if (timeout <= 0) { 143 + pr_err("Doesn't get ack from pf.\n"); 144 + r = -ETIME; 145 + break; 146 + } 147 + msleep(1); 148 + timeout -= 1; 149 + 150 + r = xgpu_ai_mailbox_rcv_msg(adev, event); 151 + } 152 + 153 + return r; 154 + } 155 + 156 + 157 + static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, 158 + enum idh_request req) 159 + { 160 + int r; 161 + 162 + xgpu_ai_mailbox_trans_msg(adev, req); 163 + 164 + /* start to poll ack */ 165 + r = xgpu_ai_poll_ack(adev); 166 + if (r) 167 + return r; 168 + 169 + xgpu_ai_mailbox_set_valid(adev, false); 170 + 171 + /* start to check msg if request is idh_req_gpu_init_access */ 172 + if (req == IDH_REQ_GPU_INIT_ACCESS || 173 + req == IDH_REQ_GPU_FINI_ACCESS || 174 + req == IDH_REQ_GPU_RESET_ACCESS) { 175 + r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); 176 + if (r) 177 + return r; 178 + } 179 + 180 + return 0; 181 + } 182 + 183 + static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev, 184 + bool init) 185 + { 186 + enum idh_request req; 187 + 188 + req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS; 189 + return xgpu_ai_send_access_requests(adev, req); 190 + } 191 + 192 + static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev, 193 + bool init) 194 + { 195 + enum idh_request req; 196 + int r = 0; 197 + 198 + req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS; 199 + r = xgpu_ai_send_access_requests(adev, req); 200 + 201 + return r; 202 + } 203 + 204 + const struct amdgpu_virt_ops xgpu_ai_virt_ops = { 205 + .req_full_gpu = xgpu_ai_request_full_gpu_access, 206 + .rel_full_gpu = xgpu_ai_release_full_gpu_access, 207 + };
+47
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__

/* Mailbox poll budget used by mxgpu_ai.c; decremented once per 1 ms wait,
 * so this is the timeout in milliseconds.
 */
#define AI_MAILBOX_TIMEDOUT	150000

/* Request ids the guest VF writes into the transmit mailbox for the PF.
 * Values are the wire protocol shared with the GPU hypervisor — do not
 * renumber or reorder.
 */
enum idh_request {
	IDH_REQ_GPU_INIT_ACCESS = 1,
	IDH_REL_GPU_INIT_ACCESS,
	IDH_REQ_GPU_FINI_ACCESS,
	IDH_REL_GPU_FINI_ACCESS,
	IDH_REQ_GPU_RESET_ACCESS
};

/* Event ids the PF places in the receive mailbox for the guest VF.
 * Same wire-protocol caveat as enum idh_request.
 */
enum idh_event {
	IDH_CLR_MSG_BUF = 0,
	IDH_READY_TO_ACCESS_GPU,
	IDH_FLR_NOTIFICATION,
	IDH_FLR_NOTIFICATION_CMPL,
	IDH_EVENT_MAX
};

/* AI (vega10) VF virtualization ops, defined in mxgpu_ai.c. */
extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;

#endif