Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: switch to new amdgpu_nbio structure

No functional change; just switch to the new structures.

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Hawking Zhang and committed by Alex Deucher
bebc0762 078ef4e9

+108 -154
+4 -59
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 73 73 #include "amdgpu_gmc.h" 74 74 #include "amdgpu_gfx.h" 75 75 #include "amdgpu_sdma.h" 76 + #include "amdgpu_nbio.h" 76 77 #include "amdgpu_dm.h" 77 78 #include "amdgpu_virt.h" 78 79 #include "amdgpu_csa.h" ··· 645 644 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 646 645 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); 647 646 648 - 649 - /* 650 - * amdgpu nbio functions 651 - * 652 - */ 653 - struct nbio_hdp_flush_reg { 654 - u32 ref_and_mask_cp0; 655 - u32 ref_and_mask_cp1; 656 - u32 ref_and_mask_cp2; 657 - u32 ref_and_mask_cp3; 658 - u32 ref_and_mask_cp4; 659 - u32 ref_and_mask_cp5; 660 - u32 ref_and_mask_cp6; 661 - u32 ref_and_mask_cp7; 662 - u32 ref_and_mask_cp8; 663 - u32 ref_and_mask_cp9; 664 - u32 ref_and_mask_sdma0; 665 - u32 ref_and_mask_sdma1; 666 - u32 ref_and_mask_sdma2; 667 - u32 ref_and_mask_sdma3; 668 - u32 ref_and_mask_sdma4; 669 - u32 ref_and_mask_sdma5; 670 - u32 ref_and_mask_sdma6; 671 - u32 ref_and_mask_sdma7; 672 - }; 673 - 674 647 struct amdgpu_mmio_remap { 675 648 u32 reg_offset; 676 649 resource_size_t bus_addr; 677 - }; 678 - 679 - struct amdgpu_nbio_funcs { 680 - const struct nbio_hdp_flush_reg *hdp_flush_reg; 681 - u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); 682 - u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); 683 - u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); 684 - u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); 685 - u32 (*get_rev_id)(struct amdgpu_device *adev); 686 - void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); 687 - void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); 688 - u32 (*get_memsize)(struct amdgpu_device *adev); 689 - void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, 690 - bool use_doorbell, int doorbell_index, int doorbell_size); 691 - void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, 692 - int 
doorbell_index, int instance); 693 - void (*enable_doorbell_aperture)(struct amdgpu_device *adev, 694 - bool enable); 695 - void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, 696 - bool enable); 697 - void (*ih_doorbell_range)(struct amdgpu_device *adev, 698 - bool use_doorbell, int doorbell_index); 699 - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, 700 - bool enable); 701 - void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev, 702 - bool enable); 703 - void (*get_clockgating_state)(struct amdgpu_device *adev, 704 - u32 *flags); 705 - void (*ih_control)(struct amdgpu_device *adev); 706 - void (*init_registers)(struct amdgpu_device *adev); 707 - void (*detect_hw_virt)(struct amdgpu_device *adev); 708 - void (*remap_hdp_registers)(struct amdgpu_device *adev); 709 650 }; 710 651 711 652 struct amdgpu_df_funcs { ··· 864 921 u32 cg_flags; 865 922 u32 pg_flags; 866 923 924 + /* nbio */ 925 + struct amdgpu_nbio nbio; 926 + 867 927 /* gfx */ 868 928 struct amdgpu_gfx gfx; 869 929 ··· 920 974 /* soc15 register offset based on ip, instance and segment */ 921 975 uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; 922 976 923 - const struct amdgpu_nbio_funcs *nbio_funcs; 924 977 const struct amdgpu_df_funcs *df_funcs; 925 978 const struct amdgpu_mmhub_funcs *mmhub_funcs; 926 979
+8 -8
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
··· 99 99 unsigned long flags, address, data; 100 100 uint32_t ficadl_val, ficadh_val; 101 101 102 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 103 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 102 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 103 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 104 104 105 105 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 106 106 WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); ··· 122 122 { 123 123 unsigned long flags, address, data; 124 124 125 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 126 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 125 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 126 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 127 127 128 128 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 129 129 WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); ··· 150 150 { 151 151 unsigned long flags, address, data; 152 152 153 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 154 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 153 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 154 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 155 155 156 156 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 157 157 WREG32(address, lo_addr); ··· 172 172 { 173 173 unsigned long flags, address, data; 174 174 175 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 176 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 175 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 176 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 177 177 178 178 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 179 179 WREG32(address, lo_addr);
+7 -7
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 2421 2421 } 2422 2422 2423 2423 if (amdgpu_emu_mode == 1) 2424 - adev->nbio_funcs->hdp_flush(adev, NULL); 2424 + adev->nbio.funcs->hdp_flush(adev, NULL); 2425 2425 2426 2426 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); 2427 2427 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); ··· 2491 2491 } 2492 2492 2493 2493 if (amdgpu_emu_mode == 1) 2494 - adev->nbio_funcs->hdp_flush(adev, NULL); 2494 + adev->nbio.funcs->hdp_flush(adev, NULL); 2495 2495 2496 2496 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); 2497 2497 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); ··· 2560 2560 } 2561 2561 2562 2562 if (amdgpu_emu_mode == 1) 2563 - adev->nbio_funcs->hdp_flush(adev, NULL); 2563 + adev->nbio.funcs->hdp_flush(adev, NULL); 2564 2564 2565 2565 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); 2566 2566 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); ··· 2881 2881 } 2882 2882 2883 2883 if (amdgpu_emu_mode == 1) 2884 - adev->nbio_funcs->hdp_flush(adev, NULL); 2884 + adev->nbio.funcs->hdp_flush(adev, NULL); 2885 2885 2886 2886 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); 2887 2887 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); ··· 4335 4335 { 4336 4336 struct amdgpu_device *adev = ring->adev; 4337 4337 u32 ref_and_mask, reg_mem_engine; 4338 - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 4338 + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 4339 4339 4340 4340 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 4341 4341 switch (ring->me) { ··· 4355 4355 } 4356 4356 4357 4357 gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 4358 - adev->nbio_funcs->get_hdp_flush_req_offset(adev), 4359 - adev->nbio_funcs->get_hdp_flush_done_offset(adev), 4358 + adev->nbio.funcs->get_hdp_flush_req_offset(adev), 4359 + adev->nbio.funcs->get_hdp_flush_done_offset(adev), 4360 4360 ref_and_mask, ref_and_mask, 0x20); 4361 4361 } 4362 4362
+3 -3
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 4972 4972 { 4973 4973 struct amdgpu_device *adev = ring->adev; 4974 4974 u32 ref_and_mask, reg_mem_engine; 4975 - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 4975 + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 4976 4976 4977 4977 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 4978 4978 switch (ring->me) { ··· 4992 4992 } 4993 4993 4994 4994 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 4995 - adev->nbio_funcs->get_hdp_flush_req_offset(adev), 4996 - adev->nbio_funcs->get_hdp_flush_done_offset(adev), 4995 + adev->nbio.funcs->get_hdp_flush_req_offset(adev), 4996 + adev->nbio.funcs->get_hdp_flush_done_offset(adev), 4997 4997 ref_and_mask, ref_and_mask, 0x20); 4998 4998 } 4999 4999
+3 -3
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 278 278 int r; 279 279 280 280 /* flush hdp cache */ 281 - adev->nbio_funcs->hdp_flush(adev, NULL); 281 + adev->nbio.funcs->hdp_flush(adev, NULL); 282 282 283 283 mutex_lock(&adev->mman.gtt_window_lock); 284 284 ··· 557 557 558 558 /* size in MB on si */ 559 559 adev->gmc.mc_vram_size = 560 - adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; 560 + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; 561 561 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; 562 562 adev->gmc.visible_vram_size = adev->gmc.aper_size; 563 563 ··· 794 794 WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); 795 795 796 796 /* Flush HDP after it is initialized */ 797 - adev->nbio_funcs->hdp_flush(adev, NULL); 797 + adev->nbio.funcs->hdp_flush(adev, NULL); 798 798 799 799 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 800 800 false : true;
+2 -2
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 996 996 997 997 /* size in MB on si */ 998 998 adev->gmc.mc_vram_size = 999 - adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; 999 + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; 1000 1000 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; 1001 1001 1002 1002 if (!(adev->flags & AMD_IS_APU)) { ··· 1361 1361 WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); 1362 1362 1363 1363 /* After HDP is initialized, flush HDP.*/ 1364 - adev->nbio_funcs->hdp_flush(adev, NULL); 1364 + adev->nbio.funcs->hdp_flush(adev, NULL); 1365 1365 1366 1366 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 1367 1367 value = false;
+2 -2
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
··· 117 117 /* disable irqs */ 118 118 navi10_ih_disable_interrupts(adev); 119 119 120 - adev->nbio_funcs->ih_control(adev); 120 + adev->nbio.funcs->ih_control(adev); 121 121 122 122 /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ 123 123 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); ··· 162 162 } 163 163 WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); 164 164 165 - adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell, 165 + adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell, 166 166 ih->doorbell_index); 167 167 168 168 tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
-1
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
··· 311 311 } 312 312 313 313 const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { 314 - .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg, 315 314 .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset, 316 315 .get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset, 317 316 .get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
+1
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
··· 26 26 27 27 #include "soc15_common.h" 28 28 29 + extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg; 29 30 extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs; 30 31 31 32 #endif
+1 -2
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
··· 226 226 return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); 227 227 } 228 228 229 - static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { 229 + const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { 230 230 .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK, 231 231 .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK, 232 232 .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK, ··· 277 277 } 278 278 279 279 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { 280 - .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg, 281 280 .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset, 282 281 .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset, 283 282 .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
+1
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
··· 26 26 27 27 #include "soc15_common.h" 28 28 29 + extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg; 29 30 extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs; 30 31 31 32 #endif
-1
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
··· 292 292 } 293 293 294 294 const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { 295 - .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg, 296 295 .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset, 297 296 .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset, 298 297 .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
+1
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
··· 26 26 27 27 #include "soc15_common.h" 28 28 29 + extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg; 29 30 extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs; 30 31 31 32 #endif
+1 -2
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
··· 266 266 return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); 267 267 } 268 268 269 - static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { 269 + const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { 270 270 .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK, 271 271 .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK, 272 272 .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK, ··· 316 316 } 317 317 318 318 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { 319 - .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg, 320 319 .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset, 321 320 .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset, 322 321 .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
+1
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
··· 26 26 27 27 #include "soc15_common.h" 28 28 29 + extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg; 29 30 extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs; 30 31 31 32 #endif
+18 -16
drivers/gpu/drm/amd/amdgpu/nv.c
··· 46 46 #include "gmc_v10_0.h" 47 47 #include "gfxhub_v2_0.h" 48 48 #include "mmhub_v2_0.h" 49 + #include "nbio_v2_3.h" 49 50 #include "nv.h" 50 51 #include "navi10_ih.h" 51 52 #include "gfx_v10_0.h" ··· 64 63 { 65 64 unsigned long flags, address, data; 66 65 u32 r; 67 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 68 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 66 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 67 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 69 68 70 69 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 71 70 WREG32(address, reg); ··· 79 78 { 80 79 unsigned long flags, address, data; 81 80 82 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 83 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 81 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 82 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 84 83 85 84 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 86 85 WREG32(address, reg); ··· 120 119 121 120 static u32 nv_get_config_memsize(struct amdgpu_device *adev) 122 121 { 123 - return adev->nbio_funcs->get_memsize(adev); 122 + return adev->nbio.funcs->get_memsize(adev); 124 123 } 125 124 126 125 static u32 nv_get_xclk(struct amdgpu_device *adev) ··· 280 279 281 280 /* wait for asic to come out of reset */ 282 281 for (i = 0; i < adev->usec_timeout; i++) { 283 - u32 memsize = adev->nbio_funcs->get_memsize(adev); 282 + u32 memsize = adev->nbio.funcs->get_memsize(adev); 284 283 285 284 if (memsize != 0xffffffff) 286 285 break; ··· 367 366 static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, 368 367 bool enable) 369 368 { 370 - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); 371 - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); 369 + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); 370 + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); 372 371 } 373 372 374 373 static const struct amdgpu_ip_block_version 
nv_common_ip_block = ··· 422 421 if (r) 423 422 return r; 424 423 425 - adev->nbio_funcs = &nbio_v2_3_funcs; 424 + adev->nbio.funcs = &nbio_v2_3_funcs; 425 + adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; 426 426 427 - adev->nbio_funcs->detect_hw_virt(adev); 427 + adev->nbio.funcs->detect_hw_virt(adev); 428 428 429 429 switch (adev->asic_type) { 430 430 case CHIP_NAVI10: ··· 482 480 483 481 static uint32_t nv_get_rev_id(struct amdgpu_device *adev) 484 482 { 485 - return adev->nbio_funcs->get_rev_id(adev); 483 + return adev->nbio.funcs->get_rev_id(adev); 486 484 } 487 485 488 486 static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) 489 487 { 490 - adev->nbio_funcs->hdp_flush(adev, ring); 488 + adev->nbio.funcs->hdp_flush(adev, ring); 491 489 } 492 490 493 491 static void nv_invalidate_hdp(struct amdgpu_device *adev, ··· 694 692 /* enable aspm */ 695 693 nv_program_aspm(adev); 696 694 /* setup nbio registers */ 697 - adev->nbio_funcs->init_registers(adev); 695 + adev->nbio.funcs->init_registers(adev); 698 696 /* enable the doorbell aperture */ 699 697 nv_enable_doorbell_aperture(adev, true); 700 698 ··· 856 854 case CHIP_NAVI10: 857 855 case CHIP_NAVI14: 858 856 case CHIP_NAVI12: 859 - adev->nbio_funcs->update_medium_grain_clock_gating(adev, 857 + adev->nbio.funcs->update_medium_grain_clock_gating(adev, 860 858 state == AMD_CG_STATE_GATE ? true : false); 861 - adev->nbio_funcs->update_medium_grain_light_sleep(adev, 859 + adev->nbio.funcs->update_medium_grain_light_sleep(adev, 862 860 state == AMD_CG_STATE_GATE ? true : false); 863 861 nv_update_hdp_mem_power_gating(adev, 864 862 state == AMD_CG_STATE_GATE ? true : false); ··· 886 884 if (amdgpu_sriov_vf(adev)) 887 885 *flags = 0; 888 886 889 - adev->nbio_funcs->get_clockgating_state(adev, flags); 887 + adev->nbio.funcs->get_clockgating_state(adev, flags); 890 888 891 889 /* AMD_CG_SUPPORT_HDP_MGCG */ 892 890 tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
+3 -3
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 746 746 { 747 747 struct amdgpu_device *adev = ring->adev; 748 748 u32 ref_and_mask = 0; 749 - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 749 + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 750 750 751 751 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; 752 752 753 753 sdma_v4_0_wait_reg_mem(ring, 0, 1, 754 - adev->nbio_funcs->get_hdp_flush_done_offset(adev), 755 - adev->nbio_funcs->get_hdp_flush_req_offset(adev), 754 + adev->nbio.funcs->get_hdp_flush_done_offset(adev), 755 + adev->nbio.funcs->get_hdp_flush_req_offset(adev), 756 756 ref_and_mask, ref_and_mask, 10); 757 757 } 758 758
+4 -4
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 406 406 { 407 407 struct amdgpu_device *adev = ring->adev; 408 408 u32 ref_and_mask = 0; 409 - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 409 + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 410 410 411 411 if (ring->me == 0) 412 412 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0; ··· 416 416 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | 417 417 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | 418 418 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ 419 - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2); 420 - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2); 419 + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); 420 + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); 421 421 amdgpu_ring_write(ring, ref_and_mask); /* reference */ 422 422 amdgpu_ring_write(ring, ref_and_mask); /* mask */ 423 423 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | ··· 683 683 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); 684 684 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); 685 685 686 - adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, 686 + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, 687 687 ring->doorbell_index, 20); 688 688 689 689 if (amdgpu_sriov_vf(adev))
+39 -32
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 58 58 #include "mmhub_v1_0.h" 59 59 #include "df_v1_7.h" 60 60 #include "df_v3_6.h" 61 + #include "nbio_v6_1.h" 62 + #include "nbio_v7_0.h" 63 + #include "nbio_v7_4.h" 61 64 #include "vega10_ih.h" 62 65 #include "sdma_v4_0.h" 63 66 #include "uvd_v7_0.h" ··· 94 91 { 95 92 unsigned long flags, address, data; 96 93 u32 r; 97 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 98 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 94 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 95 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 99 96 100 97 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 101 98 WREG32(address, reg); ··· 109 106 { 110 107 unsigned long flags, address, data; 111 108 112 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 113 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 109 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 110 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 114 111 115 112 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 116 113 WREG32(address, reg); ··· 124 121 { 125 122 unsigned long flags, address, data; 126 123 u64 r; 127 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 128 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 124 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 125 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 129 126 130 127 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 131 128 /* read low 32 bit */ ··· 145 142 { 146 143 unsigned long flags, address, data; 147 144 148 - address = adev->nbio_funcs->get_pcie_index_offset(adev); 149 - data = adev->nbio_funcs->get_pcie_data_offset(adev); 145 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 146 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 150 147 151 148 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 152 149 /* write low 32 bit */ ··· 265 262 266 263 static u32 soc15_get_config_memsize(struct amdgpu_device *adev) 267 264 { 268 - return 
adev->nbio_funcs->get_memsize(adev); 265 + return adev->nbio.funcs->get_memsize(adev); 269 266 } 270 267 271 268 static u32 soc15_get_xclk(struct amdgpu_device *adev) ··· 464 461 465 462 /* wait for asic to come out of reset */ 466 463 for (i = 0; i < adev->usec_timeout; i++) { 467 - u32 memsize = adev->nbio_funcs->get_memsize(adev); 464 + u32 memsize = adev->nbio.funcs->get_memsize(adev); 468 465 469 466 if (memsize != 0xffffffff) 470 467 break; ··· 627 624 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, 628 625 bool enable) 629 626 { 630 - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); 631 - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); 627 + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); 628 + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); 632 629 } 633 630 634 631 static const struct amdgpu_ip_block_version vega10_common_ip_block = ··· 642 639 643 640 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) 644 641 { 645 - return adev->nbio_funcs->get_rev_id(adev); 642 + return adev->nbio.funcs->get_rev_id(adev); 646 643 } 647 644 648 645 int soc15_set_ip_blocks(struct amdgpu_device *adev) ··· 668 665 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) 669 666 adev->gmc.xgmi.supported = true; 670 667 671 - if (adev->flags & AMD_IS_APU) 672 - adev->nbio_funcs = &nbio_v7_0_funcs; 673 - else if (adev->asic_type == CHIP_VEGA20 || 674 - adev->asic_type == CHIP_ARCTURUS) 675 - adev->nbio_funcs = &nbio_v7_4_funcs; 676 - else 677 - adev->nbio_funcs = &nbio_v6_1_funcs; 668 + if (adev->flags & AMD_IS_APU) { 669 + adev->nbio.funcs = &nbio_v7_0_funcs; 670 + adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; 671 + } else if (adev->asic_type == CHIP_VEGA20 || 672 + adev->asic_type == CHIP_ARCTURUS) { 673 + adev->nbio.funcs = &nbio_v7_4_funcs; 674 + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; 675 + } else { 676 + adev->nbio.funcs = &nbio_v6_1_funcs; 677 + 
adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; 678 + } 678 679 679 680 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) 680 681 adev->df_funcs = &df_v3_6_funcs; ··· 686 679 adev->df_funcs = &df_v1_7_funcs; 687 680 688 681 adev->rev_id = soc15_get_rev_id(adev); 689 - adev->nbio_funcs->detect_hw_virt(adev); 682 + adev->nbio.funcs->detect_hw_virt(adev); 690 683 691 684 if (amdgpu_sriov_vf(adev)) 692 685 adev->virt.ops = &xgpu_ai_virt_ops; ··· 792 785 793 786 static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) 794 787 { 795 - adev->nbio_funcs->hdp_flush(adev, ring); 788 + adev->nbio.funcs->hdp_flush(adev, ring); 796 789 } 797 790 798 791 static void soc15_invalidate_hdp(struct amdgpu_device *adev, ··· 1248 1241 if (!amdgpu_sriov_vf(adev)) { 1249 1242 for (i = 0; i < adev->sdma.num_instances; i++) { 1250 1243 ring = &adev->sdma.instance[i].ring; 1251 - adev->nbio_funcs->sdma_doorbell_range(adev, i, 1244 + adev->nbio.funcs->sdma_doorbell_range(adev, i, 1252 1245 ring->use_doorbell, ring->doorbell_index, 1253 1246 adev->doorbell_index.sdma_doorbell_range); 1254 1247 } 1255 1248 1256 - adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, 1249 + adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, 1257 1250 adev->irq.ih.doorbell_index); 1258 1251 } 1259 1252 } ··· 1267 1260 /* enable aspm */ 1268 1261 soc15_program_aspm(adev); 1269 1262 /* setup nbio registers */ 1270 - adev->nbio_funcs->init_registers(adev); 1263 + adev->nbio.funcs->init_registers(adev); 1271 1264 /* remap HDP registers to a hole in mmio space, 1272 1265 * for the purpose of expose those registers 1273 1266 * to process space 1274 1267 */ 1275 - if (adev->nbio_funcs->remap_hdp_registers) 1276 - adev->nbio_funcs->remap_hdp_registers(adev); 1268 + if (adev->nbio.funcs->remap_hdp_registers) 1269 + adev->nbio.funcs->remap_hdp_registers(adev); 1277 1270 1278 1271 /* enable the doorbell aperture */ 1279 1272 
soc15_enable_doorbell_aperture(adev, true); ··· 1436 1429 case CHIP_VEGA10: 1437 1430 case CHIP_VEGA12: 1438 1431 case CHIP_VEGA20: 1439 - adev->nbio_funcs->update_medium_grain_clock_gating(adev, 1432 + adev->nbio.funcs->update_medium_grain_clock_gating(adev, 1440 1433 state == AMD_CG_STATE_GATE ? true : false); 1441 - adev->nbio_funcs->update_medium_grain_light_sleep(adev, 1434 + adev->nbio.funcs->update_medium_grain_light_sleep(adev, 1442 1435 state == AMD_CG_STATE_GATE ? true : false); 1443 1436 soc15_update_hdp_light_sleep(adev, 1444 1437 state == AMD_CG_STATE_GATE ? true : false); ··· 1453 1446 break; 1454 1447 case CHIP_RAVEN: 1455 1448 case CHIP_RENOIR: 1456 - adev->nbio_funcs->update_medium_grain_clock_gating(adev, 1449 + adev->nbio.funcs->update_medium_grain_clock_gating(adev, 1457 1450 state == AMD_CG_STATE_GATE ? true : false); 1458 - adev->nbio_funcs->update_medium_grain_light_sleep(adev, 1451 + adev->nbio.funcs->update_medium_grain_light_sleep(adev, 1459 1452 state == AMD_CG_STATE_GATE ? true : false); 1460 1453 soc15_update_hdp_light_sleep(adev, 1461 1454 state == AMD_CG_STATE_GATE ? true : false); ··· 1484 1477 if (amdgpu_sriov_vf(adev)) 1485 1478 *flags = 0; 1486 1479 1487 - adev->nbio_funcs->get_clockgating_state(adev, flags); 1480 + adev->nbio.funcs->get_clockgating_state(adev, flags); 1488 1481 1489 1482 /* AMD_CG_SUPPORT_HDP_LS */ 1490 1483 data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+1 -1
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
··· 244 244 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; 245 245 int i, r; 246 246 247 - adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, 247 + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, 248 248 ring->doorbell_index, 0); 249 249 250 250 ring->sched.ready = true;
+1 -1
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 255 255 continue; 256 256 ring = &adev->vcn.inst[j].ring_dec; 257 257 258 - adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, 258 + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, 259 259 ring->doorbell_index, j); 260 260 261 261 r = amdgpu_ring_test_ring(ring);
+1 -1
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
··· 226 226 /* disable irqs */ 227 227 vega10_ih_disable_interrupts(adev); 228 228 229 - adev->nbio_funcs->ih_control(adev); 229 + adev->nbio.funcs->ih_control(adev); 230 230 231 231 ih = &adev->irq.ih; 232 232 /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+1 -1
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
··· 460 460 return ret; 461 461 462 462 /* flush hdp cache */ 463 - adev->nbio_funcs->hdp_flush(adev, NULL); 463 + adev->nbio.funcs->hdp_flush(adev, NULL); 464 464 465 465 if (!drv2smu) 466 466 memcpy(table_data, table->cpu_addr, table->size);
+1 -1
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
··· 137 137 priv->smu_tables.entry[table_id].table_id); 138 138 139 139 /* flush hdp cache */ 140 - adev->nbio_funcs->hdp_flush(adev, NULL); 140 + adev->nbio.funcs->hdp_flush(adev, NULL); 141 141 142 142 memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, 143 143 priv->smu_tables.entry[table_id].size);
+1 -1
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
··· 58 58 priv->smu_tables.entry[table_id].table_id); 59 59 60 60 /* flush hdp cache */ 61 - adev->nbio_funcs->hdp_flush(adev, NULL); 61 + adev->nbio.funcs->hdp_flush(adev, NULL); 62 62 63 63 memcpy(table, priv->smu_tables.entry[table_id].table, 64 64 priv->smu_tables.entry[table_id].size);
+1 -1
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
··· 66 66 return -EINVAL); 67 67 68 68 /* flush hdp cache */ 69 - adev->nbio_funcs->hdp_flush(adev, NULL); 69 + adev->nbio.funcs->hdp_flush(adev, NULL); 70 70 71 71 memcpy(table, priv->smu_tables.entry[table_id].table, 72 72 priv->smu_tables.entry[table_id].size);
+2 -2
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
··· 189 189 return ret); 190 190 191 191 /* flush hdp cache */ 192 - adev->nbio_funcs->hdp_flush(adev, NULL); 192 + adev->nbio.funcs->hdp_flush(adev, NULL); 193 193 194 194 memcpy(table, priv->smu_tables.entry[table_id].table, 195 195 priv->smu_tables.entry[table_id].size); ··· 290 290 return ret); 291 291 292 292 /* flush hdp cache */ 293 - adev->nbio_funcs->hdp_flush(adev, NULL); 293 + adev->nbio.funcs->hdp_flush(adev, NULL); 294 294 295 295 memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, 296 296 priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);