Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: clean up sumo_rlc_init() for code sharing

This will eventually be shared with newer ASICs to
reduce code duplication.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

+123 -118
drivers/gpu/drm/radeon/evergreen.c
··· 3910 3910 dws = rdev->rlc.reg_list_size; 3911 3911 cs_data = rdev->rlc.cs_data; 3912 3912 3913 - /* save restore block */ 3914 - if (rdev->rlc.save_restore_obj == NULL) { 3915 - r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 3916 - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj); 3917 - if (r) { 3918 - dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); 3919 - return r; 3913 + if (src_ptr) { 3914 + /* save restore block */ 3915 + if (rdev->rlc.save_restore_obj == NULL) { 3916 + r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 3917 + RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj); 3918 + if (r) { 3919 + dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); 3920 + return r; 3921 + } 3920 3922 } 3921 - } 3922 3923 3923 - r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); 3924 - if (unlikely(r != 0)) { 3925 - sumo_rlc_fini(rdev); 3926 - return r; 3927 - } 3928 - r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 3929 - &rdev->rlc.save_restore_gpu_addr); 3930 - if (r) { 3931 - radeon_bo_unreserve(rdev->rlc.save_restore_obj); 3932 - dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); 3933 - sumo_rlc_fini(rdev); 3934 - return r; 3935 - } 3936 - r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); 3937 - if (r) { 3938 - dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r); 3939 - sumo_rlc_fini(rdev); 3940 - return r; 3941 - } 3942 - /* write the sr buffer */ 3943 - dst_ptr = rdev->rlc.sr_ptr; 3944 - /* format: 3945 - * dw0: (reg2 << 16) | reg1 3946 - * dw1: reg1 save space 3947 - * dw2: reg2 save space 3948 - */ 3949 - for (i = 0; i < dws; i++) { 3950 - data = src_ptr[i] >> 2; 3951 - i++; 3952 - if (i < dws) 3953 - data |= (src_ptr[i] >> 2) << 16; 3954 - j = (((i - 1) * 3) / 2); 3955 - dst_ptr[j] = data; 3956 - } 3957 - j = ((i * 3) / 2); 3958 - dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER; 3959 - 3960 - radeon_bo_kunmap(rdev->rlc.save_restore_obj); 3961 - 
radeon_bo_unreserve(rdev->rlc.save_restore_obj); 3962 - 3963 - /* clear state block */ 3964 - reg_list_num = 0; 3965 - dws = 0; 3966 - for (i = 0; cs_data[i].section != NULL; i++) { 3967 - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { 3968 - reg_list_num++; 3969 - dws += cs_data[i].section[j].reg_count; 3970 - } 3971 - } 3972 - reg_list_blk_index = (3 * reg_list_num + 2); 3973 - dws += reg_list_blk_index; 3974 - 3975 - if (rdev->rlc.clear_state_obj == NULL) { 3976 - r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 3977 - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); 3978 - if (r) { 3979 - dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); 3924 + r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); 3925 + if (unlikely(r != 0)) { 3980 3926 sumo_rlc_fini(rdev); 3981 3927 return r; 3982 3928 } 3983 - } 3984 - r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); 3985 - if (unlikely(r != 0)) { 3986 - sumo_rlc_fini(rdev); 3987 - return r; 3988 - } 3989 - r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 3990 - &rdev->rlc.clear_state_gpu_addr); 3991 - if (r) { 3992 - 3993 - radeon_bo_unreserve(rdev->rlc.clear_state_obj); 3994 - dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 3995 - sumo_rlc_fini(rdev); 3996 - return r; 3997 - } 3998 - r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); 3999 - if (r) { 4000 - dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); 4001 - sumo_rlc_fini(rdev); 4002 - return r; 4003 - } 4004 - /* set up the cs buffer */ 4005 - dst_ptr = rdev->rlc.cs_ptr; 4006 - reg_list_hdr_blk_index = 0; 4007 - reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); 4008 - data = upper_32_bits(reg_list_mc_addr); 4009 - dst_ptr[reg_list_hdr_blk_index] = data; 4010 - reg_list_hdr_blk_index++; 4011 - for (i = 0; cs_data[i].section != NULL; i++) { 4012 - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { 4013 - reg_num = 
cs_data[i].section[j].reg_count; 4014 - data = reg_list_mc_addr & 0xffffffff; 4015 - dst_ptr[reg_list_hdr_blk_index] = data; 4016 - reg_list_hdr_blk_index++; 4017 - 4018 - data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; 4019 - dst_ptr[reg_list_hdr_blk_index] = data; 4020 - reg_list_hdr_blk_index++; 4021 - 4022 - data = 0x08000000 | (reg_num * 4); 4023 - dst_ptr[reg_list_hdr_blk_index] = data; 4024 - reg_list_hdr_blk_index++; 4025 - 4026 - for (k = 0; k < reg_num; k++) { 4027 - data = cs_data[i].section[j].extent[k]; 4028 - dst_ptr[reg_list_blk_index + k] = data; 4029 - } 4030 - reg_list_mc_addr += reg_num * 4; 4031 - reg_list_blk_index += reg_num; 3929 + r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, 3930 + &rdev->rlc.save_restore_gpu_addr); 3931 + if (r) { 3932 + radeon_bo_unreserve(rdev->rlc.save_restore_obj); 3933 + dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); 3934 + sumo_rlc_fini(rdev); 3935 + return r; 4032 3936 } 4033 - } 4034 - dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; 4035 3937 4036 - radeon_bo_kunmap(rdev->rlc.clear_state_obj); 4037 - radeon_bo_unreserve(rdev->rlc.clear_state_obj); 3938 + r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); 3939 + if (r) { 3940 + dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r); 3941 + sumo_rlc_fini(rdev); 3942 + return r; 3943 + } 3944 + /* write the sr buffer */ 3945 + dst_ptr = rdev->rlc.sr_ptr; 3946 + /* format: 3947 + * dw0: (reg2 << 16) | reg1 3948 + * dw1: reg1 save space 3949 + * dw2: reg2 save space 3950 + */ 3951 + for (i = 0; i < dws; i++) { 3952 + data = src_ptr[i] >> 2; 3953 + i++; 3954 + if (i < dws) 3955 + data |= (src_ptr[i] >> 2) << 16; 3956 + j = (((i - 1) * 3) / 2); 3957 + dst_ptr[j] = data; 3958 + } 3959 + j = ((i * 3) / 2); 3960 + dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER; 3961 + 3962 + radeon_bo_kunmap(rdev->rlc.save_restore_obj); 3963 + radeon_bo_unreserve(rdev->rlc.save_restore_obj); 3964 + } 3965 + 3966 
+ if (cs_data) { 3967 + /* clear state block */ 3968 + reg_list_num = 0; 3969 + dws = 0; 3970 + for (i = 0; cs_data[i].section != NULL; i++) { 3971 + for (j = 0; cs_data[i].section[j].extent != NULL; j++) { 3972 + reg_list_num++; 3973 + dws += cs_data[i].section[j].reg_count; 3974 + } 3975 + } 3976 + reg_list_blk_index = (3 * reg_list_num + 2); 3977 + dws += reg_list_blk_index; 3978 + 3979 + if (rdev->rlc.clear_state_obj == NULL) { 3980 + r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, 3981 + RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); 3982 + if (r) { 3983 + dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); 3984 + sumo_rlc_fini(rdev); 3985 + return r; 3986 + } 3987 + } 3988 + r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); 3989 + if (unlikely(r != 0)) { 3990 + sumo_rlc_fini(rdev); 3991 + return r; 3992 + } 3993 + r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, 3994 + &rdev->rlc.clear_state_gpu_addr); 3995 + if (r) { 3996 + radeon_bo_unreserve(rdev->rlc.clear_state_obj); 3997 + dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); 3998 + sumo_rlc_fini(rdev); 3999 + return r; 4000 + } 4001 + 4002 + r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); 4003 + if (r) { 4004 + dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); 4005 + sumo_rlc_fini(rdev); 4006 + return r; 4007 + } 4008 + /* set up the cs buffer */ 4009 + dst_ptr = rdev->rlc.cs_ptr; 4010 + reg_list_hdr_blk_index = 0; 4011 + reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); 4012 + data = upper_32_bits(reg_list_mc_addr); 4013 + dst_ptr[reg_list_hdr_blk_index] = data; 4014 + reg_list_hdr_blk_index++; 4015 + for (i = 0; cs_data[i].section != NULL; i++) { 4016 + for (j = 0; cs_data[i].section[j].extent != NULL; j++) { 4017 + reg_num = cs_data[i].section[j].reg_count; 4018 + data = reg_list_mc_addr & 0xffffffff; 4019 + dst_ptr[reg_list_hdr_blk_index] = data; 4020 + reg_list_hdr_blk_index++; 4021 + 4022 + 
data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; 4023 + dst_ptr[reg_list_hdr_blk_index] = data; 4024 + reg_list_hdr_blk_index++; 4025 + 4026 + data = 0x08000000 | (reg_num * 4); 4027 + dst_ptr[reg_list_hdr_blk_index] = data; 4028 + reg_list_hdr_blk_index++; 4029 + 4030 + for (k = 0; k < reg_num; k++) { 4031 + data = cs_data[i].section[j].extent[k]; 4032 + dst_ptr[reg_list_blk_index + k] = data; 4033 + } 4034 + reg_list_mc_addr += reg_num * 4; 4035 + reg_list_blk_index += reg_num; 4036 + } 4037 + } 4038 + dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; 4039 + 4040 + radeon_bo_kunmap(rdev->rlc.clear_state_obj); 4041 + radeon_bo_unreserve(rdev->rlc.clear_state_obj); 4042 + } 4038 4043 4039 4044 return 0; 4040 4045 }