Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next

Some more radeon and amdgpu stuff for drm-next. Mostly just bug fixes
for new features and cleanups.

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux:
drm/amdgpu: fix rb bitmap & cu bitmap calculation
drm/amdgpu: trace the pd_addr in vm_grab_id as well
drm/amdgpu: fix VM faults caused by vm_grab_id() v4
drm/amdgpu: update radeon acpi header
drm/radeon: update radeon acpi header
drm/amd: cleanup get_mfd_cell_dev()
drm/amdgpu: fix error handling in amdgpu_bo_list_set
drm/amd/powerplay: fix code style warning.
drm/amd: Do not make DRM_AMD_ACP default to y
drm/amdgpu/gfx: fix off by one in rb rework (v2)

+133 -123
-1
drivers/gpu/drm/amd/acp/Kconfig
···
 
 config DRM_AMD_ACP
 	bool "Enable ACP IP support"
-	default y
 	select MFD_CORE
 	select PM_GENERIC_DOMAINS if PM
 	help
+9 -7
drivers/gpu/drm/amd/amdgpu/amdgpu.h
···
 	uint32_t			*ptr;
 	struct amdgpu_fence		*fence;
 	struct amdgpu_user_fence	*user;
-	bool				grabbed_vmid;
 	struct amdgpu_vm		*vm;
+	unsigned			vm_id;
+	uint64_t			vm_pd_addr;
 	struct amdgpu_ctx		*ctx;
 	uint32_t			gds_base, gds_size;
 	uint32_t			gws_base, gws_size;
···
 };
 
 struct amdgpu_vm_id {
-	unsigned		id;
-	uint64_t		pd_gpu_addr;
+	struct amdgpu_vm_manager_id	*mgr_id;
+	uint64_t			pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
+	struct fence			*flushed_updates;
 };
 
 struct amdgpu_vm {
···
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence);
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
+		     unsigned vmid,
+		     uint64_t pd_addr);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
+2 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
···
 static struct device *get_mfd_cell_dev(const char *device_name, int r)
 {
 	char auto_dev_name[25];
-	char buf[8];
 	struct device *dev;
 
-	sprintf(buf, ".%d.auto", r);
-	strcpy(auto_dev_name, device_name);
-	strcat(auto_dev_name, buf);
+	snprintf(auto_dev_name, sizeof(auto_dev_name),
+		 "%s.%d.auto", device_name, r);
 	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
 	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
 
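The cleanup above replaces an unbounded sprintf()/strcpy()/strcat() chain with one bounded snprintf(), so a long device_name can no longer overrun the 25-byte auto_dev_name buffer. A minimal userspace sketch of the same pattern (the device name and the truncation handling here are illustrative, not the driver's):

#include <stdio.h>

/* Compose "<name>.<r>.auto" into a fixed-size buffer.  snprintf()
 * writes at most len-1 characters plus a NUL, unlike the old
 * sprintf/strcpy/strcat sequence, which had no bound at all. */
static int make_auto_dev_name(char *buf, size_t len,
			      const char *device_name, int r)
{
	int n = snprintf(buf, len, "%s.%d.auto", device_name, r);

	/* A return value >= len means the result was truncated. */
	return (n < 0 || (size_t)n >= len) ? -1 : 0;
}

int main(void)
{
	char name[25];

	if (!make_auto_dev_name(name, sizeof(name), "acp_audio_dma", 0))
		printf("%s\n", name);	/* prints "acp_audio_dma.0.auto" */
	return 0;
}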
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
···
 		usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);
 		if (usermm) {
 			if (usermm != current->mm) {
+				amdgpu_bo_unref(&entry->robj);
 				r = -EPERM;
 				goto error_free;
 			}
···
 	return 0;
 
 error_free:
+	while (i--)
+		amdgpu_bo_unref(&array[i].robj);
 	drm_free_large(array);
 	return r;
 }
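The two hunks above work together: the -EPERM path now drops the reference just taken on the offending BO, and error_free walks backwards over every entry referenced before it. A userspace analogue of that `while (i--)` unwind idiom, with BOs reduced to plain refcounted structs (all names here are illustrative):

#include <assert.h>
#include <stdio.h>

struct obj { int refcount; };

/* Take a reference on each entry; on failure, release exactly the
 * references taken so far.  `i` indexes the failing entry, so
 * `while (i--)` visits entries i-1 .. 0 in reverse -- the same
 * unwind the new error_free path performs. */
static int grab_all(struct obj **objs, int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		if (!objs[i])
			goto error_free;	/* stand-in for -EPERM */
		objs[i]->refcount++;
	}
	return 0;

error_free:
	while (i--)
		objs[i]->refcount--;
	return -1;
}

int main(void)
{
	struct obj a = { 0 }, b = { 0 };
	struct obj *objs[] = { &a, &b, NULL };	/* third entry fails */

	assert(grab_all(objs, 3) == -1);
	assert(a.refcount == 0 && b.refcount == 0);	/* fully unwound */
	printf("unwound cleanly\n");
	return 0;
}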
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
···
 	}
 
 	ib->vm = vm;
+	ib->vm_id = 0;
 
 	return 0;
 }
···
 		return -EINVAL;
 	}
 
-	if (vm && !ibs->grabbed_vmid) {
+	if (vm && !ibs->vm_id) {
 		dev_err(adev->dev, "VM IB without ID\n");
 		return -EINVAL;
 	}
···
 
 	if (vm) {
 		/* do context switch */
-		amdgpu_vm_flush(ring, vm, last_vm_update);
+		amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr);
 
 		if (ring->funcs->emit_gds_switch)
-			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+			amdgpu_ring_emit_gds_switch(ring, ib->vm_id,
 						    ib->gds_base, ib->gds_size,
 						    ib->gws_base, ib->gws_size,
 						    ib->oa_base, ib->oa_size);
+11 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
···
 
 	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
 
-	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
+	if (fence == NULL && vm && !job->ibs->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
+		unsigned i, vm_id;
+		uint64_t vm_pd_addr;
 		int r;
 
 		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-				      &job->base.s_fence->base);
+				      &job->base.s_fence->base,
+				      &vm_id, &vm_pd_addr);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-		else
-			job->ibs->grabbed_vmid = true;
+		else {
+			for (i = 0; i < job->num_ibs; ++i) {
+				job->ibs[i].vm_id = vm_id;
+				job->ibs[i].vm_pd_addr = vm_pd_addr;
+			}
+		}
 
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}
+11 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
···
 
 
 TRACE_EVENT(amdgpu_vm_grab_id,
-	    TP_PROTO(struct amdgpu_vm *vm, unsigned vmid, int ring),
-	    TP_ARGS(vm, vmid, ring),
+	    TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
+		     uint64_t pd_addr),
+	    TP_ARGS(vm, ring, vmid, pd_addr),
 	    TP_STRUCT__entry(
 			     __field(struct amdgpu_vm *, vm)
-			     __field(u32, vmid)
 			     __field(u32, ring)
+			     __field(u32, vmid)
+			     __field(u64, pd_addr)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->vm = vm;
-			   __entry->vmid = vmid;
 			   __entry->ring = ring;
+			   __entry->vmid = vmid;
+			   __entry->pd_addr = pd_addr;
 			   ),
-	    TP_printk("vm=%p, id=%u, ring=%u", __entry->vm, __entry->vmid,
-		      __entry->ring)
+	    TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
+		      __entry->ring, __entry->vmid, __entry->pd_addr)
 );
 
 TRACE_EVENT(amdgpu_vm_bo_map,
···
 			   __entry->ring = ring;
 			   __entry->id = id;
 			   ),
-	    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
-		      __entry->pd_addr, __entry->ring, __entry->id)
+	    TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
+		      __entry->ring, __entry->id, __entry->pd_addr)
 );
 
 TRACE_EVENT(amdgpu_bo_list_set,
+63 -57
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
···
  * SI supports 16.
  */
 
+/* Special value that no flush is necessary */
+#define AMDGPU_VM_NO_FLUSH (~0ll)
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *
···
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence)
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr)
 {
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_manager_id *id;
+	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
+	struct fence *updates = sync->last_vm_update;
 	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
 	/* check if the id is still valid */
-	if (vm_id->id) {
+	if (id->mgr_id) {
+		struct fence *flushed = id->flushed_updates;
+		bool is_later;
 		long owner;
 
-		id = &adev->vm_manager.ids[vm_id->id];
-		owner = atomic_long_read(&id->owner);
-		if (owner == (long)vm) {
-			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+		if (!flushed)
+			is_later = true;
+		else if (!updates)
+			is_later = false;
+		else
+			is_later = fence_is_later(updates, flushed);
 
-			fence_put(id->active);
-			id->active = fence_get(fence);
+		owner = atomic_long_read(&id->mgr_id->owner);
+		if (!is_later && owner == (long)id &&
+		    pd_addr == id->pd_gpu_addr) {
+
+			fence_put(id->mgr_id->active);
+			id->mgr_id->active = fence_get(fence);
+
+			list_move_tail(&id->mgr_id->list,
+				       &adev->vm_manager.ids_lru);
+
+			*vm_id = id->mgr_id - adev->vm_manager.ids;
+			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
+						*vm_pd_addr);
 
 			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 	}
 
-	/* we definately need to flush */
-	vm_id->pd_gpu_addr = ~0ll;
+	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+				      struct amdgpu_vm_manager_id,
+				      list);
 
-	id = list_first_entry(&adev->vm_manager.ids_lru,
-			      struct amdgpu_vm_manager_id,
-			      list);
-	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-	atomic_long_set(&id->owner, (long)vm);
-
-	vm_id->id = id - adev->vm_manager.ids;
-	trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
-
-	r = amdgpu_sync_fence(ring->adev, sync, id->active);
-
+	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
 	if (!r) {
-		fence_put(id->active);
-		id->active = fence_get(fence);
+		fence_put(id->mgr_id->active);
+		id->mgr_id->active = fence_get(fence);
+
+		fence_put(id->flushed_updates);
+		id->flushed_updates = fence_get(updates);
+
+		id->pd_gpu_addr = pd_addr;
+
+		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
+		atomic_long_set(&id->mgr_id->owner, (long)id);
+
+		*vm_id = id->mgr_id - adev->vm_manager.ids;
+		*vm_pd_addr = pd_addr;
+		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 	}
 
 	mutex_unlock(&adev->vm_manager.lock);
···
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vm: vm we want to flush
- * @updates: last vm update that we waited for
+ * @vmid: vmid number to use
+ * @pd_addr: address of the page directory
  *
- * Flush the vm.
+ * Emit a VM flush when it is necessary.
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates)
+		     unsigned vmid,
+		     uint64_t pd_addr)
 {
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
-	struct fence *flushed_updates = vm_id->flushed_updates;
-	bool is_later;
-
-	if (!flushed_updates)
-		is_later = true;
-	else if (!updates)
-		is_later = false;
-	else
-		is_later = fence_is_later(updates, flushed_updates);
-
-	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
-		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		if (is_later) {
-			vm_id->flushed_updates = fence_get(updates);
-			fence_put(flushed_updates);
-		}
-		vm_id->pd_gpu_addr = pd_addr;
-		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
+	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+		trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid);
+		amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr);
 	}
 }
···
 	int i, r;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		vm->ids[i].id = 0;
+		vm->ids[i].mgr_id = NULL;
 		vm->ids[i].flushed_updates = NULL;
 	}
 	vm->va = RB_ROOT;
···
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		unsigned id = vm->ids[i].id;
+		struct amdgpu_vm_id *id = &vm->ids[i];
 
-		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
-				    (long)vm, 0);
-		fence_put(vm->ids[i].flushed_updates);
+		if (id->mgr_id)
+			atomic_long_cmpxchg(&id->mgr_id->owner,
+					    (long)id, 0);
+		fence_put(id->flushed_updates);
 	}
-
 }
 
 /**
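The heart of the vm_grab_id() v4 fix is deciding at grab time whether an ID can be reused without a flush: reuse is safe only when nothing newer than the last flushed page-table update exists and the page-directory address is unchanged, in which case the caller receives AMDGPU_VM_NO_FLUSH and amdgpu_vm_flush() becomes a no-op. A userspace sketch of that three-way test, with fences reduced to sequence numbers and the owner check omitted (all names here are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VM_NO_FLUSH (~0ull)	/* mirrors AMDGPU_VM_NO_FLUSH */

/* A fence reduced to a sequence number; 0 means "no fence". */
typedef uint64_t fence_t;

static bool fence_is_later(fence_t a, fence_t b)
{
	return a > b;
}

/* Decide whether reusing a VM ID needs a flush, following the logic
 * added to amdgpu_vm_grab_id(): no previously flushed fence forces a
 * flush; no pending updates avoids one; otherwise compare the two.
 * A changed page-directory address forces a flush regardless. */
static uint64_t pd_addr_for_reuse(fence_t updates, fence_t flushed,
				  uint64_t pd_addr, uint64_t last_pd_addr)
{
	bool is_later;

	if (!flushed)
		is_later = true;
	else if (!updates)
		is_later = false;
	else
		is_later = fence_is_later(updates, flushed);

	if (!is_later && pd_addr == last_pd_addr)
		return VM_NO_FLUSH;	/* reuse the ID, skip the flush */
	return pd_addr;			/* caller must emit a flush */
}

int main(void)
{
	/* updates == flushed and same PD address: no flush needed */
	printf("%d\n", pd_addr_for_reuse(5, 5, 0x1000, 0x1000) == VM_NO_FLUSH);
	/* updates newer than the last flush: flush with the PD address */
	printf("%d\n", pd_addr_for_reuse(6, 5, 0x1000, 0x1000) == 0x1000);
	return 0;
}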
+1 -1
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
···
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib)
 {
-	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 extra_bits = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 4)
-3
drivers/gpu/drm/amd/amdgpu/cikd.h
···
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN	0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN	0x12011003
 
-#define CIK_RB_BITMAP_WIDTH_PER_SH	2
-#define HAWAII_RB_BITMAP_WIDTH_PER_SH	4
-
 #define AMDGPU_NUM_OF_VMIDS	8
 
 #define		PIPEID(x)	((x) << 0)
+11 -17
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
···
 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
-	u32 data, tmp, num_rbs = 0;
+	u32 data;
 	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			gfx_v7_0_select_se_sh(adev, i, j);
 			data = gfx_v7_0_get_rb_active_bitmap(adev);
-			if (adev->asic_type == CHIP_HAWAII)
-				active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-						       HAWAII_RB_BITMAP_WIDTH_PER_SH);
-			else
-				active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-						       CIK_RB_BITMAP_WIDTH_PER_SH);
+			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+					       rb_bitmap_width_per_sh);
 		}
 	}
 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	adev->gfx.config.backend_enable_mask = active_rbs;
-	tmp = active_rbs;
-	while (tmp >>= 1)
-		num_rbs++;
-	adev->gfx.config.num_rbs = num_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 }
 
 /**
···
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
···
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
···
 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
 
-	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
-				       adev->gfx.config.max_sh_per_se);
+	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
 	return (~data) & mask;
 }
···
 
 	if (!adev || !cu_info)
 		return -EINVAL;
+
+	memset(cu_info, 0, sizeof(*cu_info));
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
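The num_rbs hunk is the "off by one in rb rework (v2)" fix from the shortlog, applied to gfx_v7 here and to gfx_v8 below. Because `while (tmp >>= 1)` shifts before testing, the old loop yields floor(log2(x)) rather than the number of set bits, so a full mask like 0xF was counted as 3. hweight32() is the kernel's 32-bit population count; a userspace comparison using __builtin_popcount as a stand-in:

#include <stdint.h>
#include <stdio.h>

/* The old loop: shifts until the value is exhausted.  Because the
 * test happens after the shift, a mask of 0xF yields 3, not 4, and
 * a lone bit (0x1) yields 0 -- it counts bit positions below the
 * highest set bit, not set bits. */
static unsigned count_rbs_old(uint32_t active_rbs)
{
	uint32_t tmp = active_rbs;
	unsigned num_rbs = 0;

	while (tmp >>= 1)
		num_rbs++;
	return num_rbs;
}

/* hweight32() in the kernel; __builtin_popcount (GCC/Clang) has the
 * same semantics in userspace. */
static unsigned count_rbs_new(uint32_t active_rbs)
{
	return (unsigned)__builtin_popcount(active_rbs);
}

int main(void)
{
	uint32_t masks[] = { 0x1, 0x3, 0xF, 0x5 };

	for (unsigned i = 0; i < 4; i++)
		printf("mask 0x%x: old=%u new=%u\n", (unsigned)masks[i],
		       count_rbs_old(masks[i]), count_rbs_new(masks[i]));
	return 0;
}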
+10 -12
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
···
 static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
 {
 	int i, j;
-	u32 data, tmp, num_rbs = 0;
+	u32 data;
 	u32 active_rbs = 0;
+	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+					adev->gfx.config.max_sh_per_se;
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
···
 			gfx_v8_0_select_se_sh(adev, i, j);
 			data = gfx_v8_0_get_rb_active_bitmap(adev);
 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
-					       RB_BITMAP_WIDTH_PER_SH);
+					       rb_bitmap_width_per_sh);
 		}
 	}
 	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	adev->gfx.config.backend_enable_mask = active_rbs;
-	tmp = active_rbs;
-	while (tmp >>= 1)
-		num_rbs++;
-	adev->gfx.config.num_rbs = num_rbs;
+	adev->gfx.config.num_rbs = hweight32(active_rbs);
 }
 
 /**
···
 	else
 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
···
 
 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-	control |= ib->length_dw |
-		   (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+	control |= ib->length_dw | (ib->vm_id << 24);
 
 	amdgpu_ring_write(ring, header);
 	amdgpu_ring_write(ring,
···
 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
 
-	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
-				       adev->gfx.config.max_sh_per_se);
+	mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
 
 	return (~data) & mask;
 }
···
 
 	if (!adev || !cu_info)
 		return -EINVAL;
+
+	memset(cu_info, 0, sizeof(*cu_info));
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+1 -1
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
···
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib)
 {
-	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 vmid = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
+1 -1
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
···
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib)
 {
-	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 vmid = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 2)
-2
drivers/gpu/drm/amd/amdgpu/vid.h
···
 #define	VMID(x)		((x) << 4)
 #define	QUEUEID(x)	((x) << 8)
 
-#define RB_BITMAP_WIDTH_PER_SH	2
-
 #define MC_SEQ_MISC0__MT__MASK	0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1	0x10000000
 #define MC_SEQ_MISC0__MT__DDR2	0x20000000
+2
drivers/gpu/drm/amd/include/amd_acpi.h
···
 #	define ATPX_FIXED_NOT_SUPPORTED			(1 << 9)
 #	define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED	(1 << 10)
 #	define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS		(1 << 11)
+#	define ATPX_DGPU_CAN_DRIVE_DISPLAYS		(1 << 12)
+#	define ATPX_MS_HYBRID_GFX_SUPPORTED		(1 << 14)
 #define ATPX_FUNCTION_POWER_CONTROL			0x2
 /* ARG0: ATPX_FUNCTION_POWER_CONTROL
  * ARG1:
+2 -2
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
···
 
 	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
 		hwmgr->hwmgr_func->set_pp_table == NULL)
-			return -EINVAL;
+		return -EINVAL;
 
 	return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
 }
···
 
 	if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
 		hwmgr->hwmgr_func->force_clock_level == NULL)
-			return -EINVAL;
+		return -EINVAL;
 
 	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
 }
+2
drivers/gpu/drm/radeon/radeon_acpi.h
···
 #	define ATPX_FIXED_NOT_SUPPORTED			(1 << 9)
 #	define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED	(1 << 10)
 #	define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS		(1 << 11)
+#	define ATPX_DGPU_CAN_DRIVE_DISPLAYS		(1 << 12)
+#	define ATPX_MS_HYBRID_GFX_SUPPORTED		(1 << 14)
 #define ATPX_FUNCTION_POWER_CONTROL			0x2
 /* ARG0: ATPX_FUNCTION_POWER_CONTROL
  * ARG1: