Merge branch 'drm-next-4.14' of git://people.freedesktop.org/~agd5f/linux into drm-next

A few fixes for 4.14. Nothing too major. Highlights:
- rework the gartsize parameter into a signed "-1 = auto" value with per-ASIC defaults in the gmc code
- keep shadow BOs and newly bound BOs within the GART-addressable range
- raise the hw submission limit for the KIQ ring
- fix the dyamic_cu_mask/dyamic_rb_mask typos in the vi MQD
- plumb the ACG frequency table through the vega10 SMU interface
- don't block amd_sched_entity_fini() forever when the owning process was killed
- assorted error-path, TTM accounting and register-programming cleanups

+237 -161
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 76 extern int amdgpu_modeset; 77 extern int amdgpu_vram_limit; 78 extern int amdgpu_vis_vram_limit; 79 - extern unsigned amdgpu_gart_size; 80 extern int amdgpu_gtt_size; 81 extern int amdgpu_moverate; 82 extern int amdgpu_benchmarking;
··· 76 extern int amdgpu_modeset; 77 extern int amdgpu_vram_limit; 78 extern int amdgpu_vis_vram_limit; 79 + extern int amdgpu_gart_size; 80 extern int amdgpu_gtt_size; 81 extern int amdgpu_moverate; 82 extern int amdgpu_benchmarking;
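For illustration only (not in the pull): the extern changes from unsigned to int so that -1 can act as an "auto" sentinel for gartsize. A minimal user-space sketch, with hypothetical variable names, of why the unsigned type gets in the way of that convention:

#include <stdio.h>

int main(void)
{
    unsigned int gart_mb_unsigned = -1;  /* wraps to UINT_MAX */
    int gart_mb_signed = -1;             /* keeps the sentinel value */

    /* With an unsigned type a "too small" check never fires for -1 ... */
    printf("unsigned: %u, < 32? %s\n",
           gart_mb_unsigned, gart_mb_unsigned < 32 ? "yes" : "no");

    /* ... while the signed variant expresses "auto" directly. */
    printf("signed: %d, is auto? %s\n",
           gart_mb_signed, gart_mb_signed == -1 ? "yes" : "no");
    return 0;
}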
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
··· 155 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) 156 { 157 return (struct kfd2kgd_calls *)&kfd2kgd; 158 - return (struct kfd2kgd_calls *)&kfd2kgd; 159 } 160 161 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
··· 155 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) 156 { 157 return (struct kfd2kgd_calls *)&kfd2kgd; 158 } 159 160 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+3 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 1079 GFP_KERNEL); 1080 p->num_post_dep_syncobjs = 0; 1081 1082 for (i = 0; i < num_deps; ++i) { 1083 p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); 1084 if (!p->post_dep_syncobjs[i]) ··· 1153 cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); 1154 job->uf_sequence = cs->out.handle; 1155 amdgpu_job_free_resources(job); 1156 - amdgpu_cs_parser_fini(p, 0, true); 1157 1158 trace_amdgpu_cs_ioctl(job); 1159 amd_sched_entity_push_job(&job->base); ··· 1210 goto out; 1211 1212 r = amdgpu_cs_submit(&parser, cs); 1213 - if (r) 1214 - goto out; 1215 1216 - return 0; 1217 out: 1218 amdgpu_cs_parser_fini(&parser, r, reserved_buffers); 1219 return r;
··· 1079 GFP_KERNEL); 1080 p->num_post_dep_syncobjs = 0; 1081 1082 + if (!p->post_dep_syncobjs) 1083 + return -ENOMEM; 1084 + 1085 for (i = 0; i < num_deps; ++i) { 1086 p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); 1087 if (!p->post_dep_syncobjs[i]) ··· 1150 cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); 1151 job->uf_sequence = cs->out.handle; 1152 amdgpu_job_free_resources(job); 1153 1154 trace_amdgpu_cs_ioctl(job); 1155 amd_sched_entity_push_job(&job->base); ··· 1208 goto out; 1209 1210 r = amdgpu_cs_submit(&parser, cs); 1211 1212 out: 1213 amdgpu_cs_parser_fini(&parser, r, reserved_buffers); 1214 return r;
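For context, not part of the pull: the hunk adds a NULL check for the freshly allocated post_dep_syncobjs array and routes the submit result through the single out: label, so amdgpu_cs_parser_fini() is the only cleanup path. A small stand-alone sketch of that goto-out pattern, with hypothetical names:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical parser, for illustration only. */
struct parser {
    int *post_deps;
    size_t num_post_deps;
};

static int setup_deps(struct parser *p, size_t n)
{
    p->post_deps = calloc(n, sizeof(*p->post_deps));
    if (!p->post_deps)          /* check the allocation before using it */
        return -ENOMEM;
    p->num_post_deps = n;
    return 0;
}

static void parser_fini(struct parser *p, int error)
{
    (void)error;                /* could log or undo partial work here */
    free(p->post_deps);
    memset(p, 0, sizeof(*p));
}

static int submit_ioctl(struct parser *p, size_t n)
{
    int r;

    r = setup_deps(p, n);
    if (r)
        goto out;

    /* ... submit work here; any failure also falls through to out ... */
    r = 0;
out:
    /* Single exit: success and every error path share one cleanup call. */
    parser_fini(p, r);
    return r;
}

int main(void)
{
    struct parser p = {0};

    return submit_ioctl(&p, 4) ? 1 : 0;
}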
+2 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1062 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); 1063 } 1064 1065 - if (amdgpu_gart_size < 32) { 1066 /* gart size must be greater or equal to 32M */ 1067 dev_warn(adev->dev, "gart size (%d) too small\n", 1068 amdgpu_gart_size); 1069 - amdgpu_gart_size = 32; 1070 } 1071 1072 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { ··· 2619 r = amdgpu_bo_validate(bo->shadow); 2620 if (r) { 2621 DRM_ERROR("bo validate failed!\n"); 2622 - goto err; 2623 - } 2624 - 2625 - r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem); 2626 - if (r) { 2627 - DRM_ERROR("%p bind failed\n", bo->shadow); 2628 goto err; 2629 } 2630
··· 1062 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); 1063 } 1064 1065 + if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { 1066 /* gart size must be greater or equal to 32M */ 1067 dev_warn(adev->dev, "gart size (%d) too small\n", 1068 amdgpu_gart_size); 1069 + amdgpu_gart_size = -1; 1070 } 1071 1072 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { ··· 2619 r = amdgpu_bo_validate(bo->shadow); 2620 if (r) { 2621 DRM_ERROR("bo validate failed!\n"); 2622 goto err; 2623 } 2624
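For illustration only (not in the pull): the new check treats anything that is neither -1 nor at least 32 MB as invalid and falls back to auto instead of clamping. A compilable sketch of that validate-or-fall-back flow, with hypothetical names:

#include <stdio.h>

/* Hypothetical tunable: -1 means "pick a per-chip default later". */
static int gart_size_mb = -1;

static void validate_gart_size(void)
{
    if (gart_size_mb != -1 && gart_size_mb < 32) {
        /* gart size must be >= 32M; fall back to auto, don't clamp */
        fprintf(stderr, "gart size (%d) too small\n", gart_size_mb);
        gart_size_mb = -1;
    }
}

int main(void)
{
    gart_size_mb = 16;
    validate_gart_size();
    printf("effective request: %d (-1 = auto)\n", gart_size_mb);
    return 0;
}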
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 76 77 int amdgpu_vram_limit = 0; 78 int amdgpu_vis_vram_limit = 0; 79 - unsigned amdgpu_gart_size = 256; 80 int amdgpu_gtt_size = -1; /* auto */ 81 int amdgpu_moverate = -1; /* auto */ 82 int amdgpu_benchmarking = 0; ··· 128 MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); 129 module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); 130 131 - MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); 132 module_param_named(gartsize, amdgpu_gart_size, uint, 0600); 133 134 MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
··· 76 77 int amdgpu_vram_limit = 0; 78 int amdgpu_vis_vram_limit = 0; 79 + int amdgpu_gart_size = -1; /* auto */ 80 int amdgpu_gtt_size = -1; /* auto */ 81 int amdgpu_moverate = -1; /* auto */ 82 int amdgpu_benchmarking = 0; ··· 128 MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); 129 module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); 130 131 + MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)"); 132 module_param_named(gartsize, amdgpu_gart_size, uint, 0600); 133 134 MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
-12
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
··· 57 */ 58 59 /** 60 - * amdgpu_gart_set_defaults - set the default gart_size 61 - * 62 - * @adev: amdgpu_device pointer 63 - * 64 - * Set the default gart_size based on parameters and available VRAM. 65 - */ 66 - void amdgpu_gart_set_defaults(struct amdgpu_device *adev) 67 - { 68 - adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; 69 - } 70 - 71 - /** 72 * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table 73 * 74 * @adev: amdgpu_device pointer
··· 57 */ 58 59 /** 60 * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table 61 * 62 * @adev: amdgpu_device pointer
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
··· 56 const struct amdgpu_gart_funcs *gart_funcs; 57 }; 58 59 - void amdgpu_gart_set_defaults(struct amdgpu_device *adev); 60 int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); 61 void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); 62 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
··· 56 const struct amdgpu_gart_funcs *gart_funcs; 57 }; 58 59 int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); 60 void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); 61 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
+5 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
··· 108 * 109 * Allocate the address space for a node. 110 */ 111 - int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, 112 - struct ttm_buffer_object *tbo, 113 - const struct ttm_place *place, 114 - struct ttm_mem_reg *mem) 115 { 116 struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); 117 struct amdgpu_gtt_mgr *mgr = man->priv; ··· 143 fpfn, lpfn, mode); 144 spin_unlock(&mgr->lock); 145 146 - if (!r) { 147 mem->start = node->start; 148 - if (&tbo->mem == mem) 149 - tbo->offset = (tbo->mem.start << PAGE_SHIFT) + 150 - tbo->bdev->man[tbo->mem.mem_type].gpu_offset; 151 - } 152 153 return r; 154 }
··· 108 * 109 * Allocate the address space for a node. 110 */ 111 + static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, 112 + struct ttm_buffer_object *tbo, 113 + const struct ttm_place *place, 114 + struct ttm_mem_reg *mem) 115 { 116 struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); 117 struct amdgpu_gtt_mgr *mgr = man->priv; ··· 143 fpfn, lpfn, mode); 144 spin_unlock(&mgr->lock); 145 146 + if (!r) 147 mem->start = node->start; 148 149 return r; 150 }
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
··· 221 222 spin_lock_init(&adev->irq.lock); 223 224 - /* Disable vblank irqs aggressively for power-saving */ 225 - adev->ddev->vblank_disable_immediate = true; 226 227 r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); 228 if (r) {
··· 221 222 spin_lock_init(&adev->irq.lock); 223 224 + if (!adev->enable_virtual_display) 225 + /* Disable vblank irqs aggressively for power-saving */ 226 + adev->ddev->vblank_disable_immediate = true; 227 228 r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); 229 if (r) {
+23 -23
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 91 92 if (domain & AMDGPU_GEM_DOMAIN_GTT) { 93 places[c].fpfn = 0; 94 - places[c].lpfn = 0; 95 places[c].flags = TTM_PL_FLAG_TT; 96 if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) 97 places[c].flags |= TTM_PL_FLAG_WC | ··· 449 if (bo->shadow) 450 return 0; 451 452 - bo->flags |= AMDGPU_GEM_CREATE_SHADOW; 453 - memset(&placements, 0, 454 - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); 455 - 456 - amdgpu_ttm_placement_init(adev, &placement, 457 - placements, AMDGPU_GEM_DOMAIN_GTT, 458 - AMDGPU_GEM_CREATE_CPU_GTT_USWC); 459 460 r = amdgpu_bo_create_restricted(adev, size, byte_align, true, 461 AMDGPU_GEM_DOMAIN_GTT, 462 - AMDGPU_GEM_CREATE_CPU_GTT_USWC, 463 NULL, &placement, 464 bo->tbo.resv, 465 0, ··· 486 { 487 struct ttm_placement placement = {0}; 488 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; 489 int r; 490 491 - memset(&placements, 0, 492 - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); 493 494 - amdgpu_ttm_placement_init(adev, &placement, 495 - placements, domain, flags); 496 - 497 - r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, 498 - domain, flags, sg, &placement, 499 - resv, init_value, bo_ptr); 500 if (r) 501 return r; 502 503 - if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { 504 - if (!resv) { 505 - r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL); 506 - WARN_ON(r != 0); 507 - } 508 509 r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); 510 511 if (!resv) 512 - ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock); 513 514 if (r) 515 amdgpu_bo_unref(bo_ptr);
··· 91 92 if (domain & AMDGPU_GEM_DOMAIN_GTT) { 93 places[c].fpfn = 0; 94 + if (flags & AMDGPU_GEM_CREATE_SHADOW) 95 + places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; 96 + else 97 + places[c].lpfn = 0; 98 places[c].flags = TTM_PL_FLAG_TT; 99 if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) 100 places[c].flags |= TTM_PL_FLAG_WC | ··· 446 if (bo->shadow) 447 return 0; 448 449 + memset(&placements, 0, sizeof(placements)); 450 + amdgpu_ttm_placement_init(adev, &placement, placements, 451 + AMDGPU_GEM_DOMAIN_GTT, 452 + AMDGPU_GEM_CREATE_CPU_GTT_USWC | 453 + AMDGPU_GEM_CREATE_SHADOW); 454 455 r = amdgpu_bo_create_restricted(adev, size, byte_align, true, 456 AMDGPU_GEM_DOMAIN_GTT, 457 + AMDGPU_GEM_CREATE_CPU_GTT_USWC | 458 + AMDGPU_GEM_CREATE_SHADOW, 459 NULL, &placement, 460 bo->tbo.resv, 461 0, ··· 484 { 485 struct ttm_placement placement = {0}; 486 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; 487 + uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; 488 int r; 489 490 + memset(&placements, 0, sizeof(placements)); 491 + amdgpu_ttm_placement_init(adev, &placement, placements, 492 + domain, parent_flags); 493 494 + r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, 495 + parent_flags, sg, &placement, resv, 496 + init_value, bo_ptr); 497 if (r) 498 return r; 499 500 + if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) { 501 + if (!resv) 502 + WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, 503 + NULL)); 504 505 r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); 506 507 if (!resv) 508 + reservation_object_unlock((*bo_ptr)->tbo.resv); 509 510 if (r) 511 amdgpu_bo_unref(bo_ptr);
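For illustration only (not in the pull): the hunk masks AMDGPU_GEM_CREATE_SHADOW out of the flags used for the parent BO and sets it only on the shadow, which lets the placement code key off the flag (capping lpfn at the GART size) without the creation path recursing. A toy sketch of that flag-masking pattern, with made-up flag names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag bits, for illustration only. */
#define CREATE_USWC   (1u << 0)
#define CREATE_SHADOW (1u << 1)

struct bo { uint64_t flags; struct bo *shadow; };

static int bo_create_restricted(struct bo *bo, uint64_t flags)
{
    bo->flags = flags;          /* placement code can key off CREATE_SHADOW */
    return 0;
}

static int bo_create_shadow(struct bo *bo)
{
    static struct bo shadow_storage;

    /* The shadow itself is created with the shadow bit set ... */
    bo_create_restricted(&shadow_storage, CREATE_USWC | CREATE_SHADOW);
    bo->shadow = &shadow_storage;
    return 0;
}

static int bo_create(struct bo *bo, uint64_t flags)
{
    /* ... while the parent drops it, so this path never recurses. */
    uint64_t parent_flags = flags & ~(uint64_t)CREATE_SHADOW;
    int r = bo_create_restricted(bo, parent_flags);

    if (!r && (flags & CREATE_SHADOW))
        r = bo_create_shadow(bo);
    return r;
}

int main(void)
{
    struct bo bo = {0};

    bo_create(&bo, CREATE_USWC | CREATE_SHADOW);
    printf("parent flags 0x%llx, shadow flags 0x%llx\n",
           (unsigned long long)bo.flags,
           (unsigned long long)bo.shadow->flags);
    return 0;
}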
+12 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 170 unsigned irq_type) 171 { 172 int r; 173 174 if (ring->adev == NULL) { 175 if (adev->num_rings >= AMDGPU_MAX_RINGS) ··· 188 ring->adev = adev; 189 ring->idx = adev->num_rings++; 190 adev->rings[ring->idx] = ring; 191 - r = amdgpu_fence_driver_init_ring(ring, 192 - amdgpu_sched_hw_submission); 193 if (r) 194 return r; 195 } ··· 227 return r; 228 } 229 230 - ring->ring_size = roundup_pow_of_two(max_dw * 4 * 231 - amdgpu_sched_hw_submission); 232 233 ring->buf_mask = (ring->ring_size / 4) - 1; 234 ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
··· 170 unsigned irq_type) 171 { 172 int r; 173 + int sched_hw_submission = amdgpu_sched_hw_submission; 174 + 175 + /* Set the hw submission limit higher for KIQ because 176 + * it's used for a number of gfx/compute tasks by both 177 + * KFD and KGD which may have outstanding fences and 178 + * it doesn't really use the gpu scheduler anyway; 179 + * KIQ tasks get submitted directly to the ring. 180 + */ 181 + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) 182 + sched_hw_submission = max(sched_hw_submission, 256); 183 184 if (ring->adev == NULL) { 185 if (adev->num_rings >= AMDGPU_MAX_RINGS) ··· 178 ring->adev = adev; 179 ring->idx = adev->num_rings++; 180 adev->rings[ring->idx] = ring; 181 + r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission); 182 if (r) 183 return r; 184 } ··· 218 return r; 219 } 220 221 + ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); 222 223 ring->buf_mask = (ring->ring_size / 4) - 1; 224 ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
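For illustration only (not in the pull): the ring size is max_dw dwords of 4 bytes per outstanding submission, rounded up to a power of two so the buf/ptr masks work, and KIQ now gets at least 256 submissions. A user-space sketch with a stand-in for the kernel's roundup_pow_of_two() and made-up sizes:

#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two(), for illustration. */
static unsigned long roundup_pow_of_two(unsigned long x)
{
    unsigned long r = 1;

    while (r < x)
        r <<= 1;
    return r;
}

int main(void)
{
    unsigned max_dw = 1024;             /* hypothetical per-submission dwords */
    int sched_hw_submission = 128;      /* hypothetical module default */
    int kiq_hw_submission = sched_hw_submission > 256 ?
                            sched_hw_submission : 256;

    /* Ring bytes = dwords * 4 * outstanding submissions, power-of-two sized
     * so the read/write pointers can be masked instead of wrapped manually. */
    printf("gfx ring: %lu bytes\n",
           roundup_pow_of_two((unsigned long)max_dw * 4 * sched_hw_submission));
    printf("kiq ring: %lu bytes\n",
           roundup_pow_of_two((unsigned long)max_dw * 4 * kiq_hw_submission));
    return 0;
}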
+44 -34
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 761 sg_free_table(ttm->sg); 762 } 763 764 - static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) 765 - { 766 - struct amdgpu_ttm_tt *gtt = (void *)ttm; 767 - uint64_t flags; 768 - int r; 769 - 770 - spin_lock(&gtt->adev->gtt_list_lock); 771 - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem); 772 - gtt->offset = (u64)mem->start << PAGE_SHIFT; 773 - r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, 774 - ttm->pages, gtt->ttm.dma_address, flags); 775 - 776 - if (r) { 777 - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", 778 - ttm->num_pages, gtt->offset); 779 - goto error_gart_bind; 780 - } 781 - 782 - list_add_tail(&gtt->list, &gtt->adev->gtt_list); 783 - error_gart_bind: 784 - spin_unlock(&gtt->adev->gtt_list_lock); 785 - return r; 786 - 787 - } 788 - 789 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, 790 struct ttm_mem_reg *bo_mem) 791 { 792 struct amdgpu_ttm_tt *gtt = (void*)ttm; 793 int r = 0; 794 795 if (gtt->userptr) { ··· 785 bo_mem->mem_type == AMDGPU_PL_OA) 786 return -EINVAL; 787 788 - if (amdgpu_gtt_mgr_is_allocated(bo_mem)) 789 - r = amdgpu_ttm_do_bind(ttm, bo_mem); 790 791 return r; 792 } 793 ··· 815 816 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) 817 { 818 struct ttm_tt *ttm = bo->ttm; 819 int r; 820 821 if (!ttm || amdgpu_ttm_is_bound(ttm)) 822 return 0; 823 824 - r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo, 825 - NULL, bo_mem); 826 - if (r) { 827 - DRM_ERROR("Failed to allocate GTT address space (%d)\n", r); 828 - return r; 829 - } 830 831 - return amdgpu_ttm_do_bind(ttm, bo_mem); 832 } 833 834 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
··· 761 sg_free_table(ttm->sg); 762 } 763 764 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, 765 struct ttm_mem_reg *bo_mem) 766 { 767 struct amdgpu_ttm_tt *gtt = (void*)ttm; 768 + uint64_t flags; 769 int r = 0; 770 771 if (gtt->userptr) { ··· 809 bo_mem->mem_type == AMDGPU_PL_OA) 810 return -EINVAL; 811 812 + if (!amdgpu_gtt_mgr_is_allocated(bo_mem)) 813 + return 0; 814 815 + spin_lock(&gtt->adev->gtt_list_lock); 816 + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); 817 + gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; 818 + r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, 819 + ttm->pages, gtt->ttm.dma_address, flags); 820 + 821 + if (r) { 822 + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", 823 + ttm->num_pages, gtt->offset); 824 + goto error_gart_bind; 825 + } 826 + 827 + list_add_tail(&gtt->list, &gtt->adev->gtt_list); 828 + error_gart_bind: 829 + spin_unlock(&gtt->adev->gtt_list_lock); 830 return r; 831 } 832 ··· 824 825 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) 826 { 827 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 828 struct ttm_tt *ttm = bo->ttm; 829 + struct ttm_mem_reg tmp; 830 + 831 + struct ttm_placement placement; 832 + struct ttm_place placements; 833 int r; 834 835 if (!ttm || amdgpu_ttm_is_bound(ttm)) 836 return 0; 837 838 + tmp = bo->mem; 839 + tmp.mm_node = NULL; 840 + placement.num_placement = 1; 841 + placement.placement = &placements; 842 + placement.num_busy_placement = 1; 843 + placement.busy_placement = &placements; 844 + placements.fpfn = 0; 845 + placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; 846 + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 847 848 + r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); 849 + if (unlikely(r)) 850 + return r; 851 + 852 + r = ttm_bo_move_ttm(bo, true, false, &tmp); 853 + if (unlikely(r)) 854 + ttm_bo_mem_put(bo, &tmp); 855 + else 856 + bo->offset = (bo->mem.start << PAGE_SHIFT) + 857 + bo->bdev->man[bo->mem.mem_type].gpu_offset; 858 + 859 + return r; 860 } 861 862 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
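For illustration only (not in the pull): amdgpu_ttm_bind() now asks TTM for a real GTT node below gart_size >> PAGE_SHIFT and derives the GPU-visible offset from the node's first page plus the domain's base. A tiny sketch of that offset arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

int main(void)
{
    /* Hypothetical values: the node TTM handed back and the domain base. */
    uint64_t mem_start_pfn = 0x1a000;        /* first page of the node */
    uint64_t domain_gpu_offset = 0x8000000;  /* where GTT begins in the
                                              * GPU's address space */

    uint64_t gpu_addr = (mem_start_pfn << PAGE_SHIFT) + domain_gpu_offset;

    printf("bo GPU offset: 0x%llx\n", (unsigned long long)gpu_addr);
    return 0;
}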
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 62 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; 63 64 bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); 65 - int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, 66 - struct ttm_buffer_object *tbo, 67 - const struct ttm_place *place, 68 - struct ttm_mem_reg *mem); 69 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); 70 71 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
··· 62 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; 63 64 bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); 65 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); 66 67 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+20 -26
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 165 unsigned i; 166 int r; 167 168 - if (parent->bo->shadow) { 169 - struct amdgpu_bo *shadow = parent->bo->shadow; 170 - 171 - r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); 172 - if (r) 173 - return r; 174 - } 175 - 176 if (use_cpu_for_update) { 177 r = amdgpu_bo_kmap(parent->bo, NULL); 178 if (r) ··· 1269 /* In the case of a mixed PT the PDE must point to it*/ 1270 if (p->adev->asic_type < CHIP_VEGA10 || 1271 nptes != AMDGPU_VM_PTE_COUNT(p->adev) || 1272 - p->func == amdgpu_vm_do_copy_ptes || 1273 !(flags & AMDGPU_PTE_VALID)) { 1274 1275 dst = amdgpu_bo_gpu_offset(entry->bo); ··· 1286 entry->addr = (dst | flags); 1287 1288 if (use_cpu_update) { 1289 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); 1290 pde = pd_addr + (entry - parent->entries) * 8; 1291 amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); 1292 } else { 1293 if (parent->bo->shadow) { 1294 pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); ··· 1616 * 1617 * @adev: amdgpu_device pointer 1618 * @exclusive: fence we need to sync to 1619 - * @gtt_flags: flags as they are used for GTT 1620 * @pages_addr: DMA addresses to use for mapping 1621 * @vm: requested vm 1622 * @mapping: mapped range and flags to use for the update ··· 1629 */ 1630 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, 1631 struct dma_fence *exclusive, 1632 - uint64_t gtt_flags, 1633 dma_addr_t *pages_addr, 1634 struct amdgpu_vm *vm, 1635 struct amdgpu_bo_va_mapping *mapping, ··· 1683 } 1684 1685 if (pages_addr) { 1686 - if (flags == gtt_flags) 1687 - src = adev->gart.table_addr + 1688 - (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8; 1689 - else 1690 - max_entries = min(max_entries, 16ull * 1024ull); 1691 addr = 0; 1692 } else if (flags & AMDGPU_PTE_VALID) { 1693 addr += adev->vm_manager.vram_base_offset; ··· 1728 struct amdgpu_vm *vm = bo_va->base.vm; 1729 struct amdgpu_bo_va_mapping *mapping; 1730 dma_addr_t *pages_addr = NULL; 1731 - uint64_t gtt_flags, flags; 1732 struct ttm_mem_reg *mem; 1733 struct drm_mm_node *nodes; 1734 struct dma_fence *exclusive; 1735 int r; 1736 1737 if (clear || !bo_va->base.bo) { ··· 1751 exclusive = reservation_object_get_excl(bo->tbo.resv); 1752 } 1753 1754 - if (bo) { 1755 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); 1756 - gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) && 1757 - adev == amdgpu_ttm_adev(bo->tbo.bdev)) ? 1758 - flags : 0; 1759 - } else { 1760 flags = 0x0; 1761 - gtt_flags = ~0x0; 1762 - } 1763 1764 spin_lock(&vm->status_lock); 1765 if (!list_empty(&bo_va->base.vm_status)) ··· 1762 spin_unlock(&vm->status_lock); 1763 1764 list_for_each_entry(mapping, &bo_va->invalids, list) { 1765 - r = amdgpu_vm_bo_split_mapping(adev, exclusive, 1766 - gtt_flags, pages_addr, vm, 1767 mapping, flags, nodes, 1768 &bo_va->last_pt_update); 1769 if (r)
··· 165 unsigned i; 166 int r; 167 168 if (use_cpu_for_update) { 169 r = amdgpu_bo_kmap(parent->bo, NULL); 170 if (r) ··· 1277 /* In the case of a mixed PT the PDE must point to it*/ 1278 if (p->adev->asic_type < CHIP_VEGA10 || 1279 nptes != AMDGPU_VM_PTE_COUNT(p->adev) || 1280 + p->src || 1281 !(flags & AMDGPU_PTE_VALID)) { 1282 1283 dst = amdgpu_bo_gpu_offset(entry->bo); ··· 1294 entry->addr = (dst | flags); 1295 1296 if (use_cpu_update) { 1297 + /* In case a huge page is replaced with a system 1298 + * memory mapping, p->pages_addr != NULL and 1299 + * amdgpu_vm_cpu_set_ptes would try to translate dst 1300 + * through amdgpu_vm_map_gart. But dst is already a 1301 + * GPU address (of the page table). Disable 1302 + * amdgpu_vm_map_gart temporarily. 1303 + */ 1304 + dma_addr_t *tmp; 1305 + 1306 + tmp = p->pages_addr; 1307 + p->pages_addr = NULL; 1308 + 1309 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); 1310 pde = pd_addr + (entry - parent->entries) * 8; 1311 amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); 1312 + 1313 + p->pages_addr = tmp; 1314 } else { 1315 if (parent->bo->shadow) { 1316 pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); ··· 1610 * 1611 * @adev: amdgpu_device pointer 1612 * @exclusive: fence we need to sync to 1613 * @pages_addr: DMA addresses to use for mapping 1614 * @vm: requested vm 1615 * @mapping: mapped range and flags to use for the update ··· 1624 */ 1625 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, 1626 struct dma_fence *exclusive, 1627 dma_addr_t *pages_addr, 1628 struct amdgpu_vm *vm, 1629 struct amdgpu_bo_va_mapping *mapping, ··· 1679 } 1680 1681 if (pages_addr) { 1682 + max_entries = min(max_entries, 16ull * 1024ull); 1683 addr = 0; 1684 } else if (flags & AMDGPU_PTE_VALID) { 1685 addr += adev->vm_manager.vram_base_offset; ··· 1728 struct amdgpu_vm *vm = bo_va->base.vm; 1729 struct amdgpu_bo_va_mapping *mapping; 1730 dma_addr_t *pages_addr = NULL; 1731 struct ttm_mem_reg *mem; 1732 struct drm_mm_node *nodes; 1733 struct dma_fence *exclusive; 1734 + uint64_t flags; 1735 int r; 1736 1737 if (clear || !bo_va->base.bo) { ··· 1751 exclusive = reservation_object_get_excl(bo->tbo.resv); 1752 } 1753 1754 + if (bo) 1755 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); 1756 + else 1757 flags = 0x0; 1758 1759 spin_lock(&vm->status_lock); 1760 if (!list_empty(&bo_va->base.vm_status)) ··· 1767 spin_unlock(&vm->status_lock); 1768 1769 list_for_each_entry(mapping, &bo_va->invalids, list) { 1770 + r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, 1771 mapping, flags, nodes, 1772 &bo_va->last_pt_update); 1773 if (r)
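For illustration only (not in the pull): the interesting bit is the temporary p->pages_addr = NULL around the CPU PDE write, so an address that is already a GPU address is not pushed through amdgpu_vm_map_gart again. A toy sketch of that save/clear/restore pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical update context: pages_addr, when set, means "dst still
 * needs a GART lookup before it can be written into a PTE/PDE". */
struct update_ctx { const uint64_t *pages_addr; };

static uint64_t translate(const struct update_ctx *ctx, uint64_t dst)
{
    return ctx->pages_addr ? ctx->pages_addr[dst] : dst;
}

static void write_pde(struct update_ctx *ctx, uint64_t *pde, uint64_t dst)
{
    /* dst is already a GPU address here, so suppress translation. */
    const uint64_t *saved = ctx->pages_addr;

    ctx->pages_addr = NULL;
    *pde = translate(ctx, dst);
    ctx->pages_addr = saved;
}

int main(void)
{
    uint64_t table[1] = { 0xdeadbeef };
    struct update_ctx ctx = { .pages_addr = table };
    uint64_t pde = 0;

    write_pde(&ctx, &pde, 0x123000);
    printf("pde = 0x%llx\n", (unsigned long long)pde);
    return 0;
}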
+6 -6
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 4579 mqd->compute_misc_reserved = 0x00000003; 4580 if (!(adev->flags & AMD_IS_APU)) { 4581 mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr 4582 - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); 4583 mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr 4584 - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); 4585 } 4586 eop_base_addr = ring->eop_gpu_addr >> 8; 4587 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; ··· 4768 mutex_unlock(&adev->srbm_mutex); 4769 } else { 4770 memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); 4771 - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; 4772 - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; 4773 mutex_lock(&adev->srbm_mutex); 4774 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4775 gfx_v8_0_mqd_init(ring); ··· 4792 4793 if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { 4794 memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); 4795 - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; 4796 - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; 4797 mutex_lock(&adev->srbm_mutex); 4798 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4799 gfx_v8_0_mqd_init(ring);
··· 4579 mqd->compute_misc_reserved = 0x00000003; 4580 if (!(adev->flags & AMD_IS_APU)) { 4581 mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr 4582 + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); 4583 mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr 4584 + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); 4585 } 4586 eop_base_addr = ring->eop_gpu_addr >> 8; 4587 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; ··· 4768 mutex_unlock(&adev->srbm_mutex); 4769 } else { 4770 memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); 4771 + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 4772 + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 4773 mutex_lock(&adev->srbm_mutex); 4774 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4775 gfx_v8_0_mqd_init(ring); ··· 4792 4793 if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { 4794 memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); 4795 + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 4796 + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 4797 mutex_lock(&adev->srbm_mutex); 4798 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4799 gfx_v8_0_mqd_init(ring);
+2 -3
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
··· 124 125 static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) 126 { 127 - uint32_t tmp, field; 128 129 /* Setup L2 cache */ 130 tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); ··· 143 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 144 WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); 145 146 - field = adev->vm_manager.fragment_size; 147 tmp = mmVM_L2_CNTL3_DEFAULT; 148 - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); 149 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); 150 WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); 151
··· 124 125 static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) 126 { 127 + uint32_t tmp; 128 129 /* Setup L2 cache */ 130 tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); ··· 143 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 144 WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); 145 146 tmp = mmVM_L2_CNTL3_DEFAULT; 147 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); 148 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); 149 WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); 150
+18 -1
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 332 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 333 adev->mc.visible_vram_size = adev->mc.aper_size; 334 335 - amdgpu_gart_set_defaults(adev); 336 gmc_v6_0_vram_gtt_location(adev, &adev->mc); 337 338 return 0;
··· 332 adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 333 adev->mc.visible_vram_size = adev->mc.aper_size; 334 335 + /* set the gart size */ 336 + if (amdgpu_gart_size == -1) { 337 + switch (adev->asic_type) { 338 + case CHIP_HAINAN: /* no MM engines */ 339 + default: 340 + adev->mc.gart_size = 256ULL << 20; 341 + break; 342 + case CHIP_VERDE: /* UVD, VCE do not support GPUVM */ 343 + case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */ 344 + case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */ 345 + case CHIP_OLAND: /* UVD, VCE do not support GPUVM */ 346 + adev->mc.gart_size = 1024ULL << 20; 347 + break; 348 + } 349 + } else { 350 + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; 351 + } 352 + 353 gmc_v6_0_vram_gtt_location(adev, &adev->mc); 354 355 return 0;
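For illustration only (not in the pull): with gartsize = -1 each gmc version now picks its own default, 1 GB where UVD/VCE lack GPUVM support and 256 MB otherwise, and any explicit value is converted from MB with a 64-bit shift. A compilable sketch of that selection, using made-up enum values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical chip ids, mirroring the switch in the diff. */
enum chip { CHIP_HAINAN, CHIP_VERDE, CHIP_TAHITI, CHIP_PITCAIRN, CHIP_OLAND };

static uint64_t default_gart_bytes(enum chip asic, int override_mb)
{
    if (override_mb != -1)
        return (uint64_t)override_mb << 20;   /* MB -> bytes */

    switch (asic) {
    case CHIP_VERDE:
    case CHIP_TAHITI:
    case CHIP_PITCAIRN:
    case CHIP_OLAND:
        return 1024ULL << 20;   /* 1 GiB where UVD/VCE lack GPUVM */
    case CHIP_HAINAN:
    default:
        return 256ULL << 20;    /* 256 MiB otherwise */
    }
}

int main(void)
{
    printf("tahiti auto: %llu bytes\n",
           (unsigned long long)default_gart_bytes(CHIP_TAHITI, -1));
    printf("user 512M:   %llu bytes\n",
           (unsigned long long)default_gart_bytes(CHIP_HAINAN, 512));
    return 0;
}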
+21 -1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 386 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 387 adev->mc.visible_vram_size = adev->mc.real_vram_size; 388 389 - amdgpu_gart_set_defaults(adev); 390 gmc_v7_0_vram_gtt_location(adev, &adev->mc); 391 392 return 0;
··· 386 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 387 adev->mc.visible_vram_size = adev->mc.real_vram_size; 388 389 + /* set the gart size */ 390 + if (amdgpu_gart_size == -1) { 391 + switch (adev->asic_type) { 392 + case CHIP_TOPAZ: /* no MM engines */ 393 + default: 394 + adev->mc.gart_size = 256ULL << 20; 395 + break; 396 + #ifdef CONFIG_DRM_AMDGPU_CIK 397 + case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ 398 + case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ 399 + case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ 400 + case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ 401 + case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ 402 + adev->mc.gart_size = 1024ULL << 20; 403 + break; 404 + #endif 405 + } 406 + } else { 407 + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; 408 + } 409 + 410 gmc_v7_0_vram_gtt_location(adev, &adev->mc); 411 412 return 0;
+20 -1
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 562 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 563 adev->mc.visible_vram_size = adev->mc.real_vram_size; 564 565 - amdgpu_gart_set_defaults(adev); 566 gmc_v8_0_vram_gtt_location(adev, &adev->mc); 567 568 return 0;
··· 562 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 563 adev->mc.visible_vram_size = adev->mc.real_vram_size; 564 565 + /* set the gart size */ 566 + if (amdgpu_gart_size == -1) { 567 + switch (adev->asic_type) { 568 + case CHIP_POLARIS11: /* all engines support GPUVM */ 569 + case CHIP_POLARIS10: /* all engines support GPUVM */ 570 + case CHIP_POLARIS12: /* all engines support GPUVM */ 571 + default: 572 + adev->mc.gart_size = 256ULL << 20; 573 + break; 574 + case CHIP_TONGA: /* UVD, VCE do not support GPUVM */ 575 + case CHIP_FIJI: /* UVD, VCE do not support GPUVM */ 576 + case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */ 577 + case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */ 578 + adev->mc.gart_size = 1024ULL << 20; 579 + break; 580 + } 581 + } else { 582 + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; 583 + } 584 + 585 gmc_v8_0_vram_gtt_location(adev, &adev->mc); 586 587 return 0;
+15 -1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 499 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 500 adev->mc.visible_vram_size = adev->mc.real_vram_size; 501 502 - amdgpu_gart_set_defaults(adev); 503 gmc_v9_0_vram_gtt_location(adev, &adev->mc); 504 505 return 0;
··· 499 if (adev->mc.visible_vram_size > adev->mc.real_vram_size) 500 adev->mc.visible_vram_size = adev->mc.real_vram_size; 501 502 + /* set the gart size */ 503 + if (amdgpu_gart_size == -1) { 504 + switch (adev->asic_type) { 505 + case CHIP_VEGA10: /* all engines support GPUVM */ 506 + default: 507 + adev->mc.gart_size = 256ULL << 20; 508 + break; 509 + case CHIP_RAVEN: /* DCE SG support */ 510 + adev->mc.gart_size = 1024ULL << 20; 511 + break; 512 + } 513 + } else { 514 + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; 515 + } 516 + 517 gmc_v9_0_vram_gtt_location(adev, &adev->mc); 518 519 return 0;
+2 -3
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
··· 138 139 static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) 140 { 141 - uint32_t tmp, field; 142 143 /* Setup L2 cache */ 144 tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); ··· 157 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 158 WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); 159 160 - field = adev->vm_manager.fragment_size; 161 tmp = mmVM_L2_CNTL3_DEFAULT; 162 - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); 163 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); 164 WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); 165
··· 138 139 static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) 140 { 141 + uint32_t tmp; 142 143 /* Setup L2 cache */ 144 tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); ··· 157 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 158 WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); 159 160 tmp = mmVM_L2_CNTL3_DEFAULT; 161 + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); 162 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); 163 WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); 164
+2 -2
drivers/gpu/drm/amd/include/vi_structs.h
··· 419 struct vi_mqd mqd; 420 uint32_t wptr_poll_mem; 421 uint32_t rptr_report_mem; 422 - uint32_t dyamic_cu_mask; 423 - uint32_t dyamic_rb_mask; 424 }; 425 426 struct cz_mqd {
··· 419 struct vi_mqd mqd; 420 uint32_t wptr_poll_mem; 421 uint32_t rptr_report_mem; 422 + uint32_t dynamic_cu_mask; 423 + uint32_t dynamic_rb_mask; 424 }; 425 426 struct cz_mqd {
+8 -3
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
··· 1558 */ 1559 1560 static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, 1561 - uint32_t gfx_clock, PllSetting_t *current_gfxclk_level) 1562 { 1563 struct phm_ppt_v2_information *table_info = 1564 (struct phm_ppt_v2_information *)(hwmgr->pptable); ··· 1609 current_gfxclk_level->SsSlewFrac = 1610 cpu_to_le16(dividers.usPll_ss_slew_frac); 1611 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid); 1612 1613 return 0; 1614 } ··· 1692 for (i = 0; i < dpm_table->count; i++) { 1693 result = vega10_populate_single_gfx_level(hwmgr, 1694 dpm_table->dpm_levels[i].value, 1695 - &(pp_table->GfxclkLevel[i])); 1696 if (result) 1697 return result; 1698 } ··· 1702 while (i < NUM_GFXCLK_DPM_LEVELS) { 1703 result = vega10_populate_single_gfx_level(hwmgr, 1704 dpm_table->dpm_levels[j].value, 1705 - &(pp_table->GfxclkLevel[i])); 1706 if (result) 1707 return result; 1708 i++;
··· 1558 */ 1559 1560 static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, 1561 + uint32_t gfx_clock, PllSetting_t *current_gfxclk_level, 1562 + uint32_t *acg_freq) 1563 { 1564 struct phm_ppt_v2_information *table_info = 1565 (struct phm_ppt_v2_information *)(hwmgr->pptable); ··· 1608 current_gfxclk_level->SsSlewFrac = 1609 cpu_to_le16(dividers.usPll_ss_slew_frac); 1610 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid); 1611 + 1612 + *acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */ 1613 1614 return 0; 1615 } ··· 1689 for (i = 0; i < dpm_table->count; i++) { 1690 result = vega10_populate_single_gfx_level(hwmgr, 1691 dpm_table->dpm_levels[i].value, 1692 + &(pp_table->GfxclkLevel[i]), 1693 + &(pp_table->AcgFreqTable[i])); 1694 if (result) 1695 return result; 1696 } ··· 1698 while (i < NUM_GFXCLK_DPM_LEVELS) { 1699 result = vega10_populate_single_gfx_level(hwmgr, 1700 dpm_table->dpm_levels[j].value, 1701 + &(pp_table->GfxclkLevel[i]), 1702 + &(pp_table->AcgFreqTable[i])); 1703 if (result) 1704 return result; 1705 i++;
+4 -2
drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
··· 315 uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS]; 316 GbVdroopTable_t AcgBtcGbVdroopTable; 317 QuadraticInt_t AcgAvfsGb; 318 - uint32_t Reserved[4]; 319 320 /* Padding - ignore */ 321 - uint32_t MmHubPadding[7]; /* SMU internal use */ 322 323 } PPTable_t; 324
··· 315 uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS]; 316 GbVdroopTable_t AcgBtcGbVdroopTable; 317 QuadraticInt_t AcgAvfsGb; 318 + 319 + /* ACG Frequency Table, in Mhz */ 320 + uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS]; 321 322 /* Padding - ignore */ 323 + uint32_t MmHubPadding[3]; /* SMU internal use */ 324 325 } PPTable_t; 326
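For illustration only (not in the pull), and assuming NUM_GFXCLK_DPM_LEVELS is 8 as elsewhere in this interface: the four dropped Reserved words plus the four words trimmed from MmHubPadding exactly make room for the eight new AcgFreqTable entries, so the table shared with the SMU keeps its size. A toy static assert of that bookkeeping over simplified, hypothetical struct tails:

#include <stdint.h>

#define NUM_GFXCLK_DPM_LEVELS 8   /* assumed value, see above */

/* Old and new tails of a simplified, hypothetical PPTable layout. */
struct tail_old {
    uint32_t Reserved[4];
    uint32_t MmHubPadding[7];
};

struct tail_new {
    uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS];
    uint32_t MmHubPadding[3];
};

/* The firmware-shared table must not change size across the rework. */
_Static_assert(sizeof(struct tail_old) == sizeof(struct tail_new),
               "PPTable tail size must stay constant");

int main(void) { return 0; }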
+2 -1
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
··· 380 entry->num_register_entries = 0; 381 } 382 383 - if (fw_type == UCODE_ID_RLC_G) 384 entry->flags = 1; 385 else 386 entry->flags = 0;
··· 380 entry->num_register_entries = 0; 381 } 382 383 + if ((fw_type == UCODE_ID_RLC_G) 384 + || (fw_type == UCODE_ID_CP_MEC)) 385 entry->flags = 1; 386 else 387 entry->flags = 0;
+19 -4
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
··· 205 struct amd_sched_entity *entity) 206 { 207 struct amd_sched_rq *rq = entity->rq; 208 209 if (!amd_sched_entity_is_initialized(sched, entity)) 210 return; 211 - 212 /** 213 * The client will not queue more IBs during this fini, consume existing 214 - * queued IBs 215 */ 216 - wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); 217 - 218 amd_sched_rq_remove_entity(rq, entity); 219 kfifo_free(&entity->job_queue); 220 } 221
··· 205 struct amd_sched_entity *entity) 206 { 207 struct amd_sched_rq *rq = entity->rq; 208 + int r; 209 210 if (!amd_sched_entity_is_initialized(sched, entity)) 211 return; 212 /** 213 * The client will not queue more IBs during this fini, consume existing 214 + * queued IBs or discard them on SIGKILL 215 */ 216 + if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) 217 + r = -ERESTARTSYS; 218 + else 219 + r = wait_event_killable(sched->job_scheduled, 220 + amd_sched_entity_is_idle(entity)); 221 amd_sched_rq_remove_entity(rq, entity); 222 + if (r) { 223 + struct amd_sched_job *job; 224 + 225 + /* Park the kernel for a moment to make sure it isn't processing 226 + * our enity. 227 + */ 228 + kthread_park(sched->thread); 229 + kthread_unpark(sched->thread); 230 + while (kfifo_out(&entity->job_queue, &job, sizeof(job))) 231 + sched->ops->free_job(job); 232 + 233 + } 234 kfifo_free(&entity->job_queue); 235 } 236
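For illustration only (not in the pull): when the killable wait is interrupted (or the owner already got SIGKILL), the entity's remaining jobs are pulled out of the kfifo and freed instead of waiting forever. A user-space sketch of that drain-and-free teardown with a made-up fixed-size fifo:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the entity's job fifo, for illustration only. */
struct job { int id; };
struct job_fifo { struct job *slots[8]; unsigned head, tail; };

static int fifo_out(struct job_fifo *f, struct job **job)
{
    if (f->head == f->tail)
        return 0;               /* empty */
    *job = f->slots[f->head++ & 7];
    return 1;
}

static void free_job(struct job *job)
{
    printf("discarding job %d\n", job->id);
    free(job);
}

static void entity_fini(struct job_fifo *f, int wait_failed)
{
    struct job *job;

    if (!wait_failed)
        return;                 /* normal path: queue already drained */

    /* Interrupted (e.g. the owner was SIGKILLed): don't wait forever,
     * just pull every queued job out and release it. */
    while (fifo_out(f, &job))
        free_job(job);
}

int main(void)
{
    struct job_fifo f = { .head = 0, .tail = 0 };

    for (int i = 0; i < 3; i++) {
        struct job *j = malloc(sizeof(*j));

        if (!j)
            break;
        j->id = i;
        f.slots[f.tail++ & 7] = j;
    }
    entity_fini(&f, 1);         /* pretend the killable wait failed */
    return 0;
}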
+2 -2
drivers/gpu/drm/ttm/ttm_bo.c
··· 109 struct ttm_bo_global *glob = 110 container_of(kobj, struct ttm_bo_global, kobj); 111 112 - return snprintf(buffer, PAGE_SIZE, "%lu\n", 113 - (unsigned long) atomic_read(&glob->bo_count)); 114 } 115 116 static struct attribute *ttm_bo_global_attrs[] = {
··· 109 struct ttm_bo_global *glob = 110 container_of(kobj, struct ttm_bo_global, kobj); 111 112 + return snprintf(buffer, PAGE_SIZE, "%d\n", 113 + atomic_read(&glob->bo_count)); 114 } 115 116 static struct attribute *ttm_bo_global_attrs[] = {
+1
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 469 * TODO: Explicit member copy would probably be better here. 470 */ 471 472 INIT_LIST_HEAD(&fbo->ddestroy); 473 INIT_LIST_HEAD(&fbo->lru); 474 INIT_LIST_HEAD(&fbo->swap);
··· 469 * TODO: Explicit member copy would probably be better here. 470 */ 471 472 + atomic_inc(&bo->glob->bo_count); 473 INIT_LIST_HEAD(&fbo->ddestroy); 474 INIT_LIST_HEAD(&fbo->lru); 475 INIT_LIST_HEAD(&fbo->swap);