1/*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26
27#include <linux/io-64-nonatomic-lo-hi.h>
28#ifdef CONFIG_X86
29#include <asm/hypervisor.h>
30#endif
31
32#include "amdgpu.h"
33#include "amdgpu_gmc.h"
34#include "amdgpu_ras.h"
35#include "amdgpu_reset.h"
36#include "amdgpu_xgmi.h"
37
38#include <drm/drm_drv.h>
39#include <drm/ttm/ttm_tt.h>
40
41/**
42 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
43 *
44 * @adev: amdgpu_device pointer
45 *
46 * Allocate video memory for pdb0 and map it for CPU access.
47 * Returns 0 for success, error for failure.
48 */
49int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
50{
51 int r;
52 struct amdgpu_bo_param bp;
53 u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
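	/* Each PDE0 entry, used here as a PTE, covers (2^vmid0_page_table_block_size) * 2MB,
	 * hence the shift of block_size + 21; npdes is the number of such entries needed
	 * to cover the whole hive's VRAM.
	 */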
54 uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
55 uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
56
57 memset(&bp, 0, sizeof(bp));
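	/* one 8-byte entry per PDE0, plus one extra entry that will point to the GART PTB */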
58 bp.size = PAGE_ALIGN((npdes + 1) * 8);
59 bp.byte_align = PAGE_SIZE;
60 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
61 bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
62 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
63 bp.type = ttm_bo_type_kernel;
64 bp.resv = NULL;
65 bp.bo_ptr_size = sizeof(struct amdgpu_bo);
66
67 r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
68 if (r)
69 return r;
70
71 r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
72 if (unlikely(r != 0))
73 goto bo_reserve_failure;
74
75 r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
76 if (r)
77 goto bo_pin_failure;
78 r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
79 if (r)
80 goto bo_kmap_failure;
81
82 amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
83 return 0;
84
85bo_kmap_failure:
86 amdgpu_bo_unpin(adev->gmc.pdb0_bo);
87bo_pin_failure:
88 amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
89bo_reserve_failure:
90 amdgpu_bo_unref(&adev->gmc.pdb0_bo);
91 return r;
92}
93
94/**
95 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
96 *
97 * @bo: the BO to get the PDE for
98 * @level: the level in the PD hierarchy
99 * @addr: resulting addr
100 * @flags: resulting flags
101 *
102 * Get the address and flags to be used for a PDE (Page Directory Entry).
103 */
104void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
105 uint64_t *addr, uint64_t *flags)
106{
107 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
108
109 switch (bo->tbo.resource->mem_type) {
110 case TTM_PL_TT:
111 *addr = bo->tbo.ttm->dma_address[0];
112 break;
113 case TTM_PL_VRAM:
114 *addr = amdgpu_bo_gpu_offset(bo);
115 break;
116 default:
117 *addr = 0;
118 break;
119 }
120 *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
121 amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
122}
123
124/*
125 * amdgpu_gmc_pd_addr - return the address of the root directory
126 */
127uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
128{
129 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
130 uint64_t pd_addr;
131
132 /* TODO: move that into ASIC specific code */
133 if (adev->asic_type >= CHIP_VEGA10) {
134 uint64_t flags = AMDGPU_PTE_VALID;
135
136 amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
137 pd_addr |= flags;
138 } else {
139 pd_addr = amdgpu_bo_gpu_offset(bo);
140 }
141 return pd_addr;
142}
143
144/**
145 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
146 *
147 * @adev: amdgpu_device pointer
148 * @cpu_pt_addr: cpu address of the page table
149 * @gpu_page_idx: entry in the page table to update
150 * @addr: dst addr to write into pte/pde
151 * @flags: access flags
152 *
153 * Update the page tables using CPU.
154 */
155int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
156 uint32_t gpu_page_idx, uint64_t addr,
157 uint64_t flags)
158{
159 void __iomem *ptr = (void *)cpu_pt_addr;
160 uint64_t value;
161
162 /*
163 * The following is for PTE only. GART does not have PDEs.
164 */
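	/* keep a 48-bit, 4K-aligned address and OR in the access flags */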
165 value = addr & 0x0000FFFFFFFFF000ULL;
166 value |= flags;
167 writeq(value, ptr + (gpu_page_idx * 8));
168
169 return 0;
170}
171
172/**
173 * amdgpu_gmc_agp_addr - return the address in the AGP address space
174 *
175 * @bo: TTM BO which needs the address, must be in GTT domain
176 *
177 * Tries to figure out how to access the BO through the AGP aperture. Returns
178 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
179 */
180uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
181{
182 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
183
184 if (!bo->ttm)
185 return AMDGPU_BO_INVALID_OFFSET;
186
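	/* the AGP aperture only helps for single, uncached pages that fall inside it */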
187 if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
188 return AMDGPU_BO_INVALID_OFFSET;
189
190 if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
191 return AMDGPU_BO_INVALID_OFFSET;
192
193 return adev->gmc.agp_start + bo->ttm->dma_address[0];
194}
195
196/**
197 * amdgpu_gmc_vram_location - try to find VRAM location
198 *
199 * @adev: amdgpu device structure holding all necessary information
200 * @mc: memory controller structure holding memory information
201 * @base: base address at which to put VRAM
202 *
203 * Function will try to place VRAM at base address provided
204 * as parameter.
205 */
206void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
207 u64 base)
208{
209 uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20;
210 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
211
212 mc->vram_start = base;
213 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
214 if (limit < mc->real_vram_size)
215 mc->real_vram_size = limit;
216
217 if (vis_limit && vis_limit < mc->visible_vram_size)
218 mc->visible_vram_size = vis_limit;
219
220 if (mc->real_vram_size < mc->visible_vram_size)
221 mc->visible_vram_size = mc->real_vram_size;
222
223 if (mc->xgmi.num_physical_nodes == 0) {
224 mc->fb_start = mc->vram_start;
225 mc->fb_end = mc->vram_end;
226 }
227 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
228 mc->mc_vram_size >> 20, mc->vram_start,
229 mc->vram_end, mc->real_vram_size >> 20);
230}
231
232/** amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
233 *
234 * @adev: amdgpu device structure holding all necessary information
235 * @mc: memory controller structure holding memory information
236 *
237 * This function is only used when GART is used for FB translation. In that
238 * case, we use the sysvm aperture (vmid0 page tables) for both vram
239 * and gart (aka system memory) access.
240 *
241 * GPUVM (and our organization of vmid0 page tables) requires the sysvm
242 * aperture to be placed at an address aligned to 8 times the native
243 * page size. For example, if vm_context0_cntl.page_table_block_size
244 * is 12, the native page size is 8G (2M * 2^12), so the sysvm aperture
245 * must start at a 64G-aligned address. For simplicity, we just put the
246 * sysvm aperture at address 0: vram starts at address 0 and gart follows right after it.
247 */
248void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
249{
250 u64 hive_vram_start = 0;
251 u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
252 mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
253 mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
254 mc->gart_start = hive_vram_end + 1;
255 mc->gart_end = mc->gart_start + mc->gart_size - 1;
256 mc->fb_start = hive_vram_start;
257 mc->fb_end = hive_vram_end;
258 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
259 mc->mc_vram_size >> 20, mc->vram_start,
260 mc->vram_end, mc->real_vram_size >> 20);
261 dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
262 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
263}
264
265/**
266 * amdgpu_gmc_gart_location - try to find GART location
267 *
268 * @adev: amdgpu device structure holding all necessary information
269 * @mc: memory controller structure holding memory information
270 * @gart_placement: GART placement policy with respect to VRAM
271 *
272 * Function will try to place GART before or after VRAM.
273 * If the GART size is bigger than the space left, we adjust the GART size.
274 * Thus this function never fails.
275 */
276void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
277 enum amdgpu_gart_placement gart_placement)
278{
279 const uint64_t four_gb = 0x100000000ULL;
280 u64 size_af, size_bf;
281 /*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
282 u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
283
284 /* VCE doesn't like it when BOs cross a 4GB segment, so align
285 * the GART base on a 4GB boundary as well.
286 */
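	/* space available below (before) and above (after) the FB region */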
287 size_bf = mc->fb_start;
288 size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);
289
290 if (mc->gart_size > max(size_bf, size_af)) {
291 dev_warn(adev->dev, "limiting GART\n");
292 mc->gart_size = max(size_bf, size_af);
293 }
294
295 switch (gart_placement) {
296 case AMDGPU_GART_PLACEMENT_HIGH:
297 mc->gart_start = max_mc_address - mc->gart_size + 1;
298 break;
299 case AMDGPU_GART_PLACEMENT_LOW:
300 mc->gart_start = 0;
301 break;
302 case AMDGPU_GART_PLACEMENT_BEST_FIT:
303 default:
304 if ((size_bf >= mc->gart_size && size_bf < size_af) ||
305 (size_af < mc->gart_size))
306 mc->gart_start = 0;
307 else
308 mc->gart_start = max_mc_address - mc->gart_size + 1;
309 break;
310 }
311
312 mc->gart_start &= ~(four_gb - 1);
313 mc->gart_end = mc->gart_start + mc->gart_size - 1;
314 dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
315 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
316}
317
318/**
319 * amdgpu_gmc_agp_location - try to find AGP location
320 * @adev: amdgpu device structure holding all necessary information
321 * @mc: memory controller structure holding memory information
322 *
323 * Function will try to find a place for the AGP BAR in the MC address
324 * space.
325 *
326 * AGP BAR will be assigned the largest available hole in the address space.
327 * Should be called after VRAM and GART locations are setup.
328 */
329void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
330{
331 const uint64_t sixteen_gb = 1ULL << 34;
332 const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
333 u64 size_af, size_bf;
334
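	/* size_bf/size_af: the 16GB-aligned holes before and after the FB/GART ranges */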
335 if (mc->fb_start > mc->gart_start) {
336 size_bf = (mc->fb_start & sixteen_gb_mask) -
337 ALIGN(mc->gart_end + 1, sixteen_gb);
338 size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
339 } else {
340 size_bf = mc->fb_start & sixteen_gb_mask;
341 size_af = (mc->gart_start & sixteen_gb_mask) -
342 ALIGN(mc->fb_end + 1, sixteen_gb);
343 }
344
345 if (size_bf > size_af) {
346 mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
347 mc->agp_size = size_bf;
348 } else {
349 mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
350 mc->agp_size = size_af;
351 }
352
353 mc->agp_end = mc->agp_start + mc->agp_size - 1;
354 dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
355 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
356}
357
358/**
359 * amdgpu_gmc_set_agp_default - Set the default AGP aperture value.
360 * @adev: amdgpu device structure holding all necessary information
361 * @mc: memory controller structure holding memory information
362 *
363 * To disable the AGP aperture, you need to set the start to a larger
364 * value than the end. This function sets the default value which
365 * can then be overridden using amdgpu_gmc_agp_location() if you want
366 * to enable the AGP aperture on a specific chip.
367 *
368 */
369void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev,
370 struct amdgpu_gmc *mc)
371{
372 mc->agp_start = 0xffffffffffff;
373 mc->agp_end = 0;
374 mc->agp_size = 0;
375}
376
377/**
378 * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid
379 *
380 * @addr: 48 bit physical address, page aligned (36 significant bits)
381 * @pasid: 16 bit process address space identifier
382 */
383static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
384{
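	/* addr is page aligned, so after the shift the low 16 bits are free for the pasid */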
385 return addr << 4 | pasid;
386}
387
388/**
389 * amdgpu_gmc_filter_faults - filter VM faults
390 *
391 * @adev: amdgpu device structure
392 * @ih: interrupt ring that the fault received from
393 * @addr: address of the VM fault
394 * @pasid: PASID of the process causing the fault
395 * @timestamp: timestamp of the fault
396 *
397 * Returns:
398 * True if the fault was filtered and should not be processed further.
399 * False if the fault is a new one and needs to be handled.
400 */
401bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
402 struct amdgpu_ih_ring *ih, uint64_t addr,
403 uint16_t pasid, uint64_t timestamp)
404{
405 struct amdgpu_gmc *gmc = &adev->gmc;
406 uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid);
407 struct amdgpu_gmc_fault *fault;
408 uint32_t hash;
409
410 /* Stale retry fault if timestamp goes backward */
411 if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp))
412 return true;
413
414 /* If we don't have space left in the ring buffer return immediately */
415 stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
416 AMDGPU_GMC_FAULT_TIMEOUT;
417 if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
418 return true;
419
420 /* Try to find the fault in the hash */
421 hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
422 fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
423 while (fault->timestamp >= stamp) {
424 uint64_t tmp;
425
426 if (atomic64_read(&fault->key) == key) {
427 /*
428 * if we get a fault which is already present in
429 * the fault_ring and the timestamp of
430 * the fault is after the expired timestamp,
431 * then this is a new fault that needs to be added
432 * into the fault ring.
433 */
434 if (fault->timestamp_expiry != 0 &&
435 amdgpu_ih_ts_after(fault->timestamp_expiry,
436 timestamp))
437 break;
438 else
439 return true;
440 }
441
442 tmp = fault->timestamp;
443 fault = &gmc->fault_ring[fault->next];
444
445 /* Check if the entry was reused */
446 if (fault->timestamp >= tmp)
447 break;
448 }
449
450 /* Add the fault to the ring */
451 fault = &gmc->fault_ring[gmc->last_fault];
452 atomic64_set(&fault->key, key);
453 fault->timestamp = timestamp;
454
455 /* And update the hash */
456 fault->next = gmc->fault_hash[hash].idx;
457 gmc->fault_hash[hash].idx = gmc->last_fault++;
458 return false;
459}
460
461/**
462 * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
463 *
464 * @adev: amdgpu device structure
465 * @addr: address of the VM fault
466 * @pasid: PASID of the process causing the fault
467 *
468 * Remove the address from the fault filter, so that future vm faults on this
469 * address are passed to the retry fault handler to recover.
470 */
471void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
472 uint16_t pasid)
473{
474 struct amdgpu_gmc *gmc = &adev->gmc;
475 uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
476 struct amdgpu_ih_ring *ih;
477 struct amdgpu_gmc_fault *fault;
478 uint32_t last_wptr;
479 uint64_t last_ts;
480 uint32_t hash;
481 uint64_t tmp;
482
483 if (adev->irq.retry_cam_enabled)
484 return;
485
486 ih = &adev->irq.ih1;
487 /* Get the WPTR of the last entry in IH ring */
488 last_wptr = amdgpu_ih_get_wptr(adev, ih);
489 /* Order wptr with ring data. */
490 rmb();
491 /* Get the timestamp of the last entry in IH ring */
492 last_ts = amdgpu_ih_decode_iv_ts(adev, ih, last_wptr, -1);
493
494 hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
495 fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
496 do {
497 if (atomic64_read(&fault->key) == key) {
498 /*
499 * Update the timestamp when this fault
500 * expired.
501 */
502 fault->timestamp_expiry = last_ts;
503 break;
504 }
505
506 tmp = fault->timestamp;
507 fault = &gmc->fault_ring[fault->next];
508 } while (fault->timestamp < tmp);
509}
510
511int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
512{
513 int r;
514
515 /* umc ras block */
516 r = amdgpu_umc_ras_sw_init(adev);
517 if (r)
518 return r;
519
520 /* mmhub ras block */
521 r = amdgpu_mmhub_ras_sw_init(adev);
522 if (r)
523 return r;
524
525 /* hdp ras block */
526 r = amdgpu_hdp_ras_sw_init(adev);
527 if (r)
528 return r;
529
530 /* mca.x ras block */
531 r = amdgpu_mca_mp0_ras_sw_init(adev);
532 if (r)
533 return r;
534
535 r = amdgpu_mca_mp1_ras_sw_init(adev);
536 if (r)
537 return r;
538
539 r = amdgpu_mca_mpio_ras_sw_init(adev);
540 if (r)
541 return r;
542
543 /* xgmi ras block */
544 r = amdgpu_xgmi_ras_sw_init(adev);
545 if (r)
546 return r;
547
548 return 0;
549}
550
551int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
552{
553 return 0;
554}
555
556void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
557{
558
559}
560
561 /*
562 * The latest engine allocation on gfx9/10 is:
563 * Engine 2, 3: firmware
564 * Engine 0, 1, 4~16: amdgpu ring,
565 * subject to change when ring number changes
566 * Engine 17: Gart flushes
567 */
568#define AMDGPU_VMHUB_INV_ENG_BITMAP 0x1FFF3
569
570int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
571{
572 struct amdgpu_ring *ring;
573 unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0};
574 unsigned i;
575 unsigned vmhub, inv_eng;
576 struct amdgpu_ring *shared_ring;
577
578 /* init the vm inv eng for all vmhubs */
579 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
580 vm_inv_engs[i] = AMDGPU_VMHUB_INV_ENG_BITMAP;
581 /* reserve engine 5 for firmware */
582 if (adev->enable_mes)
583 vm_inv_engs[i] &= ~(1 << 5);
584 /* reserve mmhub engine 3 for firmware */
585 if (adev->enable_umsch_mm)
586 vm_inv_engs[i] &= ~(1 << 3);
587 }
588
589 for (i = 0; i < adev->num_rings; ++i) {
590 ring = adev->rings[i];
591 vmhub = ring->vm_hub;
592
593 if (ring == &adev->mes.ring[0] ||
594 ring == &adev->mes.ring[1] ||
595 ring == &adev->umsch_mm.ring ||
596 ring == &adev->cper.ring_buf)
597 continue;
598
599 /* Skip if the ring is a shared ring */
600 if (amdgpu_sdma_is_shared_inv_eng(adev, ring))
601 continue;
602
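		/* ffs() is 1-based; 0 means no free invalidation engine is left in the bitmap */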
603 inv_eng = ffs(vm_inv_engs[vmhub]);
604 if (!inv_eng) {
605 dev_err(adev->dev, "no VM inv eng for ring %s\n",
606 ring->name);
607 return -EINVAL;
608 }
609
610 ring->vm_inv_eng = inv_eng - 1;
611 vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
612
613 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
614 ring->name, ring->vm_inv_eng, ring->vm_hub);
615 /* SDMA has a special packet which allows it to use the same
616 * invalidation engine for all the rings in one instance.
617 * Therefore, we do not allocate a separate VM invalidation engine
618 * for SDMA page rings. Instead, they share the VM invalidation
619 * engine with the SDMA gfx ring. This change ensures efficient
620 * resource management and avoids the issue of insufficient VM
621 * invalidation engines.
622 */
623 shared_ring = amdgpu_sdma_get_shared_ring(adev, ring);
624 if (shared_ring) {
625 shared_ring->vm_inv_eng = ring->vm_inv_eng;
626 dev_info(adev->dev, "ring %s shares VM invalidation engine %u with ring %s on hub %u\n",
627 ring->name, ring->vm_inv_eng, shared_ring->name, ring->vm_hub);
628 continue;
629 }
630 }
631
632 return 0;
633}
634
635void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
636 uint32_t vmhub, uint32_t flush_type)
637{
638 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
639 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
640 struct dma_fence *fence;
641 struct amdgpu_job *job;
642 int r;
643
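	/* Use the direct MMIO path unless the SDMA invalidation workaround applies
	 * and the SDMA buffer-funcs ring can actually be used for it.
	 */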
644 if (!hub->sdma_invalidation_workaround || vmid ||
645 !adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
646 !ring->sched.ready) {
647 /*
648 * A GPU reset should flush all TLBs anyway, so no need to do
649 * this while one is ongoing.
650 */
651 if (!down_read_trylock(&adev->reset_domain->sem))
652 return;
653
654 if (adev->gmc.flush_tlb_needs_extra_type_2)
655 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid,
656 vmhub, 2);
657
658 if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
659 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid,
660 vmhub, 0);
661
662 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub,
663 flush_type);
664 up_read(&adev->reset_domain->sem);
665 return;
666 }
667
668 /* The SDMA on Navi 1x has a bug which can theoretically result in memory
669 * corruption if an invalidation happens at the same time as an VA
670 * translation. Avoid this by doing the invalidation from the SDMA
671 * itself at least for GART.
672 */
673 mutex_lock(&adev->mman.gtt_window_lock);
674 r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
675 AMDGPU_FENCE_OWNER_UNDEFINED,
676 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
677 &job);
678 if (r)
679 goto error_alloc;
680
681 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
682 job->vm_needs_flush = true;
683 job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
684 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
685 fence = amdgpu_job_submit(job);
686 mutex_unlock(&adev->mman.gtt_window_lock);
687
688 dma_fence_wait(fence, false);
689 dma_fence_put(fence);
690
691 return;
692
693error_alloc:
694 mutex_unlock(&adev->mman.gtt_window_lock);
695 dev_err(adev->dev, "Error flushing GPU TLB using the SDMA (%d)!\n", r);
696}
697
698int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
699 uint32_t flush_type, bool all_hub,
700 uint32_t inst)
701{
702 u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
703 adev->usec_timeout;
704 struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
705 struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
706 unsigned int ndw;
707 int r;
708 uint32_t seq;
709
710 /*
711 * A GPU reset should flush all TLBs anyway, so no need to do
712 * this while one is ongoing.
713 */
714 if (!down_read_trylock(&adev->reset_domain->sem))
715 return 0;
716
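	/* Either invalidate directly through the GMC callbacks, or emit the
	 * invalidation packets on the KIQ ring and wait for the fence.
	 */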
717 if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
718 if (adev->gmc.flush_tlb_needs_extra_type_2)
719 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
720 2, all_hub,
721 inst);
722
723 if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
724 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
725 0, all_hub,
726 inst);
727
728 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
729 flush_type, all_hub,
730 inst);
731 r = 0;
732 } else {
733 /* 2 dwords flush + 8 dwords fence */
734 ndw = kiq->pmf->invalidate_tlbs_size + 8;
735
736 if (adev->gmc.flush_tlb_needs_extra_type_2)
737 ndw += kiq->pmf->invalidate_tlbs_size;
738
739 if (adev->gmc.flush_tlb_needs_extra_type_0)
740 ndw += kiq->pmf->invalidate_tlbs_size;
741
742 spin_lock(&adev->gfx.kiq[inst].ring_lock);
743 r = amdgpu_ring_alloc(ring, ndw);
744 if (r) {
745 spin_unlock(&adev->gfx.kiq[inst].ring_lock);
746 goto error_unlock_reset;
747 }
748 if (adev->gmc.flush_tlb_needs_extra_type_2)
749 kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
750
751 if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
752 kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
753
754 kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
755 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
756 if (r) {
757 amdgpu_ring_undo(ring);
758 spin_unlock(&adev->gfx.kiq[inst].ring_lock);
759 goto error_unlock_reset;
760 }
761
762 amdgpu_ring_commit(ring);
763 spin_unlock(&adev->gfx.kiq[inst].ring_lock);
764 if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
765 dev_err(adev->dev, "timeout waiting for kiq fence\n");
766 r = -ETIME;
767 }
768 }
769
770error_unlock_reset:
771 up_read(&adev->reset_domain->sem);
772 return r;
773}
774
775void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
776 uint32_t reg0, uint32_t reg1,
777 uint32_t ref, uint32_t mask,
778 uint32_t xcc_inst)
779{
780 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
781 struct amdgpu_ring *ring = &kiq->ring;
782 signed long r, cnt = 0;
783 unsigned long flags;
784 uint32_t seq;
785
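	/* prefer the MES path when its ring is ready; otherwise fall back to the KIQ */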
786 if (adev->mes.ring[0].sched.ready) {
787 amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
788 ref, mask);
789 return;
790 }
791
792 spin_lock_irqsave(&kiq->ring_lock, flags);
793 amdgpu_ring_alloc(ring, 32);
794 amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
795 ref, mask);
796 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
797 if (r)
798 goto failed_undo;
799
800 amdgpu_ring_commit(ring);
801 spin_unlock_irqrestore(&kiq->ring_lock, flags);
802
803 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
804
805 /* don't wait anymore for IRQ context */
806 if (r < 1 && in_interrupt())
807 goto failed_kiq;
808
809 might_sleep();
810 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
811 !amdgpu_reset_pending(adev->reset_domain)) {
812
813 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
814 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
815 }
816
817 if (cnt > MAX_KIQ_REG_TRY)
818 goto failed_kiq;
819
820 return;
821
822failed_undo:
823 amdgpu_ring_undo(ring);
824 spin_unlock_irqrestore(&kiq->ring_lock, flags);
825failed_kiq:
826 dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
827}
828
829/**
830 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
831 * @adev: amdgpu_device pointer
832 *
833 * Check and set if the device @adev supports Trusted Memory
834 * Zones (TMZ).
835 */
836void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
837{
838 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
839 /* RAVEN */
840 case IP_VERSION(9, 2, 2):
841 case IP_VERSION(9, 1, 0):
842 /* RENOIR looks like RAVEN */
843 case IP_VERSION(9, 3, 0):
844 /* GC 10.3.7 */
845 case IP_VERSION(10, 3, 7):
846 /* GC 11.0.1 */
847 case IP_VERSION(11, 0, 1):
848 if (amdgpu_tmz == 0) {
849 adev->gmc.tmz_enabled = false;
850 dev_info(adev->dev,
851 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
852 } else {
853 adev->gmc.tmz_enabled = true;
854 dev_info(adev->dev,
855 "Trusted Memory Zone (TMZ) feature enabled\n");
856 }
857 break;
858 case IP_VERSION(10, 1, 10):
859 case IP_VERSION(10, 1, 1):
860 case IP_VERSION(10, 1, 2):
861 case IP_VERSION(10, 1, 3):
862 case IP_VERSION(10, 3, 0):
863 case IP_VERSION(10, 3, 2):
864 case IP_VERSION(10, 3, 4):
865 case IP_VERSION(10, 3, 5):
866 case IP_VERSION(10, 3, 6):
867 /* VANGOGH */
868 case IP_VERSION(10, 3, 1):
869 /* YELLOW_CARP*/
870 case IP_VERSION(10, 3, 3):
871 case IP_VERSION(11, 0, 4):
872 case IP_VERSION(11, 5, 0):
873 case IP_VERSION(11, 5, 1):
874 case IP_VERSION(11, 5, 2):
875 case IP_VERSION(11, 5, 3):
876 /* Don't enable it by default yet.
877 */
878 if (amdgpu_tmz < 1) {
879 adev->gmc.tmz_enabled = false;
880 dev_info(adev->dev,
881 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
882 } else {
883 adev->gmc.tmz_enabled = true;
884 dev_info(adev->dev,
885 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
886 }
887 break;
888 default:
889 adev->gmc.tmz_enabled = false;
890 dev_info(adev->dev,
891 "Trusted Memory Zone (TMZ) feature not supported\n");
892 break;
893 }
894}
895
896/**
897 * amdgpu_gmc_noretry_set -- set per asic noretry defaults
898 * @adev: amdgpu_device pointer
899 *
900 * Set a per asic default for the no-retry parameter.
901 *
902 */
903void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
904{
905 struct amdgpu_gmc *gmc = &adev->gmc;
906 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
907 bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
908 gc_ver == IP_VERSION(9, 4, 0) ||
909 gc_ver == IP_VERSION(9, 4, 1) ||
910 gc_ver == IP_VERSION(9, 4, 2) ||
911 gc_ver == IP_VERSION(9, 4, 3) ||
912 gc_ver == IP_VERSION(9, 4, 4) ||
913 gc_ver == IP_VERSION(9, 5, 0) ||
914 gc_ver >= IP_VERSION(10, 3, 0));
915
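	/* force noretry when SR-IOV xnack support is not available */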
916 if (!amdgpu_sriov_xnack_support(adev))
917 gmc->noretry = 1;
918 else
919 gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
920}
921
922void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
923 bool enable)
924{
925 struct amdgpu_vmhub *hub;
926 u32 tmp, reg, i;
927
928 hub = &adev->vmhub[hub_type];
929 for (i = 0; i < 16; i++) {
930 reg = hub->vm_context0_cntl + hub->ctx_distance * i;
931
932 tmp = (hub_type == AMDGPU_GFXHUB(0)) ?
933 RREG32_SOC15_IP(GC, reg) :
934 RREG32_SOC15_IP(MMHUB, reg);
935
936 if (enable)
937 tmp |= hub->vm_cntx_cntl_vm_fault;
938 else
939 tmp &= ~hub->vm_cntx_cntl_vm_fault;
940
941 (hub_type == AMDGPU_GFXHUB(0)) ?
942 WREG32_SOC15_IP(GC, reg, tmp) :
943 WREG32_SOC15_IP(MMHUB, reg, tmp);
944 }
945}
946
947void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
948{
949 unsigned size;
950
951 /*
952 * Some ASICs need to reserve a region of video memory to avoid access
953 * from driver
954 */
955 adev->mman.stolen_reserved_offset = 0;
956 adev->mman.stolen_reserved_size = 0;
957
958 /*
959 * TODO:
960 * Currently there is a bug where some memory client outside
961 * of the driver writes to first 8M of VRAM on S3 resume,
962 * this overrides the GART, which by default gets placed in the first 8M, and
963 * causes VM_FAULTS once GTT is accessed.
964 * Keep the stolen memory reservation until this is solved.
965 */
966 switch (adev->asic_type) {
967 case CHIP_VEGA10:
968 adev->mman.keep_stolen_vga_memory = true;
969 /*
970 * VEGA10 SRIOV VF with MS_HYPERV host needs some firmware reserved area.
971 */
972#ifdef CONFIG_X86
973 if (amdgpu_sriov_vf(adev) && hypervisor_is_type(X86_HYPER_MS_HYPERV)) {
974 adev->mman.stolen_reserved_offset = 0x500000;
975 adev->mman.stolen_reserved_size = 0x200000;
976 }
977#endif
978 break;
979 case CHIP_RAVEN:
980 case CHIP_RENOIR:
981 adev->mman.keep_stolen_vga_memory = true;
982 break;
983 default:
984 adev->mman.keep_stolen_vga_memory = false;
985 break;
986 }
987
988 if (amdgpu_sriov_vf(adev) ||
989 !amdgpu_device_has_display_hardware(adev)) {
990 size = 0;
991 } else {
992 size = amdgpu_gmc_get_vbios_fb_size(adev);
993
994 if (adev->mman.keep_stolen_vga_memory)
995 size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
996 }
997
998 /* set to 0 if the pre-OS buffer uses up most of vram */
999 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
1000 size = 0;
1001
1002 if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
1003 adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
1004 adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
1005 } else {
1006 adev->mman.stolen_vga_size = size;
1007 adev->mman.stolen_extended_size = 0;
1008 }
1009}
1010
1011/**
1012 * amdgpu_gmc_init_pdb0 - initialize PDB0
1013 *
1014 * @adev: amdgpu_device pointer
1015 *
1016 * This function is only used when GART page table is used
1017 * for FB address translation. In such a case, we construct
1018 * a 2-level system VM page table: PDB0->PTB, to cover both
1019 * VRAM of the hive and system memory.
1020 *
1021 * PDB0 is static, initialized once on driver initialization.
1022 * The first n entries of PDB0 are used as PTEs by setting
1023 * the P bit to 1, pointing to VRAM. The n+1'th entry points
1024 * to a big PTB covering system memory.
1025 *
1026 */
1027void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
1028{
1029 int i;
1030 uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
1031 /* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
1032 */
1033 u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
1034 u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
1035 u64 vram_addr = adev->vm_manager.vram_base_offset -
1036 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1037 u64 vram_end = vram_addr + vram_size;
1038 u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
1039 int idx;
1040
1041 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1042 return;
1043
1044 flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
1045 flags |= AMDGPU_PTE_WRITEABLE;
1046 flags |= AMDGPU_PTE_SNOOPED;
1047 flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
1048 flags |= AMDGPU_PDE_PTE_FLAG(adev);
1049
1050 /* The first n PDE0 entries are used as PTE,
1051 * pointing to vram
1052 */
1053 for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
1054 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);
1055
1056 /* The n+1'th PDE0 entry points to a huge
1057 * PTB that has more than 512 entries, each
1058 * pointing to a 4K system page
1059 */
1060 flags = AMDGPU_PTE_VALID;
1061 flags |= AMDGPU_PTE_SNOOPED | AMDGPU_PDE_BFS_FLAG(adev, 0);
1062 /* Requires gart_ptb_gpu_pa to be 4K aligned */
1063 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
1064 drm_dev_exit(idx);
1065}
1066
1067/**
1068 * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
1069 * address
1070 *
1071 * @adev: amdgpu_device pointer
1072 * @mc_addr: MC address of buffer
1073 */
1074uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
1075{
1076 return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
1077}
1078
1079/**
1080 * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
1081 * GPU's view
1082 *
1083 * @adev: amdgpu_device pointer
1084 * @bo: amdgpu buffer object
1085 */
1086uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
1087{
1088 return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
1089}
1090
1091int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
1092{
1093 struct amdgpu_bo *vram_bo = NULL;
1094 uint64_t vram_gpu = 0;
1095 void *vram_ptr = NULL;
1096
1097 int ret, size = 0x100000;
1098 uint8_t cptr[10];
1099
1100 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
1101 AMDGPU_GEM_DOMAIN_VRAM,
1102 &vram_bo,
1103 &vram_gpu,
1104 &vram_ptr);
1105 if (ret)
1106 return ret;
1107
1108 memset(vram_ptr, 0x86, size);
1109 memset(cptr, 0x86, 10);
1110
1111 /**
1112 * Check the start, the middle, and the end of the memory to see whether the
1113 * content of each byte is the pattern "0x86". If so, we assume the vram bo is
1114 * workable.
1115 *
1116 * Note: checking every byte of the whole 1M bo would take too many
1117 * seconds, so here we just sample these three spots.
1118 */
1119 ret = memcmp(vram_ptr, cptr, 10);
1120 if (ret) {
1121 ret = -EIO;
1122 goto release_buffer;
1123 }
1124
1125 ret = memcmp(vram_ptr + (size / 2), cptr, 10);
1126 if (ret) {
1127 ret = -EIO;
1128 goto release_buffer;
1129 }
1130
1131 ret = memcmp(vram_ptr + size - 10, cptr, 10);
1132 if (ret) {
1133 ret = -EIO;
1134 goto release_buffer;
1135 }
1136
1137release_buffer:
1138 amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
1139 &vram_ptr);
1140
1141 return ret;
1142}
1143
1144static const char *nps_desc[] = {
1145 [AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
1146 [AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
1147 [AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
1148 [AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
1149 [AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
1150 [AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
1151};
1152
1153static ssize_t available_memory_partition_show(struct device *dev,
1154 struct device_attribute *addr,
1155 char *buf)
1156{
1157 struct drm_device *ddev = dev_get_drvdata(dev);
1158 struct amdgpu_device *adev = drm_to_adev(ddev);
1159 int size = 0, mode;
1160 char *sep = "";
1161
1162 for_each_inst(mode, adev->gmc.supported_nps_modes) {
1163 size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
1164 sep = ", ";
1165 }
1166 size += sysfs_emit_at(buf, size, "\n");
1167
1168 return size;
1169}
1170
1171static ssize_t current_memory_partition_store(struct device *dev,
1172 struct device_attribute *attr,
1173 const char *buf, size_t count)
1174{
1175 struct drm_device *ddev = dev_get_drvdata(dev);
1176 struct amdgpu_device *adev = drm_to_adev(ddev);
1177 enum amdgpu_memory_partition mode;
1178 struct amdgpu_hive_info *hive;
1179 int i;
1180
1181 mode = UNKNOWN_MEMORY_PARTITION_MODE;
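	/* match the written string against the names of the supported NPS modes */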
1182 for_each_inst(i, adev->gmc.supported_nps_modes) {
1183 if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) {
1184 mode = i;
1185 break;
1186 }
1187 }
1188
1189 if (mode == UNKNOWN_MEMORY_PARTITION_MODE)
1190 return -EINVAL;
1191
1192 if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) {
1193 dev_info(
1194 adev->dev,
1195 "requested NPS mode is same as current NPS mode, skipping\n");
1196 return count;
1197 }
1198
1199 /* If the device is part of a hive, all devices in the hive should request
1200 * the same mode. Hence store the requested mode in the hive.
1201 */
1202 hive = amdgpu_get_xgmi_hive(adev);
1203 if (hive) {
1204 atomic_set(&hive->requested_nps_mode, mode);
1205 amdgpu_put_xgmi_hive(hive);
1206 } else {
1207 adev->gmc.requested_nps_mode = mode;
1208 }
1209
1210 dev_info(
1211 adev->dev,
1212 "NPS mode change requested, please remove and reload the driver\n");
1213
1214 return count;
1215}
1216
1217static ssize_t current_memory_partition_show(
1218 struct device *dev, struct device_attribute *addr, char *buf)
1219{
1220 struct drm_device *ddev = dev_get_drvdata(dev);
1221 struct amdgpu_device *adev = drm_to_adev(ddev);
1222 enum amdgpu_memory_partition mode;
1223
1224 mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1225 if ((mode >= ARRAY_SIZE(nps_desc)) ||
1226 (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
1227 return sysfs_emit(buf, "UNKNOWN\n");
1228
1229 return sysfs_emit(buf, "%s\n", nps_desc[mode]);
1230}
1231
1232static DEVICE_ATTR_RW(current_memory_partition);
1233static DEVICE_ATTR_RO(available_memory_partition);
1234
1235int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev)
1236{
1237 bool nps_switch_support;
1238 int r = 0;
1239
1240 if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
1241 return 0;
1242
1243 nps_switch_support = (hweight32(adev->gmc.supported_nps_modes &
1244 AMDGPU_ALL_NPS_MASK) > 1);
1245 if (!nps_switch_support)
1246 dev_attr_current_memory_partition.attr.mode &=
1247 ~(S_IWUSR | S_IWGRP | S_IWOTH);
1248 else
1249 r = device_create_file(adev->dev,
1250 &dev_attr_available_memory_partition);
1251
1252 if (r)
1253 return r;
1254
1255 return device_create_file(adev->dev,
1256 &dev_attr_current_memory_partition);
1257}
1258
1259void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev)
1260{
1261 if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
1262 return;
1263
1264 device_remove_file(adev->dev, &dev_attr_current_memory_partition);
1265 device_remove_file(adev->dev, &dev_attr_available_memory_partition);
1266}
1267
1268int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
1269 struct amdgpu_mem_partition_info *mem_ranges,
1270 uint8_t *exp_ranges)
1271{
1272 struct amdgpu_gmc_memrange *ranges;
1273 int range_cnt, ret, i, j;
1274 uint32_t nps_type;
1275 bool refresh;
1276
1277 if (!mem_ranges || !exp_ranges)
1278 return -EINVAL;
1279
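	/* re-read the discovery NPS info if this init follows a reset triggered by an NPS switch */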
1280 refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
1281 (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS);
1282 ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
1283 &range_cnt, refresh);
1284
1285 if (ret)
1286 return ret;
1287
1288 /* TODO: For now, expect ranges and partition count to be the same.
1289 * Adjust if there are holes expected in any NPS domain.
1290 */
1291 if (*exp_ranges && (range_cnt != *exp_ranges)) {
1292 dev_warn(
1293 adev->dev,
1294 "NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d",
1295 *exp_ranges, nps_type, range_cnt);
1296 ret = -EINVAL;
1297 goto err;
1298 }
1299
1300 for (i = 0; i < range_cnt; ++i) {
1301 if (ranges[i].base_address >= ranges[i].limit_address) {
1302 dev_warn(
1303 adev->dev,
1304 "Invalid NPS range - nps mode: %d, range[%d]: base: %llx limit: %llx",
1305 nps_type, i, ranges[i].base_address,
1306 ranges[i].limit_address);
1307 ret = -EINVAL;
1308 goto err;
1309 }
1310
1311 /* Check for overlaps, not expecting any now */
1312 for (j = i - 1; j >= 0; j--) {
1313 if (max(ranges[j].base_address,
1314 ranges[i].base_address) <=
1315 min(ranges[j].limit_address,
1316 ranges[i].limit_address)) {
1317 dev_warn(
1318 adev->dev,
1319 "overlapping ranges detected [ %llx - %llx ] | [%llx - %llx]",
1320 ranges[j].base_address,
1321 ranges[j].limit_address,
1322 ranges[i].base_address,
1323 ranges[i].limit_address);
1324 ret = -EINVAL;
1325 goto err;
1326 }
1327 }
1328
1329 mem_ranges[i].range.fpfn =
1330 (ranges[i].base_address -
1331 adev->vm_manager.vram_base_offset) >>
1332 AMDGPU_GPU_PAGE_SHIFT;
1333 mem_ranges[i].range.lpfn =
1334 (ranges[i].limit_address -
1335 adev->vm_manager.vram_base_offset) >>
1336 AMDGPU_GPU_PAGE_SHIFT;
1337 mem_ranges[i].size =
1338 ranges[i].limit_address - ranges[i].base_address + 1;
1339 }
1340
1341 if (!*exp_ranges)
1342 *exp_ranges = range_cnt;
1343err:
1344 kfree(ranges);
1345
1346 return ret;
1347}
1348
1349int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
1350 int nps_mode)
1351{
1352 /* Not supported on VF devices and APUs */
1353 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
1354 return -EOPNOTSUPP;
1355
1356 if (!adev->psp.funcs) {
1357 dev_err(adev->dev,
1358 "PSP interface not available for nps mode change request");
1359 return -EINVAL;
1360 }
1361
1362 return psp_memory_partition(&adev->psp, nps_mode);
1363}
1364
1365static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev,
1366 int req_nps_mode,
1367 int cur_nps_mode)
1368{
1369 return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) ==
1370 BIT(req_nps_mode)) &&
1371 req_nps_mode != cur_nps_mode);
1372}
1373
1374void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev)
1375{
1376 int req_nps_mode, cur_nps_mode, r;
1377 struct amdgpu_hive_info *hive;
1378
1379 if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes ||
1380 !adev->gmc.gmc_funcs->request_mem_partition_mode)
1381 return;
1382
1383 cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
1384 hive = amdgpu_get_xgmi_hive(adev);
1385 if (hive) {
1386 req_nps_mode = atomic_read(&hive->requested_nps_mode);
1387 if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode,
1388 cur_nps_mode)) {
1389 amdgpu_put_xgmi_hive(hive);
1390 return;
1391 }
1392 r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode);
1393 amdgpu_put_xgmi_hive(hive);
1394 goto out;
1395 }
1396
1397 req_nps_mode = adev->gmc.requested_nps_mode;
1398 if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode))
1399 return;
1400
1401 /* even if this fails, we should let driver unload w/o blocking */
1402 r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode);
1403out:
1404 if (r)
1405 dev_err(adev->dev, "NPS mode change request failed\n");
1406 else
1407 dev_info(
1408 adev->dev,
1409 "NPS mode change request done, reload driver to complete the change\n");
1410}
1411
1412bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
1413{
1414 if (adev->gmc.gmc_funcs->need_reset_on_init)
1415 return adev->gmc.gmc_funcs->need_reset_on_init(adev);
1416
1417 return false;
1418}