/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v2_3.h"

#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
#include <linux/pci.h>

#define smnPCIE_CONFIG_CNTL	0x11180044
#define smnCPM_CONTROL		0x11180460
#define smnPCIE_CNTL2		0x11180070
#define smnPCIE_LC_CNTL		0x11140280
#define smnPCIE_LC_CNTL3	0x111402d4
#define smnPCIE_LC_CNTL6	0x111402ec
#define smnPCIE_LC_CNTL7	0x111402f0
#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2	0x1014008c
#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL	0x10123538
#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP	0x10140324
#define smnPSWUSP0_PCIE_LC_CNTL2		0x111402c4
#define smnNBIF_MGCG_CTRL_LCLK			0x1013a21c

#define mmBIF_SDMA2_DOORBELL_RANGE		0x01d6
#define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX	2
#define mmBIF_SDMA3_DOORBELL_RANGE		0x01d7
#define mmBIF_SDMA3_DOORBELL_RANGE_BASE_IDX	2

#define mmBIF_MMSCH1_DOORBELL_RANGE		0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX	2

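/*
 * Program the HDP MEM/REG flush control remap registers with the offsets of
 * the rmmio_remap window, so HDP flushes can be issued through the remapped
 * MMIO page exposed to user mode (e.g. via KFD, which defines these offsets
 * in kfd_ioctl.h).
 */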
static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp;

	/*
	 * guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
	 * therefore we force rev_id to 0 (which is the default value)
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			     BIF_FB_EN__FB_READ_EN_MASK |
			     BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}

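/*
 * Program the BIF doorbell range (offset and size) for one SDMA instance.
 * The BIF_SDMA0_DOORBELL_RANGE field layout is reused for the REG_SET_FIELD
 * macros regardless of which instance's register is selected.
 */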
static void nbio_v2_3_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index,
					  int doorbell_size)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
		instance == 1 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE) :
		instance == 2 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA2_DOORBELL_RANGE) :
		SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA3_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, SIZE,
					       doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, SIZE,
					       0);

	WREG32(reg, doorbell_range);
}

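/*
 * Program the MMSCH doorbell range used for the VCN instance: OFFSET is set
 * to doorbell_index and SIZE to a fixed value of 8 when the doorbell is in
 * use, or SIZE to 0 to disable the range.
 */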
static void nbio_v2_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg = instance ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE) :
		SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v2_3_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN,
		       enable ? 1 : 0);
}

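/*
 * Enable the doorbell self-ring GPA aperture and program its base to the
 * device doorbell BAR, presumably so that doorbell writes generated by the
 * device itself are routed back to its own doorbell space.
 */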
static void nbio_v2_3_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							 bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
		     tmp);
}

static void nbio_v2_3_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, OFFSET,
						  doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, SIZE,
						  2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, SIZE,
						  0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

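/*
 * One-time interrupt handler (IH) setup: point INTERRUPT_CNTL2 at the dummy
 * page (the address appears to be taken in 256-byte units, hence the >> 8)
 * and configure dummy-read and snoop behaviour in INTERRUPT_CNTL.
 */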
static void nbio_v2_3_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);

	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/*
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL,
				       IH_DUMMY_RD_OVERRIDE, 0);

	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL,
				       IH_REQ_NONSNOOP_EN, 0);

	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

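/*
 * Toggle BIF medium-grain clock gating by setting or clearing the dynamic
 * gate enables in CPM_CONTROL. Gating is only enabled when the device
 * advertises AMD_CG_SUPPORT_BIF_MGCG.
 */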
static void nbio_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnCPM_CONTROL);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
		data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	} else {
		data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			  CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnCPM_CONTROL, data);
}

static void nbio_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v2_3_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

static u32 nbio_v2_3_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v2_3_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v2_3_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v2_3_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

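/*
 * Per-client reference masks for GPU_HDP_FLUSH_REQ/DONE. Ring code writes a
 * client's mask to the request register and then polls the done register for
 * the same mask to confirm that the HDP flush has completed.
 */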
const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

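/*
 * Early register init: override the CI SWUS max read request size via
 * PCIE_CONFIG_CNTL (MODE and PRIV are both set to 1 here).
 */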
static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

	if (def != data)
		WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
}

#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT		0x00000000 /* off by default, no gains over L1 */
#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT		0x00000009 /* 1=1us, 9=1ms */
#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT	0x0000000E /* 4ms */

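/*
 * Runtime ASPM toggle. When enabling, the L0s/L1 inactivity timers are set to
 * the NAVI10 defaults above (a longer 4 ms L1 timer when attached over
 * Thunderbolt) and PMI-initiated L1 entry is allowed; when disabling, the
 * inactivity timers are zeroed and PMI-initiated L1 entry is blocked.
 */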
static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
				  bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);

	if (enable) {
		/* Disable ASPM L0s/L1 first */
		data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);

		data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;

		if (pci_is_thunderbolt_attached(adev->pdev))
			data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
		else
			data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;

		data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	} else {
		/* Disable ASPM L1 */
		data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
		/* Disable ASPM TxL0s */
		data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
		/* Disable PMI-initiated L1 */
		data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);
}

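/*
 * Enable PCIe Latency Tolerance Reporting (LTR): program the endpoint LTR
 * control value, allow LTR messages while in ASPM L1 and in non-D0 power
 * states, and set the LTR enable bit in the device control 2 register.
 */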
static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
{
	uint32_t def, data;

	WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2);
	data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data);

	def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
	data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
	if (def != data)
		WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);

	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
	data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}

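/*
 * One-time ASPM programming sequence: configure the link control registers,
 * clock gating and strap timers, enable LTR via nbio_v2_3_program_ltr(), and
 * finally set the L1 inactivity timer (0x9) with PMI-to-L1 entry disabled.
 */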
static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
	data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL7, data);

	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
	data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
	if (def != data)
		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
	data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL3, data);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
	data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
	data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);

	def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
	data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);

	WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);

	def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
	data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
		PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
	data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
	if (def != data)
		WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
	data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
		PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL6, data);

	nbio_v2_3_program_ltr(adev);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
	data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
	data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);

	def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
	data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
	if (def != data)
		WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
	data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
	data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
	data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
	data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL3, data);
}

const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
	.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset,
	.get_rev_id = nbio_v2_3_get_rev_id,
	.mc_access_enable = nbio_v2_3_mc_access_enable,
	.get_memsize = nbio_v2_3_get_memsize,
	.sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v2_3_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v2_3_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v2_3_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v2_3_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v2_3_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v2_3_get_clockgating_state,
	.ih_control = nbio_v2_3_ih_control,
	.init_registers = nbio_v2_3_init_registers,
	.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
	.enable_aspm = nbio_v2_3_enable_aspm,
	.program_aspm = nbio_v2_3_program_aspm,
};