Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

radeon: Deinline indirect register accessor functions

This patch deinlines indirect register accessor functions.

These functions perform two mmio accesses, framed by spin lock/unlock.
Spin lock/unlock by itself takes more than 50 cycles in ideal case
(if lock is exclusively cached on current CPU).

With this .config: http://busybox.net/~vda/kernel_config,
after uninlining these functions have sizes and callsite counts
as follows:

r600_uvd_ctx_rreg: 111 bytes, 4 callsites
r600_uvd_ctx_wreg: 113 bytes, 5 callsites
eg_pif_phy0_rreg: 106 bytes, 13 callsites
eg_pif_phy0_wreg: 108 bytes, 13 callsites
eg_pif_phy1_rreg: 107 bytes, 13 callsites
eg_pif_phy1_wreg: 108 bytes, 13 callsites
rv370_pcie_rreg: 111 bytes, 21 callsites
rv370_pcie_wreg: 113 bytes, 24 callsites
r600_rcu_rreg: 111 bytes, 16 callsites
r600_rcu_wreg: 113 bytes, 25 callsites
cik_didt_rreg: 106 bytes, 10 callsites
cik_didt_wreg: 107 bytes, 10 callsites
tn_smc_rreg: 106 bytes, 126 callsites
tn_smc_wreg: 107 bytes, 116 callsites
eg_cg_rreg: 107 bytes, 20 callsites
eg_cg_wreg: 108 bytes, 52 callsites

Functions r100_mm_rreg() and r100_mm_wreg() have a fast path and
a locked (slow) path. This patch deinlines only the slow path.

r100_mm_rreg_slow: 78 bytes, 2083 callsites
r100_mm_wreg_slow: 81 bytes, 3570 callsites

Reduction in code size is more than 65,000 bytes:

text data bss dec hex filename
85740176 22294680 20627456 128662312 7ab3b28 vmlinux.before
85674192 22294776 20627456 128598664 7aa4288 vmlinux

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Denys Vlasenko and committed by
Alex Deucher
9e5acbc2 b0b9bb4d

+241 -197
+25
drivers/gpu/drm/radeon/cik.c
··· 174 174 } 175 175 } 176 176 177 + /* 178 + * Indirect registers accessor 179 + */ 180 + u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) 181 + { 182 + unsigned long flags; 183 + u32 r; 184 + 185 + spin_lock_irqsave(&rdev->didt_idx_lock, flags); 186 + WREG32(CIK_DIDT_IND_INDEX, (reg)); 187 + r = RREG32(CIK_DIDT_IND_DATA); 188 + spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); 189 + return r; 190 + } 191 + 192 + void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) 193 + { 194 + unsigned long flags; 195 + 196 + spin_lock_irqsave(&rdev->didt_idx_lock, flags); 197 + WREG32(CIK_DIDT_IND_INDEX, (reg)); 198 + WREG32(CIK_DIDT_IND_DATA, (v)); 199 + spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); 200 + } 201 + 177 202 /* get temperature in millidegrees */ 178 203 int ci_get_temp(struct radeon_device *rdev) 179 204 {
+69
drivers/gpu/drm/radeon/evergreen.c
··· 35 35 #include "evergreen_blit_shaders.h" 36 36 #include "radeon_ucode.h" 37 37 38 + /* 39 + * Indirect registers accessor 40 + */ 41 + u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) 42 + { 43 + unsigned long flags; 44 + u32 r; 45 + 46 + spin_lock_irqsave(&rdev->cg_idx_lock, flags); 47 + WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 48 + r = RREG32(EVERGREEN_CG_IND_DATA); 49 + spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); 50 + return r; 51 + } 52 + 53 + void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) 54 + { 55 + unsigned long flags; 56 + 57 + spin_lock_irqsave(&rdev->cg_idx_lock, flags); 58 + WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 59 + WREG32(EVERGREEN_CG_IND_DATA, (v)); 60 + spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); 61 + } 62 + 63 + u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) 64 + { 65 + unsigned long flags; 66 + u32 r; 67 + 68 + spin_lock_irqsave(&rdev->pif_idx_lock, flags); 69 + WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 70 + r = RREG32(EVERGREEN_PIF_PHY0_DATA); 71 + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 72 + return r; 73 + } 74 + 75 + void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) 76 + { 77 + unsigned long flags; 78 + 79 + spin_lock_irqsave(&rdev->pif_idx_lock, flags); 80 + WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 81 + WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); 82 + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 83 + } 84 + 85 + u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) 86 + { 87 + unsigned long flags; 88 + u32 r; 89 + 90 + spin_lock_irqsave(&rdev->pif_idx_lock, flags); 91 + WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 92 + r = RREG32(EVERGREEN_PIF_PHY1_DATA); 93 + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 94 + return r; 95 + } 96 + 97 + void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) 98 + { 99 + unsigned long flags; 100 + 101 + spin_lock_irqsave(&rdev->pif_idx_lock, flags); 102 + 
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 103 + WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); 104 + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 105 + } 106 + 38 107 static const u32 crtc_offsets[6] = 39 108 { 40 109 EVERGREEN_CRTC0_REGISTER_OFFSET,
+25
drivers/gpu/drm/radeon/ni.c
··· 36 36 #include "radeon_ucode.h" 37 37 #include "clearstate_cayman.h" 38 38 39 + /* 40 + * Indirect registers accessor 41 + */ 42 + u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) 43 + { 44 + unsigned long flags; 45 + u32 r; 46 + 47 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 48 + WREG32(TN_SMC_IND_INDEX_0, (reg)); 49 + r = RREG32(TN_SMC_IND_DATA_0); 50 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 51 + return r; 52 + } 53 + 54 + void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) 55 + { 56 + unsigned long flags; 57 + 58 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 59 + WREG32(TN_SMC_IND_INDEX_0, (reg)); 60 + WREG32(TN_SMC_IND_DATA_0, (v)); 61 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 62 + } 63 + 39 64 static const u32 tn_rlc_save_restore_register_list[] = 40 65 { 41 66 0x98fc,
+22
drivers/gpu/drm/radeon/r100.c
··· 4090 4090 return 0; 4091 4091 } 4092 4092 4093 + uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg) 4094 + { 4095 + unsigned long flags; 4096 + uint32_t ret; 4097 + 4098 + spin_lock_irqsave(&rdev->mmio_idx_lock, flags); 4099 + writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 4100 + ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 4101 + spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); 4102 + return ret; 4103 + } 4104 + 4105 + void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v) 4106 + { 4107 + unsigned long flags; 4108 + 4109 + spin_lock_irqsave(&rdev->mmio_idx_lock, flags); 4110 + writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 4111 + writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 4112 + spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); 4113 + } 4114 + 4093 4115 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) 4094 4116 { 4095 4117 if (reg < rdev->rio_mem_size)
+25
drivers/gpu/drm/radeon/r300.c
··· 50 50 */ 51 51 52 52 /* 53 + * Indirect registers accessor 54 + */ 55 + uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 56 + { 57 + unsigned long flags; 58 + uint32_t r; 59 + 60 + spin_lock_irqsave(&rdev->pcie_idx_lock, flags); 61 + WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 62 + r = RREG32(RADEON_PCIE_DATA); 63 + spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); 64 + return r; 65 + } 66 + 67 + void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 68 + { 69 + unsigned long flags; 70 + 71 + spin_lock_irqsave(&rdev->pcie_idx_lock, flags); 72 + WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 73 + WREG32(RADEON_PCIE_DATA, (v)); 74 + spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); 75 + } 76 + 77 + /* 53 78 * rv370,rv380 PCIE GART 54 79 */ 55 80 static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+47
drivers/gpu/drm/radeon/r600.c
··· 108 108 extern int evergreen_rlc_resume(struct radeon_device *rdev); 109 109 extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); 110 110 111 + /* 112 + * Indirect registers accessor 113 + */ 114 + u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) 115 + { 116 + unsigned long flags; 117 + u32 r; 118 + 119 + spin_lock_irqsave(&rdev->rcu_idx_lock, flags); 120 + WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 121 + r = RREG32(R600_RCU_DATA); 122 + spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); 123 + return r; 124 + } 125 + 126 + void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) 127 + { 128 + unsigned long flags; 129 + 130 + spin_lock_irqsave(&rdev->rcu_idx_lock, flags); 131 + WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 132 + WREG32(R600_RCU_DATA, (v)); 133 + spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); 134 + } 135 + 136 + u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) 137 + { 138 + unsigned long flags; 139 + u32 r; 140 + 141 + spin_lock_irqsave(&rdev->uvd_idx_lock, flags); 142 + WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 143 + r = RREG32(R600_UVD_CTX_DATA); 144 + spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); 145 + return r; 146 + } 147 + 148 + void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) 149 + { 150 + unsigned long flags; 151 + 152 + spin_lock_irqsave(&rdev->uvd_idx_lock, flags); 153 + WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 154 + WREG32(R600_UVD_CTX_DATA, (v)); 155 + spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); 156 + } 157 + 111 158 /** 112 159 * r600_get_allowed_info_register - fetch the register for the info ioctl 113 160 *
+28 -197
drivers/gpu/drm/radeon/radeon.h
··· 2474 2474 2475 2475 #define RADEON_MIN_MMIO_SIZE 0x10000 2476 2476 2477 + uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg); 2478 + void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v); 2477 2479 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, 2478 2480 bool always_indirect) 2479 2481 { 2480 2482 /* The mmio size is 64kb at minimum. Allows the if to be optimized out. */ 2481 2483 if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) 2482 2484 return readl(((void __iomem *)rdev->rmmio) + reg); 2483 - else { 2484 - unsigned long flags; 2485 - uint32_t ret; 2486 - 2487 - spin_lock_irqsave(&rdev->mmio_idx_lock, flags); 2488 - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 2489 - ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 2490 - spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); 2491 - 2492 - return ret; 2493 - } 2485 + else 2486 + return r100_mm_rreg_slow(rdev, reg); 2494 2487 } 2495 - 2496 2488 static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, 2497 2489 bool always_indirect) 2498 2490 { 2499 2491 if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) 2500 2492 writel(v, ((void __iomem *)rdev->rmmio) + reg); 2501 - else { 2502 - unsigned long flags; 2503 - 2504 - spin_lock_irqsave(&rdev->mmio_idx_lock, flags); 2505 - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 2506 - writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 2507 - spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); 2508 - } 2493 + else 2494 + r100_mm_wreg_slow(rdev, reg, v); 2509 2495 } 2510 2496 2511 2497 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); ··· 2582 2596 #define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v)) 2583 2597 2584 2598 /* 2585 - * Indirect registers accessor 2599 + * Indirect registers accessors. 
2600 + * They used to be inlined, but this increases code size by ~65 kbytes. 2601 + * Since each performs a pair of MMIO ops 2602 + * within a spin_lock_irqsave/spin_unlock_irqrestore region, 2603 + * the cost of call+ret is almost negligible. MMIO and locking 2604 + * costs several dozens of cycles each at best, call+ret is ~5 cycles. 2586 2605 */ 2587 - static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 2588 - { 2589 - unsigned long flags; 2590 - uint32_t r; 2591 - 2592 - spin_lock_irqsave(&rdev->pcie_idx_lock, flags); 2593 - WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 2594 - r = RREG32(RADEON_PCIE_DATA); 2595 - spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); 2596 - return r; 2597 - } 2598 - 2599 - static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2600 - { 2601 - unsigned long flags; 2602 - 2603 - spin_lock_irqsave(&rdev->pcie_idx_lock, flags); 2604 - WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 2605 - WREG32(RADEON_PCIE_DATA, (v)); 2606 - spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); 2607 - } 2608 - 2609 - static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) 2610 - { 2611 - unsigned long flags; 2612 - u32 r; 2613 - 2614 - spin_lock_irqsave(&rdev->smc_idx_lock, flags); 2615 - WREG32(TN_SMC_IND_INDEX_0, (reg)); 2616 - r = RREG32(TN_SMC_IND_DATA_0); 2617 - spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 2618 - return r; 2619 - } 2620 - 2621 - static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2622 - { 2623 - unsigned long flags; 2624 - 2625 - spin_lock_irqsave(&rdev->smc_idx_lock, flags); 2626 - WREG32(TN_SMC_IND_INDEX_0, (reg)); 2627 - WREG32(TN_SMC_IND_DATA_0, (v)); 2628 - spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 2629 - } 2630 - 2631 - static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) 2632 - { 2633 - unsigned long flags; 2634 - u32 r; 2635 - 2636 - 
spin_lock_irqsave(&rdev->rcu_idx_lock, flags); 2637 - WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 2638 - r = RREG32(R600_RCU_DATA); 2639 - spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); 2640 - return r; 2641 - } 2642 - 2643 - static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2644 - { 2645 - unsigned long flags; 2646 - 2647 - spin_lock_irqsave(&rdev->rcu_idx_lock, flags); 2648 - WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 2649 - WREG32(R600_RCU_DATA, (v)); 2650 - spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); 2651 - } 2652 - 2653 - static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) 2654 - { 2655 - unsigned long flags; 2656 - u32 r; 2657 - 2658 - spin_lock_irqsave(&rdev->cg_idx_lock, flags); 2659 - WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 2660 - r = RREG32(EVERGREEN_CG_IND_DATA); 2661 - spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); 2662 - return r; 2663 - } 2664 - 2665 - static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2666 - { 2667 - unsigned long flags; 2668 - 2669 - spin_lock_irqsave(&rdev->cg_idx_lock, flags); 2670 - WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 2671 - WREG32(EVERGREEN_CG_IND_DATA, (v)); 2672 - spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); 2673 - } 2674 - 2675 - static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) 2676 - { 2677 - unsigned long flags; 2678 - u32 r; 2679 - 2680 - spin_lock_irqsave(&rdev->pif_idx_lock, flags); 2681 - WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 2682 - r = RREG32(EVERGREEN_PIF_PHY0_DATA); 2683 - spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 2684 - return r; 2685 - } 2686 - 2687 - static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2688 - { 2689 - unsigned long flags; 2690 - 2691 - spin_lock_irqsave(&rdev->pif_idx_lock, flags); 2692 - WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 2693 - WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); 2694 - 
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 2695 - } 2696 - 2697 - static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) 2698 - { 2699 - unsigned long flags; 2700 - u32 r; 2701 - 2702 - spin_lock_irqsave(&rdev->pif_idx_lock, flags); 2703 - WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 2704 - r = RREG32(EVERGREEN_PIF_PHY1_DATA); 2705 - spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 2706 - return r; 2707 - } 2708 - 2709 - static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2710 - { 2711 - unsigned long flags; 2712 - 2713 - spin_lock_irqsave(&rdev->pif_idx_lock, flags); 2714 - WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 2715 - WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); 2716 - spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 2717 - } 2718 - 2719 - static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) 2720 - { 2721 - unsigned long flags; 2722 - u32 r; 2723 - 2724 - spin_lock_irqsave(&rdev->uvd_idx_lock, flags); 2725 - WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2726 - r = RREG32(R600_UVD_CTX_DATA); 2727 - spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); 2728 - return r; 2729 - } 2730 - 2731 - static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2732 - { 2733 - unsigned long flags; 2734 - 2735 - spin_lock_irqsave(&rdev->uvd_idx_lock, flags); 2736 - WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2737 - WREG32(R600_UVD_CTX_DATA, (v)); 2738 - spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); 2739 - } 2740 - 2741 - 2742 - static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) 2743 - { 2744 - unsigned long flags; 2745 - u32 r; 2746 - 2747 - spin_lock_irqsave(&rdev->didt_idx_lock, flags); 2748 - WREG32(CIK_DIDT_IND_INDEX, (reg)); 2749 - r = RREG32(CIK_DIDT_IND_DATA); 2750 - spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); 2751 - return r; 2752 - } 2753 - 2754 - static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) 
2755 - { 2756 - unsigned long flags; 2757 - 2758 - spin_lock_irqsave(&rdev->didt_idx_lock, flags); 2759 - WREG32(CIK_DIDT_IND_INDEX, (reg)); 2760 - WREG32(CIK_DIDT_IND_DATA, (v)); 2761 - spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); 2762 - } 2606 + uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 2607 + void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 2608 + u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg); 2609 + void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2610 + u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg); 2611 + void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2612 + u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg); 2613 + void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2614 + u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg); 2615 + void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2616 + u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg); 2617 + void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2618 + u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg); 2619 + void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2620 + u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg); 2621 + void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v); 2763 2622 2764 2623 void r100_pll_errata_after_index(struct radeon_device *rdev); 2765 2624