/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_0.h"

#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "vega10_enum.h"
#include <uapi/linux/kfd_ioctl.h>

#define smnNBIF_MGCG_CTRL_LCLK	0x1013a05c

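/* Program the HDP remap registers so that the HDP memory/register flush
 * controls are reachable through the device's remapped MMIO page
 * (adev->rmmio_remap) at the KFD_MMIO_REMAP_* offsets.
 */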
static void nbio_v7_0_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

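/* Extract the ATI revision ID field from the RCC_DEV0_EPF0_STRAP0 strap register. */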
static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

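/* Enable or disable framebuffer read/write access through the BIF. */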
static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

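/* Flush the HDP write cache: either with a direct MMIO write through the
 * remapped HDP_MEM_FLUSH_CNTL register, or by emitting the register write
 * on the given ring when it supports emit_wreg.
 */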
static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

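/* Return the framebuffer size reported by RCC_CONFIG_MEMSIZE (in MB). */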
static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

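/* Program the doorbell aperture (offset and size) for the given SDMA
 * instance; when doorbells are not used the aperture size is cleared.
 */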
static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
		SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

static void nbio_v7_0_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							 bool enable)
{

}

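/* Program the doorbell range used by the interrupt handler (IH) ring. */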
static void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

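/* SYSHUB registers are not directly mapped; they are accessed indirectly
 * through the SYSHUB_INDEX/SYSHUB_DATA register pair.
 */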
static uint32_t nbio_7_0_read_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t offset)
{
	uint32_t data;

	WREG32_SOC15(NBIO, 0, mmSYSHUB_INDEX, offset);
	data = RREG32_SOC15(NBIO, 0, mmSYSHUB_DATA);

	return data;
}

static void nbio_7_0_write_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t offset,
					  uint32_t data)
{
	WREG32_SOC15(NBIO, 0, mmSYSHUB_INDEX, offset);
	WREG32_SOC15(NBIO, 0, mmSYSHUB_DATA, data);
}

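/* Enable or disable BIF medium grain clock gating (MGCG) on the NBIF LCLK
 * domain and on the SYSHUB SOCCLK/SHUBCLK domains, subject to the
 * AMD_CG_SUPPORT_BIF_MGCG flag. Each register is only written when its
 * value actually changes.
 */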
static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	/* NBIF_MGCG_CTRL_LCLK */
	def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK;
	else
		data &= ~NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK;

	if (def != data)
		WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);

	/* SYSHUB_MGCG_CTRL_SOCCLK */
	def = data = nbio_7_0_read_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SOCCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK;
	else
		data &= ~SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK;

	if (def != data)
		nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SOCCLK, data);

	/* SYSHUB_MGCG_CTRL_SHUBCLK */
	def = data = nbio_7_0_read_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
		data |= SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK;
	else
		data &= ~SYSHUB_MMREG_DIRECT_SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK;

	if (def != data)
		nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK, data);
}

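/* Enable or disable BIF memory light sleep by toggling the SLV/MST/REPLAY
 * memory LS bits in PCIE_CNTL2, subject to AMD_CG_SUPPORT_BIF_LS.
 */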
static void nbio_v7_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

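/* Report which BIF clockgating features are currently active. */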
static void nbio_v7_0_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

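/* Basic interrupt controller setup: point INTERRUPT_CNTL2 at the dummy page
 * and configure the dummy-read and snoop behaviour of IH requests.
 */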
static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static u32 nbio_v7_0_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_0_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_0_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_0_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

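/* Per-client reference/mask bits used when engines wait on
 * GPU_HDP_FLUSH_REQ/GPU_HDP_FLUSH_DONE for an HDP flush to complete.
 */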
const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

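/* Mark the device as running in passthrough mode when we are inside a
 * virtual machine but not using SR-IOV.
 */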
static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
{
	if (is_virtual_machine())	/* passthrough mode excludes sriov mode */
		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{

}

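/* NBIO 7.0 callbacks plugged into the common amdgpu_nbio_funcs interface. */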
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
	.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
	.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
	.get_rev_id = nbio_v7_0_get_rev_id,
	.mc_access_enable = nbio_v7_0_mc_access_enable,
	.hdp_flush = nbio_v7_0_hdp_flush,
	.get_memsize = nbio_v7_0_get_memsize,
	.sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v7_0_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_0_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_0_get_clockgating_state,
	.ih_control = nbio_v7_0_ih_control,
	.init_registers = nbio_v7_0_init_registers,
	.detect_hw_virt = nbio_v7_0_detect_hw_virt,
	.remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
};