/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "vega10/soc15ip.h"
#include "vega10/NBIO/nbio_6_1_offset.h"
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

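/*
 * Acknowledge a message from the host (PF): set RCV_MSG_ACK in the mailbox
 * control register, then poll until RCV_MSG_VALID is deasserted or the
 * timeout expires.
 */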
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);

	/* Wait for RCV_MSG_VALID to be 0 */
	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}
}

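/*
 * Raise or drop TRN_MSG_VALID, which tells the host whether a
 * guest-to-host message is pending in the transmit buffer.
 */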
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
		      reg);
}

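/*
 * Write the request ID into the MSGBUF_DATA field of the first transmit
 * data word, then mark the message valid to hand it to the host.
 */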
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);

	xgpu_ai_mailbox_set_valid(adev, true);
}

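/*
 * Check whether the expected event is pending in the receive buffer.
 * Returns 0 and acks the message on a match, -ENOENT otherwise.
 * RCV_MSG_VALID is deliberately not tested for the FLR completion
 * message, which is polled for directly during FLR handling and can
 * arrive without the valid bit set.
 */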
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

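/*
 * Poll for TRN_MSG_ACK from the host. Returns 0 on success or -ETIME on
 * timeout; AI_MAILBOX_TIMEDOUT is treated as a count of 1 ms polls here.
 */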
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Didn't get ack from pf\n");
			r = -ETIME;
			break;
		}
		msleep(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}

	return r;
}

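/*
 * Poll the receive buffer until the given event arrives or the timeout
 * expires.
 */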
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;

	r = xgpu_ai_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Didn't get msg from pf\n");
			r = -ETIME;
			break;
		}
		msleep(1);
		timeout -= 1;

		r = xgpu_ai_mailbox_rcv_msg(adev, event);
	}

	return r;
}

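/*
 * Full request/ack round trip: send a request to the host, wait for the
 * transmit ack, drop TRN_MSG_VALID, and, for the GPU access requests,
 * wait for the host's READY_TO_ACCESS_GPU reply.
 */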
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		return r;

	xgpu_ai_mailbox_set_valid(adev, false);

	/* the host replies READY_TO_ACCESS_GPU to init/fini/reset access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r)
			return r;
	}

	return 0;
}

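/* Ask the host to reset this VF's GPU state. */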
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

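/*
 * Request (and, below, release) exclusive full GPU access from the host
 * around init and teardown; both map onto mailbox access requests.
 */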
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

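/*
 * Interrupt handlers and enable/disable setters for the two mailbox
 * interrupt sources: TRN_MSG_ACK (the host acked our message, ACK_INT_EN)
 * and RCV_MSG_VALID (the host sent us a message, VALID_INT_EN).
 */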
static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack interrupt, nothing to do\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

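/*
 * Deferred work for a host-initiated function level reset (FLR): wait for
 * the host's FLR completion message, then kick off SR-IOV GPU recovery.
 */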
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG becomes IDH_FLR_NOTIFICATION_CMPL */
	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	amdgpu_sriov_gpu_reset(adev, false);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* see what event we get */
	r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

	/* only handle IDH_FLR_NOTIFICATION for now */
	if (!r)
		schedule_work(&adev->virt.flr_work);

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

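/*
 * Register the two mailbox interrupt sources. The numeric source IDs
 * (135 and 138) are kept from the original code; per the usage above,
 * 135 feeds the receive (valid-message) path and 138 the ack path.
 */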
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
};