1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
4 #include <linux/completion.h>
5 #include <linux/circ_buf.h>
6 #include <linux/list.h>
9 #include "a6xx_gmu.xml.h"
12 #define HFI_MSG_ID(val) [val] = #val
/*
 * Human-readable names for the host-to-firmware message IDs, indexed by ID
 * (HFI_MSG_ID stringifies the enumerator). Used only for error messages.
 */
static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
};
/*
 * Read one HFI packet from the shared-memory queue into 'data' (a buffer of
 * at least 'dwords' u32s). Returns the packet size in dwords, or 0 if the
 * queue is empty.
 */
static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
		u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	/* Queue empty: ask the GMU to interrupt us when data arrives */
	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	/* First dword of the packet is its header, which encodes the size */
	hdr = queue->data[index];

	/*
	 * If we are to assume that the GMU firmware is in fact a rational actor
	 * and is programmed to not send us a larger response than we expect
	 * then we can also assume that if the header size is unexpectedly large
	 * that it is due to memory corruption and/or hardware failure. In this
	 * case the only reasonable course of action is to BUG() to help harden
	 * the failure.
	 */

	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		/* header->size is in dwords; wrap around the ring buffer */
		index = (index + 1) % header->size;
	}

	/* Publish the new read position so the GMU sees the consumed space */
	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}
/*
 * Write a complete HFI message ('dwords' u32s, header included) into the
 * queue and ring the GMU doorbell interrupt. Returns 0 on success or
 * -ENOSPC if the ring buffer does not have room for the whole message.
 */
static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	/* Serialize writers; the GMU is the only reader of this queue */
	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		/* Account the drop in the shared header for the firmware */
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		/* header->size is in dwords; wrap around the ring buffer */
		index = (index + 1) % header->size;
	}

	/* Publish the new write position before interrupting the GMU */
	header->write_index = index;
	spin_unlock(&queue->lock);

	/* Ring the doorbell so the GMU processes the new message */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}
/*
 * Wait for the GMU to acknowledge the message identified by 'id'/'seqnum'.
 * On success, an optional response payload is copied into 'payload' (up to
 * 'payload_size' bytes). Returns 0 on success, -ETIMEDOUT if no response
 * interrupt arrived, -ENOENT if the response queue was empty, or -EINVAL if
 * the firmware rejected the message.
 */
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	/*
	 * Drain the response queue: skip asynchronous firmware error packets
	 * and stale acks until we find the ack for our sequence number.
	 */
	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");
			return -ENOENT;
		}

		/* F2H error packets report firmware-side problems; log and keep going */
		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		/* Not the ack we are waiting for - keep looking */
		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message id %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		/* The GMU received the message but reported an error */
		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}
/*
 * Stamp the HFI header into the first dword of 'data' ('size' bytes), queue
 * the message to the GMU and wait for the matching acknowledgment. An
 * optional response payload is copied to 'payload'. Returns 0 on success or
 * a negative error code from the queue write or the ack wait.
 */
static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	/* Sequence numbers are 12 bits wide in the header */
	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/* First dword of the message is the message header - fill it in */
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}
175 static int a6xx_hfi_send_gmu_init(struct a6xx_gmu
*gmu
, int boot_state
)
177 struct a6xx_hfi_msg_gmu_init_cmd msg
= { 0 };
179 msg
.dbg_buffer_addr
= (u32
) gmu
->debug
->iova
;
180 msg
.dbg_buffer_size
= (u32
) gmu
->debug
->size
;
181 msg
.boot_state
= boot_state
;
183 return a6xx_hfi_send_msg(gmu
, HFI_H2F_MSG_INIT
, &msg
, sizeof(msg
),
187 static int a6xx_hfi_get_fw_version(struct a6xx_gmu
*gmu
, u32
*version
)
189 struct a6xx_hfi_msg_fw_version msg
= { 0 };
191 /* Currently supporting version 1.1 */
192 msg
.supported_version
= (1 << 28) | (1 << 16);
194 return a6xx_hfi_send_msg(gmu
, HFI_H2F_MSG_FW_VERSION
, &msg
, sizeof(msg
),
195 version
, sizeof(*version
));
198 static int a6xx_hfi_send_perf_table(struct a6xx_gmu
*gmu
)
200 struct a6xx_hfi_msg_perf_table msg
= { 0 };
203 msg
.num_gpu_levels
= gmu
->nr_gpu_freqs
;
204 msg
.num_gmu_levels
= gmu
->nr_gmu_freqs
;
206 for (i
= 0; i
< gmu
->nr_gpu_freqs
; i
++) {
207 msg
.gx_votes
[i
].vote
= gmu
->gx_arc_votes
[i
];
208 msg
.gx_votes
[i
].freq
= gmu
->gpu_freqs
[i
] / 1000;
211 for (i
= 0; i
< gmu
->nr_gmu_freqs
; i
++) {
212 msg
.cx_votes
[i
].vote
= gmu
->cx_arc_votes
[i
];
213 msg
.cx_votes
[i
].freq
= gmu
->gmu_freqs
[i
] / 1000;
216 return a6xx_hfi_send_msg(gmu
, HFI_H2F_MSG_PERF_TABLE
, &msg
, sizeof(msg
),
/*
 * Populate the bus bandwidth vote table for the a618 target. The register
 * addresses and vote values are fixed, known-good magic for this SoC.
 */
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5007c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}
/*
 * Populate the bus bandwidth vote table for the default (sdm845/a630-class)
 * targets. The register addresses and vote values are fixed magic numbers.
 */
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5005c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes. This is used but the values for the
	 * sdm845 GMU are known and fixed so we can hard code them.
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x05;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}
286 static int a6xx_hfi_send_bw_table(struct a6xx_gmu
*gmu
)
288 struct a6xx_hfi_msg_bw_table msg
= { 0 };
289 struct a6xx_gpu
*a6xx_gpu
= container_of(gmu
, struct a6xx_gpu
, gmu
);
290 struct adreno_gpu
*adreno_gpu
= &a6xx_gpu
->base
;
292 if (adreno_is_a618(adreno_gpu
))
293 a618_build_bw_table(&msg
);
295 a6xx_build_bw_table(&msg
);
297 return a6xx_hfi_send_msg(gmu
, HFI_H2F_MSG_BW_TABLE
, &msg
, sizeof(msg
),
301 static int a6xx_hfi_send_test(struct a6xx_gmu
*gmu
)
303 struct a6xx_hfi_msg_test msg
= { 0 };
305 return a6xx_hfi_send_msg(gmu
, HFI_H2F_MSG_TEST
, &msg
, sizeof(msg
),
/*
 * Run the HFI boot sequence with the GMU: init handshake, firmware version
 * exchange, performance table, bandwidth table, then the closing test
 * message. Returns 0 on success or the first failing step's error code.
 */
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to get exchange version numbers per the sequence but at this
	 * point the kernel driver doesn't need to know the exact version of
	 * the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until next
	 * boot (the test message's result is deliberately not checked)
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}
/*
 * Quiesce the HFI queues at GMU shutdown: warn about any queue that still
 * holds unread messages, then reset every queue's read/write indexes so the
 * queues start empty on the next boot.
 */
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		/* Skip queues that were never initialized */
		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;
	}
}
/*
 * Initialize one HFI queue: the host-side bookkeeping (lock, sequence
 * counter, data pointer) and the queue header in the shared memory region
 * that the GMU firmware also reads and writes. 'virt'/'iova' are the CPU
 * and GMU views of the queue's data area; 'id' is the queue type ID.
 */
static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
		struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
		u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	/* Set up the shared memory header */
	header->iova = iova;
	/* Queue type in the high byte, queue ID in the low byte */
	header->type = 10 << 8 | id;
	header->status = 1;
	/* Queue capacity in dwords */
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	/* Start with receive interrupts requested */
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}
/*
 * Lay out the HFI shared memory region for the GMU: a queue table header,
 * followed by the per-queue headers, followed by the 4K data area of each
 * queue (command queue first, then the GMU response queue).
 */
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue: data area starts 4K into the buffer */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, 4);
}