/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *	Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))
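
/*
 * These constants pack a version as (major << 24) | (minor << 16) |
 * (third version field << 8); FW_1_65_10, for example, evaluates to
 * 0x01410A00. adev->uvd.fw_version (built in amdgpu_uvd_sw_init()
 * below) uses the same byte layout, so plain ">=" comparisons against
 * these constants order firmware versions correctly.
 */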

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default number of UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		version_major, version_minor, family_id);

	/*
	 * Limit the number of UVD handles depending on microcode major
	 * and minor versions. The first firmware version with support
	 * for 40 UVD instances is 1.80, so all subsequent versions
	 * provide it as well.
	 */
	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
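
	/*
	 * Firmware 1.80, for example, reports version_minor = 80 = 0x50,
	 * so the check above implements exactly the ">= 1.80" rule from
	 * the comment.
	 */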

	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
				(family_id << 8));

	if ((adev->asic_type == CHIP_POLARIS10 ||
	     adev->asic_type == CHIP_POLARIS11) &&
	    (adev->uvd.fw_version < FW_1_66_16))
		DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
			  version_major, version_minor);

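	/*
	 * VCPU BO layout, as implied by the size computation below: the
	 * firmware image (plus 8 bytes, page aligned), then the UVD stack
	 * and heap, then one session slot per supported handle.
	 */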
	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
				    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	ring = &adev->uvd.ring;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up UVD run queue.\n");
		return r;
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0, HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	kfree(adev->uvd.saved_bo);

	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

	amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
			      &adev->uvd.gpu_addr,
			      (void **)&adev->uvd.cpu_addr);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}
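
/*
 * If any decode handle is still open, amdgpu_uvd_suspend() snapshots the
 * whole VCPU BO into system memory and amdgpu_uvd_resume() restores it
 * verbatim; with no open handles, resume simply reloads the firmware
 * image and clears the rest of the buffer.
 */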
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < adev->uvd.max_handles; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			break;

	if (i == adev->uvd.max_handles)
		return 0;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->uvd.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->uvd.saved_bo, ptr, size);

	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

	if (adev->uvd.saved_bo != NULL) {
		memcpy_toio(ptr, adev->uvd.saved_bo, size);
		kfree(adev->uvd.saved_bo);
		adev->uvd.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct fence *fence;

			r = amdgpu_uvd_get_destroy_msg(ring, handle,
						       false, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

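/*
 * Pre-UVD-5.0 hardware can only address buffers inside a single 256MB
 * window (see the boundary checks in amdgpu_uvd_cs_pass2()), so clamp
 * every placement to the first 256MB of the domain.
 */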
765e7fbf 354static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
d38ceaf9
AD
355{
356 int i;
765e7fbf
CK
357 for (i = 0; i < abo->placement.num_placement; ++i) {
358 abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
359 abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
d38ceaf9
AD
360 }
361}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu device
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
	unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

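	/* one 4:2:0 frame: luma plane plus half again for chroma, rounded
	 * up to a 1KB multiple */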
	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch(level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;
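
		/*
		 * E.g. 1920x1088 at level 4.1: fs_in_mb = 120 * 68 = 8160,
		 * so num_dpb_buffer = 32768 / 8160 + 1 = 5 reference frames.
		 */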

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch(level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
					   * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}
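
/*
 * A UVD command is issued as three register writes: the target buffer
 * address goes to mmUVD_GPCOM_VCPU_DATA0/DATA1 (low/high 32 bits) and a
 * write to mmUVD_GPCOM_VCPU_CMD triggers execution; the parser below
 * therefore only records the data0/data1 dword offsets and invokes the
 * callback on the CMD write.
 */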

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	r = amdgpu_cs_sysvm_access_required(parser);
	if (r)
		return r;

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;
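	/* the NO_OP padding above keeps the IB at 16 dwords, which also
	 * satisfies the 16-dword alignment amdgpu_uvd_ring_parse_cs()
	 * demands of UVD IBs */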

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
		job->fence = fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = fence_get(f);
	amdgpu_bo_unref(&bo);
	fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

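	/*
	 * Message layout below, inferred from amdgpu_uvd_cs_msg(): msg[1]
	 * is the message type (0 = create) and msg[2] the session handle;
	 * the 0x780/0x440 pair looks like a 1920x1088 stream description.
	 */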
	/* stitch together an UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}
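
/*
 * UVD power management: ring_begin_use() cancels the idle timer and, if
 * it was not already pending, re-enables UVD power/clocks; ring_end_use()
 * re-arms the one second timer, and the idle worker gates UVD again once
 * no fences remain outstanding.
 */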

void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct fence *fence;
	long r;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	fence_put(fence);

error:
	return r;
}