/*
 * Copyright 2017 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andres Rodriguez
 */
26 #include "amdgpu_ring.h"
28 static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper
*mapper
,
34 if (hw_ip
> AMDGPU_MAX_IP_NUM
)
37 mapper
->hw_ip
= hw_ip
;
38 mutex_init(&mapper
->lock
);
40 memset(mapper
->queue_map
, 0, sizeof(mapper
->queue_map
));
45 static struct amdgpu_ring
*amdgpu_get_cached_map(struct amdgpu_queue_mapper
*mapper
,
48 return mapper
->queue_map
[ring
];
51 static int amdgpu_update_cached_map(struct amdgpu_queue_mapper
*mapper
,
52 int ring
, struct amdgpu_ring
*pring
)
54 if (WARN_ON(mapper
->queue_map
[ring
])) {
55 DRM_ERROR("Un-expected ring re-map\n");
59 mapper
->queue_map
[ring
] = pring
;
64 static int amdgpu_identity_map(struct amdgpu_device
*adev
,
65 struct amdgpu_queue_mapper
*mapper
,
67 struct amdgpu_ring
**out_ring
)
69 switch (mapper
->hw_ip
) {
70 case AMDGPU_HW_IP_GFX
:
71 *out_ring
= &adev
->gfx
.gfx_ring
[ring
];
73 case AMDGPU_HW_IP_COMPUTE
:
74 *out_ring
= &adev
->gfx
.compute_ring
[ring
];
76 case AMDGPU_HW_IP_DMA
:
77 *out_ring
= &adev
->sdma
.instance
[ring
].ring
;
79 case AMDGPU_HW_IP_UVD
:
80 *out_ring
= &adev
->uvd
.ring
;
82 case AMDGPU_HW_IP_VCE
:
83 *out_ring
= &adev
->vce
.ring
[ring
];
85 case AMDGPU_HW_IP_UVD_ENC
:
86 *out_ring
= &adev
->uvd
.ring_enc
[ring
];
88 case AMDGPU_HW_IP_VCN_DEC
:
89 *out_ring
= &adev
->vcn
.ring_dec
;
91 case AMDGPU_HW_IP_VCN_ENC
:
92 *out_ring
= &adev
->vcn
.ring_enc
[ring
];
96 DRM_ERROR("unknown HW IP type: %d\n", mapper
->hw_ip
);
100 return amdgpu_update_cached_map(mapper
, ring
, *out_ring
);
103 static enum amdgpu_ring_type
amdgpu_hw_ip_to_ring_type(int hw_ip
)
106 case AMDGPU_HW_IP_GFX
:
107 return AMDGPU_RING_TYPE_GFX
;
108 case AMDGPU_HW_IP_COMPUTE
:
109 return AMDGPU_RING_TYPE_COMPUTE
;
110 case AMDGPU_HW_IP_DMA
:
111 return AMDGPU_RING_TYPE_SDMA
;
112 case AMDGPU_HW_IP_UVD
:
113 return AMDGPU_RING_TYPE_UVD
;
114 case AMDGPU_HW_IP_VCE
:
115 return AMDGPU_RING_TYPE_VCE
;
117 DRM_ERROR("Invalid HW IP specified %d\n", hw_ip
);
122 static int amdgpu_lru_map(struct amdgpu_device
*adev
,
123 struct amdgpu_queue_mapper
*mapper
,
125 struct amdgpu_ring
**out_ring
)
128 int ring_type
= amdgpu_hw_ip_to_ring_type(mapper
->hw_ip
);
129 int ring_blacklist
[AMDGPU_MAX_RINGS
];
130 struct amdgpu_ring
*ring
;
132 /* 0 is a valid ring index, so initialize to -1 */
133 memset(ring_blacklist
, 0xff, sizeof(ring_blacklist
));
135 for (i
= 0, j
= 0; i
< AMDGPU_MAX_RINGS
; i
++) {
136 ring
= mapper
->queue_map
[i
];
138 ring_blacklist
[j
++] = ring
->idx
;
141 r
= amdgpu_ring_lru_get(adev
, ring_type
, ring_blacklist
,
146 return amdgpu_update_cached_map(mapper
, user_ring
, *out_ring
);
150 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
152 * @adev: amdgpu_device pointer
153 * @mgr: amdgpu_queue_mgr structure holding queue information
155 * Initialize the the selected @mgr (all asics).
157 * Returns 0 on success, error on failure.
159 int amdgpu_queue_mgr_init(struct amdgpu_device
*adev
,
160 struct amdgpu_queue_mgr
*mgr
)
167 memset(mgr
, 0, sizeof(*mgr
));
169 for (i
= 0; i
< AMDGPU_MAX_IP_NUM
; ++i
) {
170 r
= amdgpu_queue_mapper_init(&mgr
->mapper
[i
], i
);
/**
 * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * De-initialize the selected @mgr (all asics). The mapper holds no
 * resources beyond its mutex, so nothing needs to be torn down.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr)
{
	return 0;
}
195 * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
197 * @adev: amdgpu_device pointer
198 * @mgr: amdgpu_queue_mgr structure holding queue information
200 * @instance: HW instance
201 * @ring: user ring id
202 * @our_ring: pointer to mapped amdgpu_ring
204 * Map a userspace ring id to an appropriate kernel ring. Different
205 * policies are configurable at a HW IP level.
207 * Returns 0 on success, error on failure.
209 int amdgpu_queue_mgr_map(struct amdgpu_device
*adev
,
210 struct amdgpu_queue_mgr
*mgr
,
211 int hw_ip
, int instance
, int ring
,
212 struct amdgpu_ring
**out_ring
)
215 struct amdgpu_queue_mapper
*mapper
= &mgr
->mapper
[hw_ip
];
217 if (!adev
|| !mgr
|| !out_ring
)
220 if (hw_ip
>= AMDGPU_MAX_IP_NUM
)
223 if (ring
>= AMDGPU_MAX_RINGS
)
226 /* Right now all IPs have only one instance - multiple rings. */
228 DRM_ERROR("invalid ip instance: %d\n", instance
);
233 case AMDGPU_HW_IP_GFX
:
234 ip_num_rings
= adev
->gfx
.num_gfx_rings
;
236 case AMDGPU_HW_IP_COMPUTE
:
237 ip_num_rings
= adev
->gfx
.num_compute_rings
;
239 case AMDGPU_HW_IP_DMA
:
240 ip_num_rings
= adev
->sdma
.num_instances
;
242 case AMDGPU_HW_IP_UVD
:
245 case AMDGPU_HW_IP_VCE
:
246 ip_num_rings
= adev
->vce
.num_rings
;
248 case AMDGPU_HW_IP_UVD_ENC
:
249 ip_num_rings
= adev
->uvd
.num_enc_rings
;
251 case AMDGPU_HW_IP_VCN_DEC
:
254 case AMDGPU_HW_IP_VCN_ENC
:
255 ip_num_rings
= adev
->vcn
.num_enc_rings
;
258 DRM_ERROR("unknown ip type: %d\n", hw_ip
);
262 if (ring
>= ip_num_rings
) {
263 DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
264 ring
, ip_num_rings
, hw_ip
);
268 mutex_lock(&mapper
->lock
);
270 *out_ring
= amdgpu_get_cached_map(mapper
, ring
);
277 switch (mapper
->hw_ip
) {
278 case AMDGPU_HW_IP_GFX
:
279 case AMDGPU_HW_IP_UVD
:
280 case AMDGPU_HW_IP_VCE
:
281 case AMDGPU_HW_IP_UVD_ENC
:
282 case AMDGPU_HW_IP_VCN_DEC
:
283 case AMDGPU_HW_IP_VCN_ENC
:
284 r
= amdgpu_identity_map(adev
, mapper
, ring
, out_ring
);
286 case AMDGPU_HW_IP_DMA
:
287 case AMDGPU_HW_IP_COMPUTE
:
288 r
= amdgpu_lru_map(adev
, mapper
, ring
, out_ring
);
293 DRM_ERROR("unknown HW IP type: %d\n", mapper
->hw_ip
);
297 mutex_unlock(&mapper
->lock
);