]>
git.proxmox.com Git - mirror_ubuntu-disco-kernel.git/blob - drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
2 * Copyright 2017 Valve Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Andres Rodriguez
26 #include "amdgpu_ring.h"
28 static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper
*mapper
,
34 if (hw_ip
> AMDGPU_MAX_IP_NUM
)
37 mapper
->hw_ip
= hw_ip
;
38 mutex_init(&mapper
->lock
);
40 memset(mapper
->queue_map
, 0, sizeof(mapper
->queue_map
));
45 static struct amdgpu_ring
*amdgpu_get_cached_map(struct amdgpu_queue_mapper
*mapper
,
48 return mapper
->queue_map
[ring
];
51 static int amdgpu_update_cached_map(struct amdgpu_queue_mapper
*mapper
,
52 int ring
, struct amdgpu_ring
*pring
)
54 if (WARN_ON(mapper
->queue_map
[ring
])) {
55 DRM_ERROR("Un-expected ring re-map\n");
59 mapper
->queue_map
[ring
] = pring
;
64 static int amdgpu_identity_map(struct amdgpu_device
*adev
,
65 struct amdgpu_queue_mapper
*mapper
,
67 struct amdgpu_ring
**out_ring
)
71 switch (mapper
->hw_ip
) {
72 case AMDGPU_HW_IP_GFX
:
73 *out_ring
= &adev
->gfx
.gfx_ring
[ring
];
75 case AMDGPU_HW_IP_COMPUTE
:
76 *out_ring
= &adev
->gfx
.compute_ring
[ring
];
78 case AMDGPU_HW_IP_DMA
:
79 *out_ring
= &adev
->sdma
.instance
[ring
].ring
;
81 case AMDGPU_HW_IP_UVD
:
83 *out_ring
= &adev
->uvd
.inst
[instance
].ring
;
85 case AMDGPU_HW_IP_VCE
:
86 *out_ring
= &adev
->vce
.ring
[ring
];
88 case AMDGPU_HW_IP_UVD_ENC
:
89 instance
= ring
/ adev
->uvd
.num_enc_rings
;
91 &adev
->uvd
.inst
[instance
].ring_enc
[ring
%adev
->uvd
.num_enc_rings
];
93 case AMDGPU_HW_IP_VCN_DEC
:
94 *out_ring
= &adev
->vcn
.ring_dec
;
96 case AMDGPU_HW_IP_VCN_ENC
:
97 *out_ring
= &adev
->vcn
.ring_enc
[ring
];
101 DRM_ERROR("unknown HW IP type: %d\n", mapper
->hw_ip
);
105 return amdgpu_update_cached_map(mapper
, ring
, *out_ring
);
108 static enum amdgpu_ring_type
amdgpu_hw_ip_to_ring_type(int hw_ip
)
111 case AMDGPU_HW_IP_GFX
:
112 return AMDGPU_RING_TYPE_GFX
;
113 case AMDGPU_HW_IP_COMPUTE
:
114 return AMDGPU_RING_TYPE_COMPUTE
;
115 case AMDGPU_HW_IP_DMA
:
116 return AMDGPU_RING_TYPE_SDMA
;
117 case AMDGPU_HW_IP_UVD
:
118 return AMDGPU_RING_TYPE_UVD
;
119 case AMDGPU_HW_IP_VCE
:
120 return AMDGPU_RING_TYPE_VCE
;
122 DRM_ERROR("Invalid HW IP specified %d\n", hw_ip
);
127 static int amdgpu_lru_map(struct amdgpu_device
*adev
,
128 struct amdgpu_queue_mapper
*mapper
,
129 u32 user_ring
, bool lru_pipe_order
,
130 struct amdgpu_ring
**out_ring
)
133 int ring_type
= amdgpu_hw_ip_to_ring_type(mapper
->hw_ip
);
134 int ring_blacklist
[AMDGPU_MAX_RINGS
];
135 struct amdgpu_ring
*ring
;
137 /* 0 is a valid ring index, so initialize to -1 */
138 memset(ring_blacklist
, 0xff, sizeof(ring_blacklist
));
140 for (i
= 0, j
= 0; i
< AMDGPU_MAX_RINGS
; i
++) {
141 ring
= mapper
->queue_map
[i
];
143 ring_blacklist
[j
++] = ring
->idx
;
146 r
= amdgpu_ring_lru_get(adev
, ring_type
, ring_blacklist
,
147 j
, lru_pipe_order
, out_ring
);
151 return amdgpu_update_cached_map(mapper
, user_ring
, *out_ring
);
155 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
157 * @adev: amdgpu_device pointer
158 * @mgr: amdgpu_queue_mgr structure holding queue information
160 * Initialize the selected @mgr (all asics).
162 * Returns 0 on success, error on failure.
164 int amdgpu_queue_mgr_init(struct amdgpu_device
*adev
,
165 struct amdgpu_queue_mgr
*mgr
)
172 memset(mgr
, 0, sizeof(*mgr
));
174 for (i
= 0; i
< AMDGPU_MAX_IP_NUM
; ++i
) {
175 r
= amdgpu_queue_mapper_init(&mgr
->mapper
[i
], i
);
184 * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
186 * @adev: amdgpu_device pointer
187 * @mgr: amdgpu_queue_mgr structure holding queue information
189 * De-initialize the selected @mgr (all asics).
191 * Returns 0 on success, error on failure.
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr)
{
	/* nothing to tear down: the mappers own no dynamic resources */
	return 0;
}
200 * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
202 * @adev: amdgpu_device pointer
203 * @mgr: amdgpu_queue_mgr structure holding queue information
205 * @instance: HW instance
206 * @ring: user ring id
207 * @out_ring: pointer to mapped amdgpu_ring
209 * Map a userspace ring id to an appropriate kernel ring. Different
210 * policies are configurable at a HW IP level.
212 * Returns 0 on success, error on failure.
214 int amdgpu_queue_mgr_map(struct amdgpu_device
*adev
,
215 struct amdgpu_queue_mgr
*mgr
,
216 u32 hw_ip
, u32 instance
, u32 ring
,
217 struct amdgpu_ring
**out_ring
)
220 struct amdgpu_queue_mapper
*mapper
= &mgr
->mapper
[hw_ip
];
222 if (!adev
|| !mgr
|| !out_ring
)
225 if (hw_ip
>= AMDGPU_MAX_IP_NUM
)
228 if (ring
>= AMDGPU_MAX_RINGS
)
231 /* Right now all IPs have only one instance - multiple rings. */
233 DRM_DEBUG("invalid ip instance: %d\n", instance
);
238 case AMDGPU_HW_IP_GFX
:
239 ip_num_rings
= adev
->gfx
.num_gfx_rings
;
241 case AMDGPU_HW_IP_COMPUTE
:
242 ip_num_rings
= adev
->gfx
.num_compute_rings
;
244 case AMDGPU_HW_IP_DMA
:
245 ip_num_rings
= adev
->sdma
.num_instances
;
247 case AMDGPU_HW_IP_UVD
:
248 ip_num_rings
= adev
->uvd
.num_uvd_inst
;
250 case AMDGPU_HW_IP_VCE
:
251 ip_num_rings
= adev
->vce
.num_rings
;
253 case AMDGPU_HW_IP_UVD_ENC
:
255 adev
->uvd
.num_enc_rings
* adev
->uvd
.num_uvd_inst
;
257 case AMDGPU_HW_IP_VCN_DEC
:
260 case AMDGPU_HW_IP_VCN_ENC
:
261 ip_num_rings
= adev
->vcn
.num_enc_rings
;
264 DRM_DEBUG("unknown ip type: %d\n", hw_ip
);
268 if (ring
>= ip_num_rings
) {
269 DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
270 ring
, ip_num_rings
, hw_ip
);
274 mutex_lock(&mapper
->lock
);
276 *out_ring
= amdgpu_get_cached_map(mapper
, ring
);
283 switch (mapper
->hw_ip
) {
284 case AMDGPU_HW_IP_GFX
:
285 case AMDGPU_HW_IP_UVD
:
286 case AMDGPU_HW_IP_VCE
:
287 case AMDGPU_HW_IP_UVD_ENC
:
288 case AMDGPU_HW_IP_VCN_DEC
:
289 case AMDGPU_HW_IP_VCN_ENC
:
290 r
= amdgpu_identity_map(adev
, mapper
, ring
, out_ring
);
292 case AMDGPU_HW_IP_DMA
:
293 r
= amdgpu_lru_map(adev
, mapper
, ring
, false, out_ring
);
295 case AMDGPU_HW_IP_COMPUTE
:
296 r
= amdgpu_lru_map(adev
, mapper
, ring
, true, out_ring
);
301 DRM_DEBUG("unknown HW IP type: %d\n", mapper
->hw_ip
);
305 mutex_unlock(&mapper
->lock
);