#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xcp.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- enum amdgpu_gfx_partition mode;
+ int mode;
char *partition_mode;
- mode = adev->gfx.funcs->query_partition_mode(adev);
+ mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
return -EINVAL;
}
- if (!adev->kfd.init_complete)
- return -EPERM;
-
- mutex_lock(&adev->gfx.partition_mutex);
-
- if (mode == adev->gfx.funcs->query_partition_mode(adev))
- goto out;
-
- ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
- if (ret)
- goto out;
-
- amdgpu_amdkfd_device_fini_sw(adev);
-
- adev->gfx.funcs->switch_partition_mode(adev, mode);
-
- amdgpu_amdkfd_device_probe(adev);
- amdgpu_amdkfd_device_init(adev);
- /* If KFD init failed, return failure */
- if (!adev->kfd.init_complete)
- ret = -EIO;
-
- amdgpu_amdkfd_unlock_kfd(adev);
-out:
- mutex_unlock(&adev->gfx.partition_mutex);
+ ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
if (ret)
return ret;
#include "gc/gc_9_4_3_sh_mask.h"
#include "gfx_v9_4_3.h"
+#include "amdgpu_xcp.h"
MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
return mode;
}
-static enum amdgpu_gfx_partition
-gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev)
-{
- enum amdgpu_gfx_partition mode = adev->gfx.partition_mode;
-
- if (adev->nbio.funcs->get_compute_partition_mode)
- mode = adev->nbio.funcs->get_compute_partition_mode(adev);
-
- return mode;
-}
-
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
-					enum amdgpu_gfx_partition mode)
+					int num_xccs_per_xcp)
{
+	/* Program every XCC's CP_HYP_XCP_CTL so that groups of
+	 * num_xccs_per_xcp XCCs form one partition (XCP); each XCC gets a
+	 * virtual id relative to its partition. Partition-mode policy
+	 * (mode -> XCC count, locking, KFD teardown) has moved to the
+	 * amdgpu_xcp layer, so this hook only does the register programming.
+	 *
+	 * NOTE(review): i % num_xccs_per_xcp is undefined for 0 — presumably
+	 * amdgpu_xcp_switch_partition_mode() validates the mode and never
+	 * passes 0 here; confirm against the xcp manager code.
+	 */
+	int i, num_xcc;
	u32 tmp = 0;
-	int num_xcc_per_partition, i, num_xcc;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
-	switch (mode) {
-	case AMDGPU_SPX_PARTITION_MODE:
-		num_xcc_per_partition = num_xcc;
-		break;
-	case AMDGPU_DPX_PARTITION_MODE:
-		num_xcc_per_partition = num_xcc / 2;
-		break;
-	case AMDGPU_TPX_PARTITION_MODE:
-		num_xcc_per_partition = num_xcc / 3;
-		break;
-	case AMDGPU_QPX_PARTITION_MODE:
-		num_xcc_per_partition = num_xcc / 4;
-		break;
-	case AMDGPU_CPX_PARTITION_MODE:
-		num_xcc_per_partition = 1;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* TODO:
-	 * Stop user queues and threads, and make sure GPU is empty of work.
-	 */
	for (i = 0; i < num_xcc; i++) {
		tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
-				    num_xcc_per_partition);
+				    num_xccs_per_xcp);
		tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
-				    i % num_xcc_per_partition);
+				    i % num_xccs_per_xcp);
		WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp);
	}
-	if (adev->nbio.funcs->set_compute_partition_mode)
-		adev->nbio.funcs->set_compute_partition_mode(adev, mode);
-
-	adev->gfx.num_xcc_per_xcp = num_xcc_per_partition;
-	adev->gfx.partition_mode = mode;
+	/* Cache the grouping so the rest of the gfx code can size per-XCP
+	 * resources; the mode enum itself is now tracked by amdgpu_xcp.
+	 */
+	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
	return 0;
}
.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
- .query_partition_mode = &gfx_v9_4_3_query_compute_partition,
.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
.query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition,
};
return r;
}
- if (adev->gfx.partition_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
- gfx_v9_4_3_switch_compute_partition(adev,
- amdgpu_user_partt_mode);
-
/* set the virtual and physical id based on partition_mode */
gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id);
{
int r, i, num_xcc;
+ if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+ amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode);
+
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
r = gfx_v9_4_3_xcc_cp_resume(adev, i);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- adev->gfx.partition_mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
-
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
gfx_v9_4_3_set_kiq_pm4_funcs(adev);
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
#include "amdgpu.h"
+#include "amdgpu_xcp.h"
#define MQD_SIZE_ALIGNED 768
struct kfd_node *node;
uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
unsigned int max_proc_per_quantum;
- int num_xcd;
+ int num_xcd, partition_mode;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC1);
* If the VMID range changes for GFX9.4.3, then this code MUST be
* revisited.
*/
+ partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr);
if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
- kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
vmid_num_kfd /= 2;
first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
node->start_xcc_id = node->num_xcc_per_node * i;
if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
- kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
/* For GFX9.4.3 and CPX mode, first XCD gets VMID range
* 4-9 and second XCD gets VMID range 10-15.