git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge tag 'drm-amdkfd-next-2017-10-18' of git://people.freedesktop.org/~gabbayo/linux...
author: Dave Airlie <airlied@redhat.com>
Fri, 20 Oct 2017 05:54:44 +0000 (15:54 +1000)
committer: Dave Airlie <airlied@redhat.com>
Fri, 20 Oct 2017 05:54:44 +0000 (15:54 +1000)
This is the amdkfd pull request for 4.15 merge window.
The patches here are relevant only for Kaveri and Carrizo. Still no dGPU patches.

The main goal is to continue alignment with the internal AMD development tree.

The following is a summary of the changes:
- Improvements and fixes to suspend/resume code
- Improvements and fixes to process termination code
- Cleanups in the queue unmapping functionality
- Reuse code from amdgpu

* tag 'drm-amdkfd-next-2017-10-18' of git://people.freedesktop.org/~gabbayo/linux:
  drm/amdkfd: Improve multiple SDMA queues support per process
  drm/amdkfd: Limit queue number per process and device to 127
  drm/amdkfd: Clean up process queue management
  drm/amdkfd: Compress unnecessary function parameters
  drm/amdkfd: Improve process termination handling
  drm/amdkfd: Avoid submitting an unnecessary packet to HWS
  drm/amdkfd: Fix MQD updates
  drm/amdkfd: Pass filter params to unmap_queues_cpsch
  drm/amdkfd: move locking outside of unmap_queues_cpsch
  drm/amdkfd: Avoid name confusion involved in queue unmapping
  drm/amdkfd: Drop _nocpsch suffix from shared functions
  drm/amdkfd: Reuse CHIP_* from amdgpu v2
  drm/amdkfd: Use VMID bitmap from KGD v2
  drm/amdkfd: Adjust dequeue latencies and timeouts
  drm/amdkfd: Rectify the jiffies calculation error with milliseconds v2
  drm/amdkfd: Fix suspend/resume issue on Carrizo v2
  drm/amdkfd: Reorganize kfd resume code

155 files changed:
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h [deleted file]
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
drivers/gpu/drm/amd/powerplay/smumgr/Makefile
drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h [deleted file]
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence_reg.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gemfs.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gemfs.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_guc_submission.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_utils.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpio_phy.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_guc.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_guc.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_loader.c
drivers/gpu/drm/i915/intel_guc_log.c
drivers/gpu/drm/i915/intel_guc_log.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_huc.c
drivers/gpu/drm/i915/intel_huc.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pipe_crc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uc.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/i915/intel_uc_fw.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_uc_fw.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/selftests/huge_gem_object.c
drivers/gpu/drm/i915/selftests/huge_pages.c [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/i915_gem_request.c
drivers/gpu/drm/i915/selftests/i915_live_selftests.h
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/selftests/mock_gtt.c
drivers/gpu/drm/i915/selftests/scatterlist.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
include/linux/shmem_fs.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/i915_drm.h
mm/shmem.c

index 25a95c95df14dba680cab9d337bd1d5371e047fc..ef9a3b6d7b6236beb3b6ce796297f2b72eeb594b 100644 (file)
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
        amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
-       amdgpu_queue_mgr.o amdgpu_vf_error.o
+       amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
index a23b8af953191702346df39260312709e72daa07..cbcb6a153abae61810f1223e4080a2f18af60706 100644 (file)
@@ -732,10 +732,14 @@ struct amdgpu_ctx {
        struct amdgpu_device    *adev;
        struct amdgpu_queue_mgr queue_mgr;
        unsigned                reset_counter;
+       uint32_t                vram_lost_counter;
        spinlock_t              ring_lock;
        struct dma_fence        **fences;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
-       bool preamble_presented;
+       bool                    preamble_presented;
+       enum amd_sched_priority init_priority;
+       enum amd_sched_priority override_priority;
+       struct mutex            lock;
 };
 
 struct amdgpu_ctx_mgr {
@@ -752,13 +756,18 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct dma_fence *fence, uint64_t *seq);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+                                 enum amd_sched_priority priority);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp);
 
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
+
 /*
  * file private structure
  */
@@ -770,7 +779,6 @@ struct amdgpu_fpriv {
        struct mutex            bo_list_lock;
        struct idr              bo_list_handles;
        struct amdgpu_ctx_mgr   ctx_mgr;
-       u32                     vram_lost_counter;
 };
 
 /*
@@ -871,7 +879,7 @@ struct amdgpu_mec {
 struct amdgpu_kiq {
        u64                     eop_gpu_addr;
        struct amdgpu_bo        *eop_obj;
-       struct mutex            ring_mutex;
+       spinlock_t              ring_lock;
        struct amdgpu_ring      ring;
        struct amdgpu_irq_src   irq;
 };
@@ -1035,6 +1043,10 @@ struct amdgpu_gfx {
        bool                            in_suspend;
        /* NGG */
        struct amdgpu_ngg               ngg;
+
+       /* pipe reservation */
+       struct mutex                    pipe_reserve_mutex;
+       DECLARE_BITMAP                  (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1113,6 +1125,7 @@ struct amdgpu_job {
        uint32_t                gds_base, gds_size;
        uint32_t                gws_base, gws_size;
        uint32_t                oa_base, oa_size;
+       uint32_t                vram_lost_counter;
 
        /* user fence handling */
        uint64_t                uf_addr;
@@ -1138,7 +1151,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
 /*
  * Writeback
  */
-#define AMDGPU_MAX_WB 1024     /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 512      /* Reserve at most 512 WB slots for amdgpu-owned rings. */
 
 struct amdgpu_wb {
        struct amdgpu_bo        *wb_obj;
@@ -1378,6 +1391,18 @@ struct amdgpu_atcs {
        struct amdgpu_atcs_functions functions;
 };
 
+/*
+ * Firmware VRAM reservation
+ */
+struct amdgpu_fw_vram_usage {
+       u64 start_offset;
+       u64 size;
+       struct amdgpu_bo *reserved_bo;
+       void *va;
+};
+
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
+
 /*
  * CGS
  */
@@ -1582,6 +1607,8 @@ struct amdgpu_device {
        struct delayed_work     late_init_work;
 
        struct amdgpu_virt      virt;
+       /* firmware VRAM reservation */
+       struct amdgpu_fw_vram_usage fw_vram_usage;
 
        /* link all shadow bo */
        struct list_head                shadow_list;
@@ -1833,8 +1860,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
 extern const int amdgpu_max_kms_ioctl;
 
-bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
-                         struct amdgpu_fpriv *fpriv);
 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
 void amdgpu_driver_unload_kms(struct drm_device *dev);
 void amdgpu_driver_lastclose_kms(struct drm_device *dev);
index ce443586a0c71c13bd36472558473c0beae9b081..f66d33e4bacae88ad9e0e4d08478820836c56d7b 100644 (file)
@@ -1807,6 +1807,8 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
        uint16_t data_offset;
        int usage_bytes = 0;
        struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+       u64 start_addr;
+       u64 size;
 
        if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
                firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@@ -1815,7 +1817,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
                          le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
                          le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
 
-               usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+               start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
+               size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
+
+               if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+                       (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+                       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+                       /* Firmware request VRAM reservation for SR-IOV */
+                       adev->fw_vram_usage.start_offset = (start_addr &
+                               (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+                       adev->fw_vram_usage.size = size << 10;
+                       /* Use the default scratch size */
+                       usage_bytes = 0;
+               } else {
+                       usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+               }
        }
        ctx->scratch_size_bytes = 0;
        if (usage_bytes == 0)
index ab83dfcabb41c85ace7017f327238d18ab1831da..f7fceb63413c9b1c0769a85d38116ba15e43f2e4 100644 (file)
@@ -90,12 +90,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                goto free_chunk;
        }
 
+       mutex_lock(&p->ctx->lock);
+
        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
-               goto put_ctx;
+               goto free_chunk;
        }
 
        p->nchunks = cs->in.num_chunks;
@@ -103,7 +105,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                            GFP_KERNEL);
        if (!p->chunks) {
                ret = -ENOMEM;
-               goto put_ctx;
+               goto free_chunk;
        }
 
        for (i = 0; i < p->nchunks; i++) {
@@ -170,6 +172,11 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        if (ret)
                goto free_all_kdata;
 
+       if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
+               ret = -ECANCELED;
+               goto free_all_kdata;
+       }
+
        if (p->uf_entry.robj)
                p->job->uf_addr = uf_offset;
        kfree(chunk_array);
@@ -183,8 +190,6 @@ free_partial_kdata:
        kfree(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
-put_ctx:
-       amdgpu_ctx_put(p->ctx);
 free_chunk:
        kfree(chunk_array);
 
@@ -705,7 +710,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 
        list_for_each_entry(e, &p->validated, tv.head) {
                struct reservation_object *resv = e->robj->tbo.resv;
-               r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
+               r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
+                                    amdgpu_bo_explicit_sync(e->robj));
 
                if (r)
                        return r;
@@ -736,8 +742,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 
        dma_fence_put(parser->fence);
 
-       if (parser->ctx)
+       if (parser->ctx) {
+               mutex_unlock(&parser->ctx->lock);
                amdgpu_ctx_put(parser->ctx);
+       }
        if (parser->bo_list)
                amdgpu_bo_list_put(parser->bo_list);
 
@@ -844,14 +852,58 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_ring *ring = p->job->ring;
-       int i, r;
+       int r;
 
        /* Only for UVD/VCE VM emulation */
-       if (ring->funcs->parse_cs) {
-               for (i = 0; i < p->job->num_ibs; i++) {
-                       r = amdgpu_ring_parse_cs(ring, p, i);
+       if (p->job->ring->funcs->parse_cs) {
+               unsigned i, j;
+
+               for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+                       struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+                       struct amdgpu_bo_va_mapping *m;
+                       struct amdgpu_bo *aobj = NULL;
+                       struct amdgpu_cs_chunk *chunk;
+                       struct amdgpu_ib *ib;
+                       uint64_t offset;
+                       uint8_t *kptr;
+
+                       chunk = &p->chunks[i];
+                       ib = &p->job->ibs[j];
+                       chunk_ib = chunk->kdata;
+
+                       if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+                               continue;
+
+                       r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
+                                                  &aobj, &m);
+                       if (r) {
+                               DRM_ERROR("IB va_start is invalid\n");
+                               return r;
+                       }
+
+                       if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+                           (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+                               DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+                               return -EINVAL;
+                       }
+
+                       /* the IB should be reserved at this point */
+                       r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+                       if (r) {
+                               return r;
+                       }
+
+                       offset = m->start * AMDGPU_GPU_PAGE_SIZE;
+                       kptr += chunk_ib->va_start - offset;
+
+                       memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+                       amdgpu_bo_kunmap(aobj);
+
+                       r = amdgpu_ring_parse_cs(ring, p, j);
                        if (r)
                                return r;
+
+                       j++;
                }
        }
 
@@ -918,54 +970,18 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
                parser->job->ring = ring;
 
-               if (ring->funcs->parse_cs) {
-                       struct amdgpu_bo_va_mapping *m;
-                       struct amdgpu_bo *aobj = NULL;
-                       uint64_t offset;
-                       uint8_t *kptr;
-
-                       r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
-                                                  &aobj, &m);
-                       if (r) {
-                               DRM_ERROR("IB va_start is invalid\n");
-                               return r;
-                       }
-
-                       if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
-                           (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
-                               DRM_ERROR("IB va_start+ib_bytes is invalid\n");
-                               return -EINVAL;
-                       }
-
-                       /* the IB should be reserved at this point */
-                       r = amdgpu_bo_kmap(aobj, (void **)&kptr);
-                       if (r) {
-                               return r;
-                       }
-
-                       offset = m->start * AMDGPU_GPU_PAGE_SIZE;
-                       kptr += chunk_ib->va_start - offset;
-
-                       r =  amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
-                       if (r) {
-                               DRM_ERROR("Failed to get ib !\n");
-                               return r;
-                       }
-
-                       memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
-                       amdgpu_bo_kunmap(aobj);
-               } else {
-                       r =  amdgpu_ib_get(adev, vm, 0, ib);
-                       if (r) {
-                               DRM_ERROR("Failed to get ib !\n");
-                               return r;
-                       }
-
+               r =  amdgpu_ib_get(adev, vm,
+                                       ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
+                                       ib);
+               if (r) {
+                       DRM_ERROR("Failed to get ib !\n");
+                       return r;
                }
 
                ib->gpu_addr = chunk_ib->va_start;
                ib->length_dw = chunk_ib->ib_bytes / 4;
                ib->flags = chunk_ib->flags;
+
                j++;
        }
 
@@ -975,7 +991,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
            parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
                return -EINVAL;
 
-       return 0;
+       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
 }
 
 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1176,6 +1192,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        job->uf_sequence = seq;
 
        amdgpu_job_free_resources(job);
+       amdgpu_ring_priority_get(job->ring,
+                                amd_sched_get_job_priority(&job->base));
 
        trace_amdgpu_cs_ioctl(job);
        amd_sched_entity_push_job(&job->base);
@@ -1189,7 +1207,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
        union drm_amdgpu_cs *cs = data;
        struct amdgpu_cs_parser parser = {};
        bool reserved_buffers = false;
@@ -1197,8 +1214,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
        if (!adev->accel_working)
                return -EBUSY;
-       if (amdgpu_kms_vram_lost(adev, fpriv))
-               return -ENODEV;
 
        parser.adev = adev;
        parser.filp = filp;
@@ -1209,6 +1224,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                goto out;
        }
 
+       r = amdgpu_cs_ib_fill(adev, &parser);
+       if (r)
+               goto out;
+
        r = amdgpu_cs_parser_bos(&parser, data);
        if (r) {
                if (r == -ENOMEM)
@@ -1219,9 +1238,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        }
 
        reserved_buffers = true;
-       r = amdgpu_cs_ib_fill(adev, &parser);
-       if (r)
-               goto out;
 
        r = amdgpu_cs_dependencies(adev, &parser);
        if (r) {
@@ -1257,16 +1273,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
        union drm_amdgpu_wait_cs *wait = data;
        struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
        struct amdgpu_ring *ring = NULL;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        long r;
 
-       if (amdgpu_kms_vram_lost(adev, fpriv))
-               return -ENODEV;
-
        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;
@@ -1284,6 +1296,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
                r = PTR_ERR(fence);
        else if (fence) {
                r = dma_fence_wait_timeout(fence, true, timeout);
+               if (r > 0 && fence->error)
+                       r = fence->error;
                dma_fence_put(fence);
        } else
                r = 1;
@@ -1335,16 +1349,12 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
        union drm_amdgpu_fence_to_handle *info = data;
        struct dma_fence *fence;
        struct drm_syncobj *syncobj;
        struct sync_file *sync_file;
        int fd, r;
 
-       if (amdgpu_kms_vram_lost(adev, fpriv))
-               return -ENODEV;
-
        fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
        if (IS_ERR(fence))
                return PTR_ERR(fence);
@@ -1425,6 +1435,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
 
                if (r == 0)
                        break;
+
+               if (fence->error)
+                       return fence->error;
        }
 
        memset(wait, 0, sizeof(*wait));
@@ -1485,7 +1498,7 @@ out:
        wait->out.status = (r > 0);
        wait->out.first_signaled = first;
        /* set return value 0 to indicate success */
-       r = 0;
+       r = array[first]->error;
 
 err_free_fence_array:
        for (i = 0; i < fence_count; i++)
@@ -1506,15 +1519,12 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
        union drm_amdgpu_wait_fences *wait = data;
        uint32_t fence_count = wait->in.fence_count;
        struct drm_amdgpu_fence *fences_user;
        struct drm_amdgpu_fence *fences;
        int r;
 
-       if (amdgpu_kms_vram_lost(adev, fpriv))
-               return -ENODEV;
        /* Get the fences from userspace */
        fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
                        GFP_KERNEL);
@@ -1572,14 +1582,14 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
        if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
                return -EINVAL;
 
-       r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
-       if (unlikely(r))
-               return r;
-
-       if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
-               return 0;
+       if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+               (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+               amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+               r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
+                                   false);
+               if (r)
+                       return r;
+       }
 
-       (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-       amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
-       return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
+       return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
 }
index 75c933b1a4326532a76d023cbaa242b84bbed015..c184468e2b2b31cc196c9494e94ce31538091ae4 100644 (file)
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_auth.h>
 #include "amdgpu.h"
+#include "amdgpu_sched.h"
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+                                     enum amd_sched_priority priority)
+{
+       /* NORMAL and below are accessible by everyone */
+       if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+               return 0;
+
+       if (capable(CAP_SYS_NICE))
+               return 0;
+
+       if (drm_is_current_master(filp))
+               return 0;
+
+       return -EACCES;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+                          enum amd_sched_priority priority,
+                          struct drm_file *filp,
+                          struct amdgpu_ctx *ctx)
 {
        unsigned i, j;
        int r;
 
+       if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+               return -EINVAL;
+
+       r = amdgpu_ctx_priority_permit(filp, priority);
+       if (r)
+               return r;
+
        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;
        kref_init(&ctx->refcount);
@@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
        if (!ctx->fences)
                return -ENOMEM;
 
+       mutex_init(&ctx->lock);
+
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                ctx->rings[i].sequence = 1;
                ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
        }
 
        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+       ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+       ctx->init_priority = priority;
+       ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
 
        /* create context entity for each ring */
        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                struct amd_sched_rq *rq;
 
-               rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+               rq = &ring->sched.sched_rq[priority];
 
                if (ring == &adev->gfx.kiq.ring)
                        continue;
@@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
                                      &ctx->rings[i].entity);
 
        amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+       mutex_destroy(&ctx->lock);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
+                           struct drm_file *filp,
+                           enum amd_sched_priority priority,
                            uint32_t *id)
 {
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                kfree(ctx);
                return r;
        }
+
        *id = (uint32_t)r;
-       r = amdgpu_ctx_init(adev, ctx);
+       r = amdgpu_ctx_init(adev, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
@@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 {
        int r;
        uint32_t id;
+       enum amd_sched_priority priority;
 
        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
@@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 
        r = 0;
        id = args->in.ctx_id;
+       priority = amdgpu_to_sched_priority(args->in.priority);
+
+       /* For backwards compatibility reasons, we need to accept
+        * ioctls with garbage in the priority field */
+       if (priority == AMD_SCHED_PRIORITY_INVALID)
+               priority = AMD_SCHED_PRIORITY_NORMAL;
 
        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
-               r = amdgpu_ctx_alloc(adev, fpriv, &id);
+               r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
@@ -256,12 +301,8 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 
        idx = seq & (amdgpu_sched_jobs - 1);
        other = cring->fences[idx];
-       if (other) {
-               signed long r;
-               r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
-               if (r < 0)
-                       return r;
-       }
+       if (other)
+               BUG_ON(!dma_fence_is_signaled(other));
 
        dma_fence_get(fence);
 
@@ -305,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
        return fence;
 }
 
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+                                 enum amd_sched_priority priority)
+{
+       int i;
+       struct amdgpu_device *adev = ctx->adev;
+       struct amd_sched_rq *rq;
+       struct amd_sched_entity *entity;
+       struct amdgpu_ring *ring;
+       enum amd_sched_priority ctx_prio;
+
+       ctx->override_priority = priority;
+
+       ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+                       ctx->init_priority : ctx->override_priority;
+
+       for (i = 0; i < adev->num_rings; i++) {
+               ring = adev->rings[i];
+               entity = &ctx->rings[i].entity;
+               rq = &ring->sched.sched_rq[ctx_prio];
+
+               if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+                       continue;
+
+               amd_sched_entity_set_rq(entity, rq);
+       }
+}
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+       struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+       unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+       struct dma_fence *other = cring->fences[idx];
+
+       if (other) {
+               signed long r;
+               r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+               if (r < 0) {
+                       DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+                       return r;
+               }
+       }
+
+       return 0;
+}
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
        mutex_init(&mgr->lock);
index 1949d8aedf49ef2c4b0f0a59edd7efa7e486aa6c..0b9332e65a4c35552edf351fd1efc5503dd0599f 100644 (file)
@@ -109,10 +109,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
        uint32_t ret;
 
-       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
-               BUG_ON(in_interrupt());
+       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_rreg(adev, reg);
-       }
 
        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -137,10 +135,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                adev->last_mm_index = v;
        }
 
-       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
-               BUG_ON(in_interrupt());
+       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_wreg(adev, reg, v);
-       }
 
        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -657,6 +653,81 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
                        mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 }
 
+/*
+ * Firmware Reservation functions
+ */
+/**
+ * amdgpu_fw_reserve_vram_fini - free fw reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free fw reserved vram if it has been reserved.
+ */
+void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
+{
+       amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
+               NULL, &adev->fw_vram_usage.va);
+}
+
+/**
+ * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from fw.
+ */
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+       int r = 0;
+       u64 gpu_addr;
+       u64 vram_size = adev->mc.visible_vram_size;
+
+       adev->fw_vram_usage.va = NULL;
+       adev->fw_vram_usage.reserved_bo = NULL;
+
+       if (adev->fw_vram_usage.size > 0 &&
+               adev->fw_vram_usage.size <= vram_size) {
+
+               r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
+                       PAGE_SIZE, true, 0,
+                       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+                       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
+                       &adev->fw_vram_usage.reserved_bo);
+               if (r)
+                       goto error_create;
+
+               r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+               if (r)
+                       goto error_reserve;
+               r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+                       AMDGPU_GEM_DOMAIN_VRAM,
+                       adev->fw_vram_usage.start_offset,
+                       (adev->fw_vram_usage.start_offset +
+                       adev->fw_vram_usage.size), &gpu_addr);
+               if (r)
+                       goto error_pin;
+               r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+                       &adev->fw_vram_usage.va);
+               if (r)
+                       goto error_kmap;
+
+               amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+       }
+       return r;
+
+error_kmap:
+       amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+       amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+       amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+       adev->fw_vram_usage.va = NULL;
+       adev->fw_vram_usage.reserved_bo = NULL;
+       return r;
+}
+
+
 /*
  * GPU helpers function.
  */
@@ -1604,7 +1675,6 @@ static int amdgpu_init(struct amdgpu_device *adev)
                        return r;
                }
                adev->ip_blocks[i].status.sw = true;
-
                /* need to do gmc hw init early so we can allocate gpu mem */
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
                        r = amdgpu_vram_scratch_init(adev);
@@ -1635,11 +1705,6 @@ static int amdgpu_init(struct amdgpu_device *adev)
                }
        }
 
-       mutex_lock(&adev->firmware.mutex);
-       if (amdgpu_ucode_init_bo(adev))
-               adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
-       mutex_unlock(&adev->firmware.mutex);
-
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
@@ -1775,8 +1840,6 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 
                adev->ip_blocks[i].status.hw = false;
        }
-       if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
-               amdgpu_ucode_fini_bo(adev);
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
@@ -2019,6 +2082,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->vm_manager.vm_pte_num_rings = 0;
        adev->gart.gart_funcs = NULL;
        adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+       bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
        adev->smc_rreg = &amdgpu_invalid_rreg;
        adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -2047,6 +2111,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        mutex_init(&adev->pm.mutex);
        mutex_init(&adev->gfx.gpu_clock_mutex);
        mutex_init(&adev->srbm_mutex);
+       mutex_init(&adev->gfx.pipe_reserve_mutex);
        mutex_init(&adev->grbm_idx_mutex);
        mutex_init(&adev->mn_lock);
        mutex_init(&adev->virt.vf_errors.lock);
@@ -2223,6 +2288,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
 
+       if (amdgpu_sriov_vf(adev))
+               amdgpu_virt_init_data_exchange(adev);
+
        amdgpu_fbdev_init(adev);
 
        r = amdgpu_pm_sysfs_init(adev);
@@ -2300,6 +2368,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
+       amdgpu_fw_reserve_vram_fini(adev);
        amdgpu_fence_driver_fini(adev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
@@ -2552,6 +2621,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
        int i;
        bool asic_hang = false;
 
+       if (amdgpu_sriov_vf(adev))
+               return true;
+
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
index ad02d3fbb44c99b52965ca56d4997f2e523cdb70..dd2f060d62a86306500ed9cb3dc55915d4e7afda 100644 (file)
  * - 3.19.0 - Add support for UVD MJPEG decode
  * - 3.20.0 - Add support for local BOs
  * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
+ * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
+ * - 3.23.0 - Add query for VRAM lost counter
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       21
+#define KMS_DRIVER_MINOR       23
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
index 333bad74906784f7ecc7a794c3d1d4e1dd915978..fb9f88ef6059f2a6e8d65a62f4ebe40d45d4331b 100644 (file)
@@ -168,6 +168,32 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
        return 0;
 }
 
+/**
+ * amdgpu_fence_emit_polling - emit a fence on the requested ring
+ *
+ * @ring: ring the fence is associated with
+ * @s: resulting sequence number
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Used for fence polling.
+ * Returns 0 on success, -EINVAL if no sequence pointer is provided.
+ */
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+{
+       uint32_t seq;
+
+       if (!s)
+               return -EINVAL;
+
+       seq = ++ring->fence_drv.sync_seq;
+       amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+                              seq, AMDGPU_FENCE_FLAG_INT);
+
+       *s = seq;
+
+       return 0;
+}
+
 /**
  * amdgpu_fence_schedule_fallback - schedule fallback check
  *
@@ -281,6 +307,30 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
        return r;
 }
 
+/**
+ * amdgpu_fence_wait_polling - busy wait for given sequence number
+ *
+ * @ring: ring index the fence is associated with
+ * @wait_seq: sequence number to wait
+ * @timeout: the timeout for waiting in usecs
+ *
+ * Busy-wait until the given sequence number signals on the ring (all asics).
+ * Returns the remaining timeout if the sequence signaled in time, 0 on timeout.
+ */
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+                                     uint32_t wait_seq,
+                                     signed long timeout)
+{
+       uint32_t seq;
+
+       do {
+               seq = amdgpu_fence_read(ring);
+               udelay(5);
+               timeout -= 5;
+       } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
+
+       return timeout > 0 ? timeout : 0;
+}
 /**
  * amdgpu_fence_count_emitted - get the count of emitted fences
  *
@@ -641,6 +691,19 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted        0x%08x\n",
                           ring->fence_drv.sync_seq);
+
+               if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
+                       continue;
+
+               /* set in CP_VMID_PREEMPT and preemption occurred */
+               seq_printf(m, "Last preempted      0x%08x\n",
+                          le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
+               /* set in CP_VMID_RESET and reset occurred */
+               seq_printf(m, "Last reset          0x%08x\n",
+                          le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
+               /* Both preemption and reset occurred */
+               seq_printf(m, "Last both           0x%08x\n",
+                          le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
 }
index b0d45c8e6bb3f7762357f09cb3a570607cee5bb8..fb72edc4c0263659f39fa778995a14c40afcce79 100644 (file)
@@ -212,7 +212,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
-                     AMDGPU_GEM_CREATE_VM_ALWAYS_VALID))
+                     AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
+                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+
                return -EINVAL;
 
        /* reject invalid gem domains */
@@ -577,11 +579,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        args->operation);
                return -EINVAL;
        }
-       if ((args->operation == AMDGPU_VA_OP_MAP) ||
-           (args->operation == AMDGPU_VA_OP_REPLACE)) {
-               if (amdgpu_kms_vram_lost(adev, fpriv))
-                       return -ENODEV;
-       }
 
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
index 83435ccbad44791a8da2591d3885b6fa77a0a0c1..ef043361009f4c2a8de0788beeed86aef288cd7a 100644 (file)
@@ -201,7 +201,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        int r = 0;
 
-       mutex_init(&kiq->ring_mutex);
+       spin_lock_init(&kiq->ring_lock);
 
        r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
        if (r)
index 0d15eb7d31d7d0471b9253155c211bf5d3a641bc..33535d3477343bab044dba5c1cb171ed6befa475 100644 (file)
@@ -169,7 +169,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
        int r;
 
        spin_lock(&mgr->lock);
-       if (atomic64_read(&mgr->available) < mem->num_pages) {
+       if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
+           atomic64_read(&mgr->available) < mem->num_pages) {
                spin_unlock(&mgr->lock);
                return 0;
        }
@@ -244,8 +245,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
 {
        struct amdgpu_gtt_mgr *mgr = man->priv;
+       s64 result = man->size - atomic64_read(&mgr->available);
 
-       return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+       return (result > 0 ? result : 0) * PAGE_SIZE;
 }
 
 /**
@@ -265,7 +267,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
        drm_mm_print(&mgr->mm, printer);
        spin_unlock(&mgr->lock);
 
-       drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+       drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
                   man->size, (u64)atomic64_read(&mgr->available),
                   amdgpu_gtt_mgr_usage(man) >> 20);
 }
index 4510627ae83e9b57e19dccfe19f260da00f918f2..0cfc68db575b1ec8c220060154e2485ff8cecf78 100644 (file)
@@ -65,6 +65,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->dep_sync);
        amdgpu_sync_create(&(*job)->sched_sync);
+       (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 
        return 0;
 }
@@ -103,6 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
+       amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->dep_sync);
@@ -139,6 +141,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
        job->fence_ctx = entity->fence_context;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
+       amdgpu_ring_priority_get(job->ring,
+                                amd_sched_get_job_priority(&job->base));
        amd_sched_entity_push_job(&job->base);
 
        return 0;
@@ -177,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_device *adev;
        struct amdgpu_job *job;
-       struct amdgpu_fpriv *fpriv = NULL;
        int r;
 
        if (!sched_job) {
@@ -186,23 +190,25 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
                return NULL;
        }
        job = to_amdgpu_job(sched_job);
+       adev = job->adev;
 
        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
        trace_amdgpu_sched_run_job(job);
-       if (job->vm)
-               fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
        /* skip ib schedule when vram is lost */
-       if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
+       if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
+               dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
                DRM_ERROR("Skip scheduling IBs!\n");
-       else {
-               r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
+       } else {
+               r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+                                      &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);
+
        amdgpu_job_free_resources(job);
        return fence;
 }
index 51841259e23f86f492eaf62ba69557d7670def9d..6f0b26dae3b0009ee127929186ef43d140b87bcb 100644 (file)
@@ -28,6 +28,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include <drm/amdgpu_drm.h>
+#include "amdgpu_sched.h"
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 
@@ -269,7 +270,6 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct drm_amdgpu_info *info = data;
        struct amdgpu_mode_info *minfo = &adev->mode_info;
        void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -282,8 +282,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
        if (!info->return_size || !info->return_pointer)
                return -EINVAL;
-       if (amdgpu_kms_vram_lost(adev, fpriv))
-               return -ENODEV;
 
        switch (info->query) {
        case AMDGPU_INFO_ACCEL_WORKING:
@@ -765,6 +763,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                }
                return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
        }
+       case AMDGPU_INFO_VRAM_LOST_COUNTER:
+               ui32 = atomic_read(&adev->vram_lost_counter);
+               return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->query);
                return -EINVAL;
@@ -791,12 +792,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
        vga_switcheroo_process_delayed_switch();
 }
 
-bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
-                         struct amdgpu_fpriv *fpriv)
-{
-       return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
-}
-
 /**
  * amdgpu_driver_open_kms - drm callback for open
  *
@@ -853,7 +848,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 
        amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
 
-       fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        file_priv->driver_priv = fpriv;
 
 out_suspend:
@@ -1023,6 +1017,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
        DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        /* KMS */
index 6982baeccd1491737b027a33a6e77364b0811e8f..8b4ed8a98a18f9393f1edb9e7ea3b13eb53489fe 100644 (file)
@@ -40,9 +40,7 @@
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-       struct amdgpu_bo *bo;
-
-       bo = container_of(tbo, struct amdgpu_bo, tbo);
+       struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 
        amdgpu_bo_kunmap(bo);
 
@@ -884,7 +882,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;
 
-       abo = container_of(bo, struct amdgpu_bo, tbo);
+       abo = ttm_to_amdgpu_bo(bo);
        amdgpu_vm_bo_invalidate(adev, abo, evict);
 
        amdgpu_bo_kunmap(abo);
@@ -911,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return 0;
 
-       abo = container_of(bo, struct amdgpu_bo, tbo);
+       abo = ttm_to_amdgpu_bo(bo);
 
        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
index 39b6bf6fb051fe2ee890f06d6a1066e9db4f58c2..428aae048f4b19bb2f31dbb168a9a12cb6ae5a2b 100644 (file)
@@ -94,6 +94,11 @@ struct amdgpu_bo {
        };
 };
 
+static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
+{
+       return container_of(tbo, struct amdgpu_bo, tbo);
+}
+
 /**
  * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
  * @mem_type:  ttm memory type
@@ -188,6 +193,14 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
        }
 }
 
+/**
+ * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
+ */
+static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
+{
+       return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+}
+
 int amdgpu_bo_create(struct amdgpu_device *adev,
                            unsigned long size, int byte_align,
                            bool kernel, u32 domain, u64 flags,
index 3b42f407971d29d0b2c59bcbf5cd661945e8ccb1..5f5aa5fddc169355077a4e61665563c087d860f5 100644 (file)
@@ -145,6 +145,8 @@ static int amdgpu_pp_hw_init(void *handle)
        int ret = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+               amdgpu_ucode_init_bo(adev);
 
        if (adev->powerplay.ip_funcs->hw_init)
                ret = adev->powerplay.ip_funcs->hw_init(
@@ -162,6 +164,9 @@ static int amdgpu_pp_hw_fini(void *handle)
                ret = adev->powerplay.ip_funcs->hw_fini(
                                        adev->powerplay.pp_handle);
 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+               amdgpu_ucode_fini_bo(adev);
+
        return ret;
 }
 
index f1035a689d354349aaac7b7b79e1eaa51cf6e4a8..447d446b50150d475cb9a01945706b17bbfc2e78 100644 (file)
@@ -411,6 +411,13 @@ static int psp_hw_init(void *handle)
                return 0;
 
        mutex_lock(&adev->firmware.mutex);
+       /*
+        * This sequence is just used on hw_init only once, no need on
+        * resume.
+        */
+       ret = amdgpu_ucode_init_bo(adev);
+       if (ret)
+               goto failed;
 
        ret = psp_load_fw(adev);
        if (ret) {
@@ -435,6 +442,8 @@ static int psp_hw_fini(void *handle)
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;
 
+       amdgpu_ucode_fini_bo(adev);
+
        psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
index 019932a7ea3a2bdd761ef4358d77fd44065fea17..e5ece1fae149a3e060f28230301e090a6e56e684 100644 (file)
@@ -154,6 +154,75 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
                ring->funcs->end_use(ring);
 }
 
+/**
+ * amdgpu_ring_priority_put - restore a ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Release a request for executing at @priority
+ */
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority)
+{
+       int i;
+
+       if (!ring->funcs->set_priority)
+               return;
+
+       if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
+               return;
+
+       /* no need to restore if the job is already at the lowest priority */
+       if (priority == AMD_SCHED_PRIORITY_NORMAL)
+               return;
+
+       mutex_lock(&ring->priority_mutex);
+       /* something higher prio is executing, no need to decay */
+       if (ring->priority > priority)
+               goto out_unlock;
+
+       /* decay priority to the next level with a job available */
+       for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+               if (i == AMD_SCHED_PRIORITY_NORMAL
+                               || atomic_read(&ring->num_jobs[i])) {
+                       ring->priority = i;
+                       ring->funcs->set_priority(ring, i);
+                       break;
+               }
+       }
+
+out_unlock:
+       mutex_unlock(&ring->priority_mutex);
+}
+
+/**
+ * amdgpu_ring_priority_get - change the ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Request a ring's priority to be raised to @priority (refcounted).
+ */
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority)
+{
+       if (!ring->funcs->set_priority)
+               return;
+
+       atomic_inc(&ring->num_jobs[priority]);
+
+       mutex_lock(&ring->priority_mutex);
+       if (priority <= ring->priority)
+               goto out_unlock;
+
+       ring->priority = priority;
+       ring->funcs->set_priority(ring, priority);
+
+out_unlock:
+       mutex_unlock(&ring->priority_mutex);
+}
+
 /**
  * amdgpu_ring_init - init driver ring struct.
  *
@@ -169,7 +238,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned max_dw, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type)
 {
-       int r;
+       int r, i;
        int sched_hw_submission = amdgpu_sched_hw_submission;
 
        /* Set the hw submission limit higher for KIQ because
@@ -247,9 +316,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
        }
 
        ring->max_dw = max_dw;
+       ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+       mutex_init(&ring->priority_mutex);
        INIT_LIST_HEAD(&ring->lru_list);
        amdgpu_ring_lru_touch(adev, ring);
 
+       for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+               atomic_set(&ring->num_jobs[i], 0);
+
        if (amdgpu_debugfs_ring_init(adev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
        }
index 491bd5512dcc50c54a1dc0cd33ca2b4005f3827a..b18c2b96691f72f071df199b96db997c741700ce 100644 (file)
@@ -24,6 +24,7 @@
 #ifndef __AMDGPU_RING_H__
 #define __AMDGPU_RING_H__
 
+#include <drm/amdgpu_drm.h>
 #include "gpu_scheduler.h"
 
 /* max number of rings */
@@ -56,6 +57,7 @@ struct amdgpu_device;
 struct amdgpu_ring;
 struct amdgpu_ib;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 
 /*
  * Fences.
@@ -88,8 +90,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+                                     uint32_t wait_seq,
+                                     signed long timeout);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 /*
@@ -147,6 +153,9 @@ struct amdgpu_ring_funcs {
        void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
        void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
        void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+       /* priority functions */
+       void (*set_priority) (struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
 };
 
 struct amdgpu_ring {
@@ -187,6 +196,12 @@ struct amdgpu_ring {
        volatile u32            *cond_exe_cpu_addr;
        unsigned                vm_inv_eng;
        bool                    has_compute_vm_bug;
+
+       atomic_t                num_jobs[AMD_SCHED_PRIORITY_MAX];
+       struct mutex            priority_mutex;
+       /* protected by priority_mutex */
+       int                     priority;
+
 #if defined(CONFIG_DEBUG_FS)
        struct dentry *ent;
 #endif
@@ -197,6 +212,10 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+                             enum amd_sched_priority priority);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644 (file)
index 0000000..290cc3f
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
+ */
+
+#include <linux/fdtable.h>
+#include <linux/pid.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+#include "amdgpu_vm.h"
+
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+{
+       switch (amdgpu_priority) {
+       case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+               return AMD_SCHED_PRIORITY_HIGH_HW;
+       case AMDGPU_CTX_PRIORITY_HIGH:
+               return AMD_SCHED_PRIORITY_HIGH_SW;
+       case AMDGPU_CTX_PRIORITY_NORMAL:
+               return AMD_SCHED_PRIORITY_NORMAL;
+       case AMDGPU_CTX_PRIORITY_LOW:
+       case AMDGPU_CTX_PRIORITY_VERY_LOW:
+               return AMD_SCHED_PRIORITY_LOW;
+       case AMDGPU_CTX_PRIORITY_UNSET:
+               return AMD_SCHED_PRIORITY_UNSET;
+       default:
+               WARN(1, "Invalid context priority %d\n", amdgpu_priority);
+               return AMD_SCHED_PRIORITY_INVALID;
+       }
+}
+
+static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+                                                 int fd,
+                                                 enum amd_sched_priority priority)
+{
+       struct file *filp = fcheck(fd);
+       struct drm_file *file;
+       struct pid *pid;
+       struct amdgpu_fpriv *fpriv;
+       struct amdgpu_ctx *ctx;
+       uint32_t id;
+
+       if (!filp)
+               return -EINVAL;
+
+       pid = get_pid(((struct drm_file *)filp->private_data)->pid);
+
+       mutex_lock(&adev->ddev->filelist_mutex);
+       list_for_each_entry(file, &adev->ddev->filelist, lhead) {
+               if (file->pid != pid)
+                       continue;
+
+               fpriv = file->driver_priv;
+               idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+                               amdgpu_ctx_priority_override(ctx, priority);
+       }
+       mutex_unlock(&adev->ddev->filelist_mutex);
+
+       put_pid(pid);
+
+       return 0;
+}
+
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *filp)
+{
+       union drm_amdgpu_sched *args = data;
+       struct amdgpu_device *adev = dev->dev_private;
+       enum amd_sched_priority priority;
+       int r;
+
+       priority = amdgpu_to_sched_priority(args->in.priority);
+       if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+               return -EINVAL;
+
+       switch (args->in.op) {
+       case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
+               r = amdgpu_sched_process_priority_override(adev,
+                                                          args->in.fd,
+                                                          priority);
+               break;
+       default:
+               DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
+               r = -EINVAL;
+               break;
+       }
+
+       return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
new file mode 100644 (file)
index 0000000..b28c067
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
+ */
+
+#ifndef __AMDGPU_SCHED_H__
+#define __AMDGPU_SCHED_H__
+
+#include <drm/drmP.h>
+
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *filp);
+
+#endif // __AMDGPU_SCHED_H__
index c586f44312f9772fb93f8b1442827c842d610594..a4bf21f8f1c187e9c7f45bfc4c30171760647eea 100644 (file)
@@ -169,14 +169,14 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
  *
  * @sync: sync object to add fences from reservation object to
  * @resv: reservation object with embedded fence
- * @shared: true if we should only sync to the exclusive fence
+ * @explicit_sync: true if we should only sync to the exclusive fence
  *
  * Sync to the fence
  */
 int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
-                    void *owner)
+                    void *owner, bool explicit_sync)
 {
        struct reservation_object_list *flist;
        struct dma_fence *f;
@@ -191,6 +191,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        f = reservation_object_get_excl(resv);
        r = amdgpu_sync_fence(adev, sync, f);
 
+       if (explicit_sync)
+               return r;
+
        flist = reservation_object_get_list(resv);
        if (!flist || r)
                return r;
index dc7687993317041546db1a559489095b8011be27..70d7e3a279a052c9be7453311886d985751417b6 100644 (file)
@@ -45,7 +45,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
-                    void *owner);
+                    void *owner,
+                    bool explicit_sync);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                     struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
index 15a28578d4585bcb1523b70ed24d34cfcf7cb71c..51eacefadea11e23ddf9325e89d01671a45ef36b 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/debugfs.h>
 #include <linux/iommu.h>
 #include "amdgpu.h"
+#include "amdgpu_object.h"
 #include "amdgpu_trace.h"
 #include "bif/bif_4_1_d.h"
 
@@ -209,7 +210,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                placement->num_busy_placement = 1;
                return;
        }
-       abo = container_of(bo, struct amdgpu_bo, tbo);
+       abo = ttm_to_amdgpu_bo(bo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                if (adev->mman.buffer_funcs &&
@@ -257,7 +258,7 @@ gtt:
 
 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-       struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 
        if (amdgpu_ttm_tt_get_usermm(bo->ttm))
                return -EPERM;
@@ -289,97 +290,177 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
        return addr;
 }
 
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
-                           bool evict, bool no_wait_gpu,
-                           struct ttm_mem_reg *new_mem,
-                           struct ttm_mem_reg *old_mem)
+/**
+ * amdgpu_find_mm_node - Helper function finds the drm_mm_node
+ *  corresponding to @offset. It also modifies the offset to be
+ *  within the drm_mm_node returned
+ */
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+                                              unsigned long *offset)
 {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+       struct drm_mm_node *mm_node = mem->mm_node;
 
-       struct drm_mm_node *old_mm, *new_mm;
-       uint64_t old_start, old_size, new_start, new_size;
-       unsigned long num_pages;
-       struct dma_fence *fence = NULL;
-       int r;
+       while (*offset >= (mm_node->size << PAGE_SHIFT)) {
+               *offset -= (mm_node->size << PAGE_SHIFT);
+               ++mm_node;
+       }
+       return mm_node;
+}
 
-       BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+/**
+ * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ *
+ * The function copies @size bytes from {src->mem + src->offset} to
+ * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
+ * move and different for a BO to BO copy.
+ *
+ * @f: Returns the last fence if multiple jobs are submitted.
+ */
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+                              struct amdgpu_copy_mem *src,
+                              struct amdgpu_copy_mem *dst,
+                              uint64_t size,
+                              struct reservation_object *resv,
+                              struct dma_fence **f)
+{
+       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+       struct drm_mm_node *src_mm, *dst_mm;
+       uint64_t src_node_start, dst_node_start, src_node_size,
+                dst_node_size, src_page_offset, dst_page_offset;
+       struct dma_fence *fence = NULL;
+       int r = 0;
+       const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+                                       AMDGPU_GPU_PAGE_SIZE);
 
        if (!ring->ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
 
-       old_mm = old_mem->mm_node;
-       old_size = old_mm->size;
-       old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
+       src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
+       src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
+                                            src->offset;
+       src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
+       src_page_offset = src_node_start & (PAGE_SIZE - 1);
 
-       new_mm = new_mem->mm_node;
-       new_size = new_mm->size;
-       new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
+       dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
+       dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
+                                            dst->offset;
+       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
+       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
 
-       num_pages = new_mem->num_pages;
        mutex_lock(&adev->mman.gtt_window_lock);
-       while (num_pages) {
-               unsigned long cur_pages = min(min(old_size, new_size),
-                                             (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
-               uint64_t from = old_start, to = new_start;
+
+       while (size) {
+               unsigned long cur_size;
+               uint64_t from = src_node_start, to = dst_node_start;
                struct dma_fence *next;
 
-               if (old_mem->mem_type == TTM_PL_TT &&
-                   !amdgpu_gtt_mgr_is_allocated(old_mem)) {
-                       r = amdgpu_map_buffer(bo, old_mem, cur_pages,
-                                             old_start, 0, ring, &from);
+               /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
+                * begins at an offset, then adjust the size accordingly
+                */
+               cur_size = min3(min(src_node_size, dst_node_size), size,
+                               GTT_MAX_BYTES);
+               if (cur_size + src_page_offset > GTT_MAX_BYTES ||
+                   cur_size + dst_page_offset > GTT_MAX_BYTES)
+                       cur_size -= max(src_page_offset, dst_page_offset);
+
+               /* Map only what needs to be accessed. Map src to window 0 and
+                * dst to window 1
+                */
+               if (src->mem->mem_type == TTM_PL_TT &&
+                   !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+                       r = amdgpu_map_buffer(src->bo, src->mem,
+                                       PFN_UP(cur_size + src_page_offset),
+                                       src_node_start, 0, ring,
+                                       &from);
                        if (r)
                                goto error;
+                       /* Adjust the offset because amdgpu_map_buffer returns
+                        * start of mapped page
+                        */
+                       from += src_page_offset;
                }
 
-               if (new_mem->mem_type == TTM_PL_TT &&
-                   !amdgpu_gtt_mgr_is_allocated(new_mem)) {
-                       r = amdgpu_map_buffer(bo, new_mem, cur_pages,
-                                             new_start, 1, ring, &to);
+               if (dst->mem->mem_type == TTM_PL_TT &&
+                   !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+                       r = amdgpu_map_buffer(dst->bo, dst->mem,
+                                       PFN_UP(cur_size + dst_page_offset),
+                                       dst_node_start, 1, ring,
+                                       &to);
                        if (r)
                                goto error;
+                       to += dst_page_offset;
                }
 
-               r = amdgpu_copy_buffer(ring, from, to,
-                                      cur_pages * PAGE_SIZE,
-                                      bo->resv, &next, false, true);
+               r = amdgpu_copy_buffer(ring, from, to, cur_size,
+                                      resv, &next, false, true);
                if (r)
                        goto error;
 
                dma_fence_put(fence);
                fence = next;
 
-               num_pages -= cur_pages;
-               if (!num_pages)
+               size -= cur_size;
+               if (!size)
                        break;
 
-               old_size -= cur_pages;
-               if (!old_size) {
-                       old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
-                       old_size = old_mm->size;
+               src_node_size -= cur_size;
+               if (!src_node_size) {
+                       src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
+                                                            src->mem);
+                       src_node_size = (src_mm->size << PAGE_SHIFT);
                } else {
-                       old_start += cur_pages * PAGE_SIZE;
+                       src_node_start += cur_size;
+                       src_page_offset = src_node_start & (PAGE_SIZE - 1);
                }
-
-               new_size -= cur_pages;
-               if (!new_size) {
-                       new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
-                       new_size = new_mm->size;
+               dst_node_size -= cur_size;
+               if (!dst_node_size) {
+                       dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
+                                                            dst->mem);
+                       dst_node_size = (dst_mm->size << PAGE_SHIFT);
                } else {
-                       new_start += cur_pages * PAGE_SIZE;
+                       dst_node_start += cur_size;
+                       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
                }
        }
+error:
        mutex_unlock(&adev->mman.gtt_window_lock);
+       if (f)
+               *f = dma_fence_get(fence);
+       dma_fence_put(fence);
+       return r;
+}
+
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+                           bool evict, bool no_wait_gpu,
+                           struct ttm_mem_reg *new_mem,
+                           struct ttm_mem_reg *old_mem)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct amdgpu_copy_mem src, dst;
+       struct dma_fence *fence = NULL;
+       int r;
+
+       src.bo = bo;
+       dst.bo = bo;
+       src.mem = old_mem;
+       dst.mem = new_mem;
+       src.offset = 0;
+       dst.offset = 0;
+
+       r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
+                                      new_mem->num_pages << PAGE_SHIFT,
+                                      bo->resv, &fence);
+       if (r)
+               goto error;
 
        r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
        dma_fence_put(fence);
        return r;
 
 error:
-       mutex_unlock(&adev->mman.gtt_window_lock);
-
        if (fence)
                dma_fence_wait(fence, false);
        dma_fence_put(fence);
@@ -484,7 +565,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
        int r;
 
        /* Can't move a pinned BO */
-       abo = container_of(bo, struct amdgpu_bo, tbo);
+       abo = ttm_to_amdgpu_bo(bo);
        if (WARN_ON_ONCE(abo->pin_count > 0))
                return -EINVAL;
 
@@ -582,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
 {
-       struct drm_mm_node *mm = bo->mem.mm_node;
-       uint64_t size = mm->size;
-       uint64_t offset = page_offset;
+       struct drm_mm_node *mm;
+       unsigned long offset = (page_offset << PAGE_SHIFT);
 
-       page_offset = do_div(offset, size);
-       mm += offset;
-       return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+       mm = amdgpu_find_mm_node(&bo->mem, &offset);
+       return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
+               (offset >> PAGE_SHIFT);
 }
 
 /*
@@ -1142,9 +1222,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
                                    unsigned long offset,
                                    void *buf, int len, int write)
 {
-       struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-       struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
+       struct drm_mm_node *nodes;
        uint32_t value = 0;
        int ret = 0;
        uint64_t pos;
@@ -1153,10 +1233,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return -EIO;
 
-       while (offset >= (nodes->size << PAGE_SHIFT)) {
-               offset -= nodes->size << PAGE_SHIFT;
-               ++nodes;
-       }
+       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
        pos = (nodes->start << PAGE_SHIFT) + offset;
 
        while (len && pos < adev->mc.mc_vram_size) {
@@ -1255,6 +1332,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        /* Change the size here instead of the init above so only lpfn is affected */
        amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
+       /*
+        *The reserved vram for firmware must be pinned to the specified
+        *place on the VRAM, so reserve it early.
+        */
+       r = amdgpu_fw_reserve_vram_init(adev);
+       if (r) {
+               return r;
+       }
+
        r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->stolen_vga_memory,
@@ -1479,7 +1565,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        job->vm_needs_flush = vm_needs_flush;
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED);
+                                    AMDGPU_FENCE_OWNER_UNDEFINED,
+                                    false);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
@@ -1571,7 +1658,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
        if (resv) {
                r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED);
+                                    AMDGPU_FENCE_OWNER_UNDEFINED, false);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
                        goto error_free;
index 7abae6867339cc9f64df21ca4ee32f8971694949..abd4084982a3a14e1303046b479efd679f8396dd 100644 (file)
@@ -58,6 +58,12 @@ struct amdgpu_mman {
        struct amd_sched_entity                 entity;
 };
 
+struct amdgpu_copy_mem {
+       struct ttm_buffer_object        *bo;
+       struct ttm_mem_reg              *mem;
+       unsigned long                   offset;
+};
+
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
@@ -72,6 +78,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       struct reservation_object *resv,
                       struct dma_fence **fence, bool direct_submit,
                       bool vm_needs_flush);
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+                              struct amdgpu_copy_mem *src,
+                              struct amdgpu_copy_mem *dst,
+                              uint64_t size,
+                              struct reservation_object *resv,
+                              struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        uint64_t src_data,
                        struct reservation_object *resv,
index ab05121b9272b9f11bf7ab52aa0ca085149aeeca..e97f80f860050edf7f874cfceec083427cb4fd4a 100644 (file)
@@ -22,7 +22,7 @@
  */
 
 #include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT       100000
+#define MAX_KIQ_REG_WAIT       100000000 /* in usecs */
 
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
 {
@@ -114,27 +114,24 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
        signed long r;
-       uint32_t val;
-       struct dma_fence *f;
+       uint32_t val, seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_rreg);
 
-       mutex_lock(&kiq->ring_mutex);
+       spin_lock(&kiq->ring_lock);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
-       amdgpu_fence_emit(ring, &f);
+       amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
-       mutex_unlock(&kiq->ring_mutex);
+       spin_unlock(&kiq->ring_lock);
 
-       r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
-       dma_fence_put(f);
+       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        if (r < 1) {
-               DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+               DRM_ERROR("wait for kiq fence error: %ld\n", r);
                return ~0;
        }
-
        val = adev->wb.wb[adev->virt.reg_val_offs];
 
        return val;
@@ -143,23 +140,22 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 {
        signed long r;
-       struct dma_fence *f;
+       uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_wreg);
 
-       mutex_lock(&kiq->ring_mutex);
+       spin_lock(&kiq->ring_lock);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
-       amdgpu_fence_emit(ring, &f);
+       amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
-       mutex_unlock(&kiq->ring_mutex);
+       spin_unlock(&kiq->ring_lock);
 
-       r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
+       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        if (r < 1)
-               DRM_ERROR("wait for kiq fence error: %ld.\n", r);
-       dma_fence_put(f);
+               DRM_ERROR("wait for kiq fence error: %ld\n", r);
 }
 
 /**
@@ -274,3 +270,78 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
 }
+
+
+int amdgpu_virt_fw_reserve_get_checksum(void *obj,
+                                       unsigned long obj_size,
+                                       unsigned int key,
+                                       unsigned int chksum)
+{
+       unsigned int ret = key;
+       unsigned long i = 0;
+       unsigned char *pos;
+
+       pos = (char *)obj;
+       /* calculate checksum */
+       for (i = 0; i < obj_size; ++i)
+               ret += *(pos + i);
+       /* minus the chksum itself */
+       pos = (char *)&chksum;
+       for (i = 0; i < sizeof(chksum); ++i)
+               ret -= *(pos + i);
+       return ret;
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
+       uint32_t pf2vf_ver = 0;
+       uint32_t pf2vf_size = 0;
+       uint32_t checksum = 0;
+       uint32_t checkval;
+       char *str;
+
+       adev->virt.fw_reserve.p_pf2vf = NULL;
+       adev->virt.fw_reserve.p_vf2pf = NULL;
+
+       if (adev->fw_vram_usage.va != NULL) {
+               adev->virt.fw_reserve.p_pf2vf =
+                       (struct amdgim_pf2vf_info_header *)(
+                       adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
+               pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
+               AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
+               AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
+
+               /* pf2vf message must be in 4K */
+               if (pf2vf_size > 0 && pf2vf_size < 4096) {
+                       checkval = amdgpu_virt_fw_reserve_get_checksum(
+                               adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
+                               adev->virt.fw_reserve.checksum_key, checksum);
+                       if (checkval == checksum) {
+                               adev->virt.fw_reserve.p_vf2pf =
+                                       ((void *)adev->virt.fw_reserve.p_pf2vf +
+                                       pf2vf_size);
+                               memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
+                                       sizeof(amdgim_vf2pf_info));
+                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
+                                       AMDGPU_FW_VRAM_VF2PF_VER);
+                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
+                                       sizeof(amdgim_vf2pf_info));
+                               AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
+                                       &str);
+                               if (THIS_MODULE->version != NULL)
+                                       strcpy(str, THIS_MODULE->version);
+                               else
+                                       strcpy(str, "N/A");
+                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
+                                       0);
+                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
+                                       amdgpu_virt_fw_reserve_get_checksum(
+                                       adev->virt.fw_reserve.p_vf2pf,
+                                       pf2vf_size,
+                                       adev->virt.fw_reserve.checksum_key, 0));
+                       }
+               }
+       }
+}
+
+
index e5fd0ff6b29d84ad71bfbd817fb7383e773282e8..b89d37fc406f4cea88ada380c84158ac6d4cef98 100644 (file)
@@ -58,6 +58,179 @@ struct amdgpu_virt_ops {
        void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
 };
 
+/*
+ * Firmware Reserve Frame buffer
+ */
+struct amdgpu_virt_fw_reserve {
+       struct amdgim_pf2vf_info_header *p_pf2vf;
+       struct amdgim_vf2pf_info_header *p_vf2pf;
+       unsigned int checksum_key;
+};
+/*
+ * Defination between PF and VF
+ * Structures forcibly aligned to 4 to keep the same style as PF.
+ */
+#define AMDGIM_DATAEXCHANGE_OFFSET             (64 * 1024)
+
+#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
+               (total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
+
+enum AMDGIM_FEATURE_FLAG {
+       /* GIM supports feature of Error log collecting */
+       AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
+       /* GIM supports feature of loading uCodes */
+       AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
+};
+
+struct amdgim_pf2vf_info_header {
+       /* the total structure size in byte. */
+       uint32_t size;
+       /* version of this structure, written by the GIM */
+       uint32_t version;
+} __aligned(4);
+struct  amdgim_pf2vf_info_v1 {
+       /* header contains size and version */
+       struct amdgim_pf2vf_info_header header;
+       /* max_width * max_height */
+       unsigned int uvd_enc_max_pixels_count;
+       /* 16x16 pixels/sec, codec independent */
+       unsigned int uvd_enc_max_bandwidth;
+       /* max_width * max_height */
+       unsigned int vce_enc_max_pixels_count;
+       /* 16x16 pixels/sec, codec independent */
+       unsigned int vce_enc_max_bandwidth;
+       /* MEC FW position in kb from the start of visible frame buffer */
+       unsigned int mecfw_kboffset;
+       /* The features flags of the GIM driver supports. */
+       unsigned int feature_flags;
+       /* use private key from mailbox 2 to create chueksum */
+       unsigned int checksum;
+} __aligned(4);
+
+struct  amdgim_pf2vf_info_v2 {
+       /* header contains size and version */
+       struct amdgim_pf2vf_info_header header;
+       /* use private key from mailbox 2 to create chueksum */
+       uint32_t checksum;
+       /* The features flags of the GIM driver supports. */
+       uint32_t feature_flags;
+       /* max_width * max_height */
+       uint32_t uvd_enc_max_pixels_count;
+       /* 16x16 pixels/sec, codec independent */
+       uint32_t uvd_enc_max_bandwidth;
+       /* max_width * max_height */
+       uint32_t vce_enc_max_pixels_count;
+       /* 16x16 pixels/sec, codec independent */
+       uint32_t vce_enc_max_bandwidth;
+       /* MEC FW position in kb from the start of VF visible frame buffer */
+       uint64_t mecfw_kboffset;
+       /* MEC FW size in KB */
+       uint32_t mecfw_ksize;
+       /* UVD FW position in kb from the start of VF visible frame buffer */
+       uint64_t uvdfw_kboffset;
+       /* UVD FW size in KB */
+       uint32_t uvdfw_ksize;
+       /* VCE FW position in kb from the start of VF visible frame buffer */
+       uint64_t vcefw_kboffset;
+       /* VCE FW size in KB */
+       uint32_t vcefw_ksize;
+       uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+} __aligned(4);
+
+
+struct amdgim_vf2pf_info_header {
+       /* the total structure size in byte. */
+       uint32_t size;
+       /*version of this structure, written by the guest */
+       uint32_t version;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v1 {
+       /* header contains size and version */
+       struct amdgim_vf2pf_info_header header;
+       /* driver version */
+       char driver_version[64];
+       /* driver certification, 1=WHQL, 0=None */
+       unsigned int driver_cert;
+       /* guest OS type and version: need a define */
+       unsigned int os_info;
+       /* in the unit of 1M */
+       unsigned int fb_usage;
+       /* guest gfx engine usage percentage */
+       unsigned int gfx_usage;
+       /* guest gfx engine health percentage */
+       unsigned int gfx_health;
+       /* guest compute engine usage percentage */
+       unsigned int compute_usage;
+       /* guest compute engine health percentage */
+       unsigned int compute_health;
+       /* guest vce engine usage percentage. 0xffff means N/A. */
+       unsigned int vce_enc_usage;
+       /* guest vce engine health percentage. 0xffff means N/A. */
+       unsigned int vce_enc_health;
+       /* guest uvd engine usage percentage. 0xffff means N/A. */
+       unsigned int uvd_enc_usage;
+       /* guest uvd engine usage percentage. 0xffff means N/A. */
+       unsigned int uvd_enc_health;
+       unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v2 {
+       /* header contains size and version */
+       struct amdgim_vf2pf_info_header header;
+       uint32_t checksum;
+       /* driver version */
+       uint8_t driver_version[64];
+       /* driver certification, 1=WHQL, 0=None */
+       uint32_t driver_cert;
+       /* guest OS type and version: need a define */
+       uint32_t os_info;
+       /* in the unit of 1M */
+       uint32_t fb_usage;
+       /* guest gfx engine usage percentage */
+       uint32_t gfx_usage;
+       /* guest gfx engine health percentage */
+       uint32_t gfx_health;
+       /* guest compute engine usage percentage */
+       uint32_t compute_usage;
+       /* guest compute engine health percentage */
+       uint32_t compute_health;
+       /* guest vce engine usage percentage. 0xffff means N/A. */
+       uint32_t vce_enc_usage;
+       /* guest vce engine health percentage. 0xffff means N/A. */
+       uint32_t vce_enc_health;
+       /* guest uvd engine usage percentage. 0xffff means N/A. */
+       uint32_t uvd_enc_usage;
+       /* guest uvd engine usage percentage. 0xffff means N/A. */
+       uint32_t uvd_enc_health;
+       uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+} __aligned(4);
+
+#define AMDGPU_FW_VRAM_VF2PF_VER 2
+typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
+
+#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
+       do { \
+               ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
+       } while (0)
+
+#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
+       do { \
+               (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
+       } while (0)
+
+#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
+       do { \
+               if (!adev->virt.fw_reserve.p_pf2vf) \
+                       *(val) = 0; \
+               else { \
+                       if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
+                               *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
+                       if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
+                               *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
+               } \
+       } while (0)
+
 /* GPU virtualization */
 struct amdgpu_virt {
        uint32_t                        caps;
@@ -72,6 +245,7 @@ struct amdgpu_virt {
        struct amdgpu_mm_table          mm_table;
        const struct amdgpu_virt_ops    *ops;
        struct amdgpu_vf_error_buffer   vf_errors;
+       struct amdgpu_virt_fw_reserve   fw_reserve;
 };
 
 #define AMDGPU_CSA_SIZE    (8 * 1024)
@@ -114,5 +288,9 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
+int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
+                                       unsigned int key,
+                                       unsigned int chksum);
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
 
 #endif
index fee0a32ac56f65b32f4e15a966afbca1baced4b1..010d14195a5ec63d54e5e37d6ce97901dbead71c 100644 (file)
@@ -328,9 +328,10 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                                AMDGPU_GEM_CREATE_SHADOW);
 
        if (vm->pte_support_ats) {
-               init_value = AMDGPU_PTE_SYSTEM;
+               init_value = AMDGPU_PTE_DEFAULT_ATC;
                if (level != adev->vm_manager.num_level - 1)
                        init_value |= AMDGPU_PDE_PTE;
+
        }
 
        /* walk over the address space and allocate the page tables */
@@ -1034,7 +1035,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        int r;
 
        amdgpu_sync_create(&sync);
-       amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner);
+       amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
        r = amdgpu_sync_wait(&sync, true);
        amdgpu_sync_free(&sync);
 
@@ -1175,11 +1176,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                        amdgpu_ring_pad_ib(ring, params.ib);
                        amdgpu_sync_resv(adev, &job->sync,
                                         parent->base.bo->tbo.resv,
-                                        AMDGPU_FENCE_OWNER_VM);
+                                        AMDGPU_FENCE_OWNER_VM, false);
                        if (shadow)
                                amdgpu_sync_resv(adev, &job->sync,
                                                 shadow->tbo.resv,
-                                                AMDGPU_FENCE_OWNER_VM);
+                                                AMDGPU_FENCE_OWNER_VM, false);
 
                        WARN_ON(params.ib->length_dw > ndw);
                        r = amdgpu_job_submit(job, ring, &vm->entity,
@@ -1643,7 +1644,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                goto error_free;
 
        r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
-                            owner);
+                            owner, false);
        if (r)
                goto error_free;
 
@@ -1698,6 +1699,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct drm_mm_node *nodes,
                                      struct dma_fence **fence)
 {
+       unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
        uint64_t pfn, start = mapping->start;
        int r;
 
@@ -1732,6 +1734,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
        }
 
        do {
+               dma_addr_t *dma_addr = NULL;
                uint64_t max_entries;
                uint64_t addr, last;
 
@@ -1745,15 +1748,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                }
 
                if (pages_addr) {
+                       uint64_t count;
+
                        max_entries = min(max_entries, 16ull * 1024ull);
-                       addr = 0;
+                       for (count = 1; count < max_entries; ++count) {
+                               uint64_t idx = pfn + count;
+
+                               if (pages_addr[idx] !=
+                                   (pages_addr[idx - 1] + PAGE_SIZE))
+                                       break;
+                       }
+
+                       if (count < min_linear_pages) {
+                               addr = pfn << PAGE_SHIFT;
+                               dma_addr = pages_addr;
+                       } else {
+                               addr = pages_addr[pfn];
+                               max_entries = count;
+                       }
+
                } else if (flags & AMDGPU_PTE_VALID) {
                        addr += adev->vm_manager.vram_base_offset;
+                       addr += pfn << PAGE_SHIFT;
                }
-               addr += pfn << PAGE_SHIFT;
 
                last = min((uint64_t)mapping->last, start + max_entries - 1);
-               r = amdgpu_vm_bo_update_mapping(adev, exclusive, pages_addr, vm,
+               r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
                                                start, last, flags, addr,
                                                fence);
                if (r)
@@ -2017,7 +2037,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                list_del(&mapping->list);
 
                if (vm->pte_support_ats)
-                       init_pte_value = AMDGPU_PTE_SYSTEM;
+                       init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
                r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
                                                mapping->start, mapping->last,
@@ -2629,7 +2649,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
                if (adev->asic_type == CHIP_RAVEN) {
                        vm->pte_support_ats = true;
-                       init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+                       init_pde_value = AMDGPU_PTE_DEFAULT_ATC
+                                       | AMDGPU_PDE_PTE;
+
                }
        } else
                vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2737,8 +2759,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        struct amdgpu_bo_va_mapping *mapping, *tmp;
        bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+       struct amdgpu_bo *root;
        u64 fault;
-       int i;
+       int i, r;
 
        /* Clear pending page faults from IH when the VM is destroyed */
        while (kfifo_get(&vm->faults, &fault))
@@ -2773,7 +2796,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
        }
 
-       amdgpu_vm_free_levels(&vm->root);
+       root = amdgpu_bo_ref(vm->root.base.bo);
+       r = amdgpu_bo_reserve(root, true);
+       if (r) {
+               dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
+       } else {
+               amdgpu_vm_free_levels(&vm->root);
+               amdgpu_bo_unreserve(root);
+       }
+       amdgpu_bo_unref(&root);
        dma_fence_put(vm->last_update);
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                amdgpu_vm_free_reserved_vmid(adev, vm, i);
index d68f39b4e5e71770f9d5622cf1aada5d8053bae1..aa914256b4bc75d98015ced2bbe413799f8d3466 100644 (file)
@@ -73,6 +73,16 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_PTE_MTYPE(a)    ((uint64_t)a << 57)
 #define AMDGPU_PTE_MTYPE_MASK  AMDGPU_PTE_MTYPE(3ULL)
 
+/* For Raven */
+#define AMDGPU_MTYPE_CC 2
+
+#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
+                                | AMDGPU_PTE_SNOOPED    \
+                                | AMDGPU_PTE_EXECUTABLE \
+                                | AMDGPU_PTE_READABLE   \
+                                | AMDGPU_PTE_WRITEABLE  \
+                                | AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
+
 /* How to programm VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER     0
 #define AMDGPU_VM_FAULT_STOP_FIRST     1
index 147e92b3a9596de92d4d6ea5809847fe39a62154..b8002ac3e53691d159050159cb03c5b0ec009e61 100644 (file)
@@ -20,6 +20,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+#include <linux/kernel.h>
 #include <linux/firmware.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
@@ -3952,10 +3953,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
                                adev->gfx.rlc.reg_list_format_size_bytes >> 2,
                                unique_indices,
                                &indices_count,
-                               sizeof(unique_indices) / sizeof(int),
+                               ARRAY_SIZE(unique_indices),
                                indirect_start_offsets,
                                &offset_count,
-                               sizeof(indirect_start_offsets)/sizeof(int));
+                               ARRAY_SIZE(indirect_start_offsets));
 
        /* save and restore list */
        WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
@@ -3977,14 +3978,14 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
        /* starting offsets starts */
        WREG32(mmRLC_GPM_SCRATCH_ADDR,
                adev->gfx.rlc.starting_offsets_start);
-       for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+       for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
                WREG32(mmRLC_GPM_SCRATCH_DATA,
                                indirect_start_offsets[i]);
 
        /* unique indices */
        temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
        data = mmRLC_SRM_INDEX_CNTL_DATA_0;
-       for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
+       for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
                if (unique_indices[i] != 0) {
                        WREG32(temp + i, unique_indices[i] & 0x3FFFF);
                        WREG32(data + i, unique_indices[i] >> 20);
@@ -6394,6 +6395,104 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 }
 
+static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
+                                          bool acquire)
+{
+       struct amdgpu_device *adev = ring->adev;
+       int pipe_num, tmp, reg;
+       int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
+
+       pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
+
+       /* first me only has 2 entries, GFX and HP3D */
+       if (ring->me > 0)
+               pipe_num -= 2;
+
+       reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
+       tmp = RREG32(reg);
+       tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
+       WREG32(reg, tmp);
+}
+
+static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
+                                           struct amdgpu_ring *ring,
+                                           bool acquire)
+{
+       int i, pipe;
+       bool reserve;
+       struct amdgpu_ring *iring;
+
+       mutex_lock(&adev->gfx.pipe_reserve_mutex);
+       pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
+       if (acquire)
+               set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+       else
+               clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+
+       if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
+               /* Clear all reservations - everyone reacquires all resources */
+               for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
+                       gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
+                                                      true);
+
+               for (i = 0; i < adev->gfx.num_compute_rings; ++i)
+                       gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
+                                                      true);
+       } else {
+               /* Lower all pipes without a current reservation */
+               for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+                       iring = &adev->gfx.gfx_ring[i];
+                       pipe = amdgpu_gfx_queue_to_bit(adev,
+                                                      iring->me,
+                                                      iring->pipe,
+                                                      0);
+                       reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+                       gfx_v8_0_ring_set_pipe_percent(iring, reserve);
+               }
+
+               for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+                       iring = &adev->gfx.compute_ring[i];
+                       pipe = amdgpu_gfx_queue_to_bit(adev,
+                                                      iring->me,
+                                                      iring->pipe,
+                                                      0);
+                       reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+                       gfx_v8_0_ring_set_pipe_percent(iring, reserve);
+               }
+       }
+
+       mutex_unlock(&adev->gfx.pipe_reserve_mutex);
+}
+
+static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
+                                     struct amdgpu_ring *ring,
+                                     bool acquire)
+{
+       uint32_t pipe_priority = acquire ? 0x2 : 0x0;
+       uint32_t queue_priority = acquire ? 0xf : 0x0;
+
+       mutex_lock(&adev->srbm_mutex);
+       vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+       WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+       WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+
+       vi_srbm_select(adev, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+}
+static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+                                              enum amd_sched_priority priority)
+{
+       struct amdgpu_device *adev = ring->adev;
+       bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
+
+       if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+               return;
+
+       gfx_v8_0_hqd_set_priority(adev, ring, acquire);
+       gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
+}
+
 static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
                                             u64 addr, u64 seq,
                                             unsigned flags)
@@ -6839,6 +6938,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .test_ib = gfx_v8_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .set_priority = gfx_v8_0_ring_set_priority_compute,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
index 99a5b3b92e8edbd9729493e881c8832da6499e95..7f15bb2c5233566b771afc111ac17a5cbfe4ccc8 100644 (file)
@@ -20,6 +20,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+#include <linux/kernel.h>
 #include <linux/firmware.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
@@ -1730,10 +1731,10 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
                                adev->gfx.rlc.reg_list_format_size_bytes >> 2,
                                unique_indirect_regs,
                                &unique_indirect_reg_count,
-                               sizeof(unique_indirect_regs)/sizeof(int),
+                               ARRAY_SIZE(unique_indirect_regs),
                                indirect_start_offsets,
                                &indirect_start_offsets_count,
-                               sizeof(indirect_start_offsets)/sizeof(int));
+                               ARRAY_SIZE(indirect_start_offsets));
 
        /* enable auto inc in case it is disabled */
        tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1770,12 +1771,12 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
        /* write the starting offsets to RLC scratch ram */
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
                adev->gfx.rlc.starting_offsets_start);
-       for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+       for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
                        indirect_start_offsets[i]);
 
        /* load unique indirect regs*/
-       for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) {
+       for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
                        unique_indirect_regs[i] & 0x3FFFF);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
index 2812d88a8bdd480371b9ad93ced23322e581e9c2..b4906d2f30d3eef8b00f4aa21398918d81b3a76a 100644 (file)
@@ -183,6 +183,12 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
                        pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
                        return r;
                }
+               /* Retrieve checksum from mailbox2 */
+               if (req == IDH_REQ_GPU_INIT_ACCESS) {
+                       adev->virt.fw_reserve.checksum_key =
+                               RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+                                       mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
+               }
        }
 
        return 0;
index 1c006ba9d826ac20b065417b6f6ec8b5fcf3f967..3ca9d114f630e67e3e7f23b1318e550b49c6f2e4 100644 (file)
@@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
 }
 static u32 soc15_get_xclk(struct amdgpu_device *adev)
 {
-       if (adev->asic_type == CHIP_VEGA10)
-               return adev->clock.spll.reference_freq/4;
-       else
-               return adev->clock.spll.reference_freq;
+       return adev->clock.spll.reference_freq;
 }
 
 
index 60af7310a23432596f2772173b375cf6adcf0347..71299c67c517a1e4a753c1e2bb0216c7c8857f8b 100644 (file)
@@ -268,8 +268,9 @@ err:
  *
  * Close up a stream for HW test or if userspace failed to do so
  */
-int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                                bool direct, struct dma_fence **fence)
+static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+                                       uint32_t handle,
+                                       bool direct, struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
index 189f3b54a385faf784fc95d9ff4eeb8ac9b02a75..ad1f6b57884b716620b602e51c8a53e566d61209 100644 (file)
@@ -961,18 +961,13 @@ static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
 
 static int cz_start_dpm(struct pp_hwmgr *hwmgr)
 {
-       int ret = 0;
        struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-       unsigned long dpm_features = 0;
 
        cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
-       dpm_features |= SCLK_DPM_MASK;
 
-       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+       return smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_EnableAllSmuFeatures,
-                               dpm_features);
-
-       return ret;
+                               SCLK_DPM_MASK);
 }
 
 static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
@@ -1279,27 +1274,18 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
 
 int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                        PHM_PlatformCaps_UVDPowerGating))
-               return smum_send_msg_to_smc(hwmgr,
-                                                    PPSMC_MSG_UVDPowerOFF);
+       if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
+               return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
        return 0;
 }
 
 int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                        PHM_PlatformCaps_UVDPowerGating)) {
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                 PHM_PlatformCaps_UVDDynamicPowerGating)) {
-                       return smum_send_msg_to_smc_with_parameter(
-                                                               hwmgr,
-                                                  PPSMC_MSG_UVDPowerON, 1);
-               } else {
-                       return smum_send_msg_to_smc_with_parameter(
-                                                               hwmgr,
-                                                  PPSMC_MSG_UVDPowerON, 0);
-               }
+       if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
+               return smum_send_msg_to_smc_with_parameter(
+                       hwmgr,
+                       PPSMC_MSG_UVDPowerON,
+                       PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
        }
 
        return 0;
@@ -1313,17 +1299,16 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 
        if (!bgate) {
                /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                        PHM_PlatformCaps_StablePState)
-                       || hwmgr->en_umd_pstate) {
+               if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+                   hwmgr->en_umd_pstate) {
                        cz_hwmgr->uvd_dpm.hard_min_clk =
                                   ptable->entries[ptable->count - 1].vclk;
 
                        smum_send_msg_to_smc_with_parameter(hwmgr,
-                                                    PPSMC_MSG_SetUvdHardMin,
-                                                     cz_get_uvd_level(hwmgr,
-                                            cz_hwmgr->uvd_dpm.hard_min_clk,
-                                                  PPSMC_MSG_SetUvdHardMin));
+                               PPSMC_MSG_SetUvdHardMin,
+                               cz_get_uvd_level(hwmgr,
+                                       cz_hwmgr->uvd_dpm.hard_min_clk,
+                                       PPSMC_MSG_SetUvdHardMin));
 
                        cz_enable_disable_uvd_dpm(hwmgr, true);
                } else {
@@ -1343,17 +1328,16 @@ int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
                hwmgr->dyn_state.vce_clock_voltage_dependency_table;
 
        /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_StablePState)
-                                       || hwmgr->en_umd_pstate) {
+       if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+           hwmgr->en_umd_pstate) {
                cz_hwmgr->vce_dpm.hard_min_clk =
                                  ptable->entries[ptable->count - 1].ecclk;
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
-                                       PPSMC_MSG_SetEclkHardMin,
-                                       cz_get_eclk_level(hwmgr,
-                                            cz_hwmgr->vce_dpm.hard_min_clk,
-                                               PPSMC_MSG_SetEclkHardMin));
+                       PPSMC_MSG_SetEclkHardMin,
+                       cz_get_eclk_level(hwmgr,
+                               cz_hwmgr->vce_dpm.hard_min_clk,
+                               PPSMC_MSG_SetEclkHardMin));
        } else {
                /*Program HardMin based on the vce_arbiter.ecclk */
                if (hwmgr->vce_arbiter.ecclk == 0) {
@@ -1366,10 +1350,10 @@ int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
                } else {
                        cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
                        smum_send_msg_to_smc_with_parameter(hwmgr,
-                                               PPSMC_MSG_SetEclkHardMin,
-                                               cz_get_eclk_level(hwmgr,
-                                               cz_hwmgr->vce_dpm.hard_min_clk,
-                                               PPSMC_MSG_SetEclkHardMin));
+                               PPSMC_MSG_SetEclkHardMin,
+                               cz_get_eclk_level(hwmgr,
+                                       cz_hwmgr->vce_dpm.hard_min_clk,
+                                       PPSMC_MSG_SetEclkHardMin));
                }
        }
        return 0;
@@ -1377,8 +1361,7 @@ int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
 
 int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                        PHM_PlatformCaps_VCEPowerGating))
+       if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
                return smum_send_msg_to_smc(hwmgr,
                                                     PPSMC_MSG_VCEPowerOFF);
        return 0;
@@ -1386,8 +1369,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
 
 int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
 {
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                        PHM_PlatformCaps_VCEPowerGating))
+       if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
                return smum_send_msg_to_smc(hwmgr,
                                                     PPSMC_MSG_VCEPowerON);
        return 0;
@@ -1871,6 +1853,33 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
        }
 }
 
+static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+                                       uint32_t virtual_addr_low,
+                                       uint32_t virtual_addr_hi,
+                                       uint32_t mc_addr_low,
+                                       uint32_t mc_addr_hi,
+                                       uint32_t size)
+{
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_DramAddrHiVirtual,
+                                       mc_addr_hi);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_DramAddrLoVirtual,
+                                       mc_addr_low);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_DramAddrHiPhysical,
+                                       virtual_addr_hi);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_DramAddrLoPhysical,
+                                       virtual_addr_low);
+
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_DramBufferSize,
+                                       size);
+       return 0;
+}
+
+
 static const struct pp_hwmgr_func cz_hwmgr_funcs = {
        .backend_init = cz_hwmgr_backend_init,
        .backend_fini = cz_hwmgr_backend_fini,
@@ -1894,12 +1903,14 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
        .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
        .get_clock_by_type = cz_get_clock_by_type,
        .get_max_high_clocks = cz_get_max_high_clocks,
+       .get_temperature = cz_thermal_get_temperature,
        .read_sensor = cz_read_sensor,
        .power_off_asic = cz_power_off_asic,
        .asic_setup = cz_setup_asic_task,
        .dynamic_state_management_enable = cz_enable_dpm_tasks,
        .power_state_set = cz_set_power_state_tasks,
        .dynamic_state_management_disable = cz_disable_dpm_tasks,
+       .notify_cac_buffer_info = cz_notify_cac_buffer_info,
 };
 
 int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
index 4826b2991b7e271370513743eda86d4a06bbd853..e32f18a9907463d9e186645890f74e5589635fc4 100644 (file)
@@ -4645,6 +4645,47 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
        return 0;
 }
 
+static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+                                       uint32_t virtual_addr_low,
+                                       uint32_t virtual_addr_hi,
+                                       uint32_t mc_addr_low,
+                                       uint32_t mc_addr_hi,
+                                       uint32_t size)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                                       data->soft_regs_start +
+                                       smum_get_offsetof(hwmgr,
+                                       SMU_SoftRegisters, DRAM_LOG_ADDR_H),
+                                       mc_addr_hi);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                                       data->soft_regs_start +
+                                       smum_get_offsetof(hwmgr,
+                                       SMU_SoftRegisters, DRAM_LOG_ADDR_L),
+                                       mc_addr_low);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                                       data->soft_regs_start +
+                                       smum_get_offsetof(hwmgr,
+                                       SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
+                                       virtual_addr_hi);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                                       data->soft_regs_start +
+                                       smum_get_offsetof(hwmgr,
+                                       SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
+                                       virtual_addr_low);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                                       data->soft_regs_start +
+                                       smum_get_offsetof(hwmgr,
+                                       SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
+                                       size);
+       return 0;
+}
+
 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
        .backend_init = &smu7_hwmgr_backend_init,
        .backend_fini = &smu7_hwmgr_backend_fini,
@@ -4696,6 +4737,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
        .avfs_control = smu7_avfs_control,
        .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
        .start_thermal_controller = smu7_start_thermal_controller,
+       .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
 };
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
index 48de45ec0eaff59dbc7f89f80911ed6509be4e0b..0519338e0e5e1d31a9c543863f470be51d0b1872 100644 (file)
@@ -1161,6 +1161,8 @@ static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
 {
        int i;
 
+       dpm_table->count = 0;
+
        for (i = 0; i < dep_table->count; i++) {
                if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
                                dep_table->entries[i].clk) {
@@ -1269,10 +1271,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
                        return -EINVAL);
 
        /* Initialize Sclk DPM table based on allow Sclk values */
-       data->dpm_table.soc_table.count = 0;
-       data->dpm_table.gfx_table.count = 0;
-       data->dpm_table.dcef_table.count = 0;
-
        dpm_table = &(data->dpm_table.soc_table);
        vega10_setup_default_single_dpm_table(hwmgr,
                        dpm_table,
@@ -4994,6 +4992,33 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
        return 0;
 }
 
+static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+                                       uint32_t virtual_addr_low,
+                                       uint32_t virtual_addr_hi,
+                                       uint32_t mc_addr_low,
+                                       uint32_t mc_addr_hi,
+                                       uint32_t size)
+{
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSystemVirtualDramAddrHigh,
+                                       virtual_addr_hi);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSystemVirtualDramAddrLow,
+                                       virtual_addr_low);
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_DramLogSetDramAddrHigh,
+                                       mc_addr_hi);
+
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_DramLogSetDramAddrLow,
+                                       mc_addr_low);
+
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_DramLogSetDramSize,
+                                       size);
+       return 0;
+}
+
 static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
                const void *info)
 {
@@ -5079,7 +5104,9 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
        .get_mclk_od = vega10_get_mclk_od,
        .set_mclk_od = vega10_set_mclk_od,
        .avfs_control = vega10_avfs_enable,
+       .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
        .register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
+       .start_thermal_controller = vega10_start_thermal_controller,
 };
 
 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
index e343df1903754fc19e0f2ac082f8b8369c5f1201..f14c7611fad303c4bc4130545cea5bcd9bfa5ae4 100644 (file)
@@ -291,8 +291,7 @@ static int get_mm_clock_voltage_table(
        table_size = sizeof(uint32_t) +
                        sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) *
                        mm_dependency_table->ucNumEntries;
-       mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+       mm_table = kzalloc(table_size, GFP_KERNEL);
 
        if (!mm_table)
                return -ENOMEM;
@@ -519,8 +518,7 @@ static int get_socclk_voltage_dependency_table(
                        sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
                        clk_dep_table->ucNumEntries;
 
-       clk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+       clk_table = kzalloc(table_size, GFP_KERNEL);
 
        if (!clk_table)
                return -ENOMEM;
@@ -554,8 +552,7 @@ static int get_mclk_voltage_dependency_table(
                        sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
                        mclk_dep_table->ucNumEntries;
 
-       mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+       mclk_table = kzalloc(table_size, GFP_KERNEL);
 
        if (!mclk_table)
                return -ENOMEM;
@@ -596,8 +593,7 @@ static int get_gfxclk_voltage_dependency_table(
                        sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
                        clk_dep_table->ucNumEntries;
 
-       clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+       clk_table = kzalloc(table_size, GFP_KERNEL);
 
        if (!clk_table)
                return -ENOMEM;
@@ -663,8 +659,7 @@ static int get_pix_clk_voltage_dependency_table(
                        sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
                        clk_dep_table->ucNumEntries;
 
-       clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+       clk_table = kzalloc(table_size, GFP_KERNEL);
 
        if (!clk_table)
                return -ENOMEM;
@@ -728,8 +723,7 @@ static int get_dcefclk_voltage_dependency_table(
                        sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
                        num_entries;
 
-       clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+       clk_table = kzalloc(table_size, GFP_KERNEL);
 
        if (!clk_table)
                return -ENOMEM;
@@ -772,8 +766,7 @@ static int get_pcie_table(struct pp_hwmgr *hwmgr,
                        sizeof(struct phm_ppt_v1_pcie_record) *
                        atom_pcie_table->ucNumEntries;
 
-       pcie_table = (struct phm_ppt_v1_pcie_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+       pcie_table = kzalloc(table_size, GFP_KERNEL);
 
        if (!pcie_table)
                return -ENOMEM;
@@ -1026,10 +1019,9 @@ static int get_vddc_lookup_table(
        table_size = sizeof(uint32_t) +
                        sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
 
-       table = (phm_ppt_v1_voltage_lookup_table *)
-                       kzalloc(table_size, GFP_KERNEL);
+       table = kzalloc(table_size, GFP_KERNEL);
 
-       if (NULL == table)
+       if (table == NULL)
                return -ENOMEM;
 
        table->count = vddc_lookup_pp_tables->ucNumEntries;
@@ -1138,12 +1130,12 @@ int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr)
 
        hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v2_information), GFP_KERNEL);
 
-       PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
+       PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL),
                            "Failed to allocate hwmgr->pptable!", return -ENOMEM);
 
        powerplay_table = get_powerplay_table(hwmgr);
 
-       PP_ASSERT_WITH_CODE((NULL != powerplay_table),
+       PP_ASSERT_WITH_CODE((powerplay_table != NULL),
                "Missing PowerPlay Table!", return -1);
 
        result = check_powerplay_tables(hwmgr, powerplay_table);
@@ -1182,7 +1174,6 @@ int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr)
 
 static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
 {
-       int result = 0;
        struct phm_ppt_v2_information *pp_table_info =
                        (struct phm_ppt_v2_information *)(hwmgr->pptable);
 
@@ -1225,7 +1216,7 @@ static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
        kfree(hwmgr->pptable);
        hwmgr->pptable = NULL;
 
-       return result;
+       return 0;
 }
 
 const struct pp_table_func vega10_pptable_funcs = {
@@ -1238,7 +1229,7 @@ int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
        const ATOM_Vega10_State_Array *state_arrays;
        const ATOM_Vega10_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
 
-       PP_ASSERT_WITH_CODE((NULL != pp_table),
+       PP_ASSERT_WITH_CODE((pp_table != NULL),
                        "Missing PowerPlay Table!", return -1);
        PP_ASSERT_WITH_CODE((pp_table->sHeader.format_revision >=
                        ATOM_Vega10_TABLE_REVISION_VEGA10),
index f34ce04cfd890dc31de5913183468f2b3f5758c6..82f10bdd5f07c00f42c7e0c05d6c2161159fab69 100644 (file)
@@ -71,7 +71,8 @@ extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
 extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
 extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
 extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
-
+extern int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
+                               struct PP_TemperatureRange *range);
 extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
deleted file mode 100644 (file)
index 9d391f0..0000000
+++ /dev/null
@@ -1,2007 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _FIJI_PWRVIRUS_H_
-#define _FIJI_PWRVIRUS_H_
-
-#define mmCP_HYP_MEC1_UCODE_ADDR       0xf81a
-#define mmCP_HYP_MEC1_UCODE_DATA       0xf81b
-#define mmCP_HYP_MEC2_UCODE_ADDR       0xf81c
-#define mmCP_HYP_MEC2_UCODE_DATA       0xf81d
-
-struct PWR_Command_Table
-{
-   uint32_t           data;
-   uint32_t           reg;
-};
-typedef struct PWR_Command_Table PWR_Command_Table;
-
-struct PWR_DFY_Section {
-       uint32_t dfy_cntl;
-       uint32_t dfy_addr_hi, dfy_addr_lo;
-       uint32_t dfy_size;
-       uint32_t dfy_data[];
-};
-
-typedef struct PWR_DFY_Section PWR_DFY_Section;
-
-static const PWR_Command_Table PwrVirusTable_pre[] =
-{
-    { 0x100100b6, mmPCIE_INDEX                               },
-    { 0x00000000, mmPCIE_DATA                                },
-    { 0x100100b6, mmPCIE_INDEX                               },
-    { 0x0300078c, mmPCIE_DATA                                },
-    { 0x00000000, mmBIF_CLK_CTRL                             },
-    { 0x00000001, mmBIF_CLK_CTRL                             },
-    { 0x00000000, mmBIF_CLK_CTRL                             },
-    { 0x00000003, mmBIF_FB_EN                                },
-    { 0x00000000, mmBIF_FB_EN                                },
-    { 0x00000001, mmBIF_DOORBELL_APER_EN                     },
-    { 0x00000000, mmBIF_DOORBELL_APER_EN                     },
-    { 0x014000c0, mmPCIE_INDEX                               },
-    { 0x00000000, mmPCIE_DATA                                },
-    { 0x014000c0, mmPCIE_INDEX                               },
-    { 0x22000000, mmPCIE_DATA                                },
-    { 0x014000c0, mmPCIE_INDEX                               },
-    { 0x00000000, mmPCIE_DATA                                },
-    /*
-    { 0x009f0090, mmMC_VM_FB_LOCATION                        },
-    { 0x00000000, mmMC_CITF_CNTL                             },
-    { 0x00000000, mmMC_VM_FB_LOCATION                        },
-    { 0x009f0090, mmMC_VM_FB_LOCATION                        },
-    { 0x00000000, mmMC_VM_FB_LOCATION                        },
-    { 0x009f0090, mmMC_VM_FB_LOCATION                        },
-    { 0x00000000, mmMC_VM_FB_OFFSET                          },*/
-    { 0x00000000, mmRLC_CSIB_ADDR_LO                         },
-    { 0x00000000, mmRLC_CSIB_ADDR_HI                         },
-    { 0x00000000, mmRLC_CSIB_LENGTH                          },
-    /*
-    { 0x00000000, mmMC_VM_MX_L1_TLB_CNTL                     },
-    { 0x00000001, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR           },
-    { 0x00000000, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR          },
-    { 0x00000000, mmMC_VM_FB_LOCATION                        },
-    { 0x009f0090, mmMC_VM_FB_LOCATION                        },*/
-    { 0x00000000, mmVM_CONTEXT0_CNTL                         },
-    { 0x00000000, mmVM_CONTEXT1_CNTL                         },
-    /*
-    { 0x00000000, mmMC_VM_AGP_BASE                           },
-    { 0x00000002, mmMC_VM_AGP_BOT                            },
-    { 0x00000000, mmMC_VM_AGP_TOP                            },*/
-    { 0x04000000, mmATC_VM_APERTURE0_LOW_ADDR                },
-    { 0x0400ff20, mmATC_VM_APERTURE0_HIGH_ADDR               },
-    { 0x00000002, mmATC_VM_APERTURE0_CNTL                    },
-    { 0x0000ffff, mmATC_VM_APERTURE0_CNTL2                   },
-    { 0x00000001, mmATC_VM_APERTURE1_LOW_ADDR                },
-    { 0x00000000, mmATC_VM_APERTURE1_HIGH_ADDR               },
-    { 0x00000000, mmATC_VM_APERTURE1_CNTL                    },
-    { 0x00000000, mmATC_VM_APERTURE1_CNTL2                   },
-    //{ 0x00000000, mmMC_ARB_RAMCFG                            },
-    { 0x12011003, mmGB_ADDR_CONFIG                           },
-    { 0x00800010, mmGB_TILE_MODE0                            },
-    { 0x00800810, mmGB_TILE_MODE1                            },
-    { 0x00801010, mmGB_TILE_MODE2                            },
-    { 0x00801810, mmGB_TILE_MODE3                            },
-    { 0x00802810, mmGB_TILE_MODE4                            },
-    { 0x00802808, mmGB_TILE_MODE5                            },
-    { 0x00802814, mmGB_TILE_MODE6                            },
-    { 0x00000000, mmGB_TILE_MODE7                            },
-    { 0x00000004, mmGB_TILE_MODE8                            },
-    { 0x02000008, mmGB_TILE_MODE9                            },
-    { 0x02000010, mmGB_TILE_MODE10                           },
-    { 0x06000014, mmGB_TILE_MODE11                           },
-    { 0x00000000, mmGB_TILE_MODE12                           },
-    { 0x02400008, mmGB_TILE_MODE13                           },
-    { 0x02400010, mmGB_TILE_MODE14                           },
-    { 0x02400030, mmGB_TILE_MODE15                           },
-    { 0x06400014, mmGB_TILE_MODE16                           },
-    { 0x00000000, mmGB_TILE_MODE17                           },
-    { 0x0040000c, mmGB_TILE_MODE18                           },
-    { 0x0100000c, mmGB_TILE_MODE19                           },
-    { 0x0100001c, mmGB_TILE_MODE20                           },
-    { 0x01000034, mmGB_TILE_MODE21                           },
-    { 0x01000024, mmGB_TILE_MODE22                           },
-    { 0x00000000, mmGB_TILE_MODE23                           },
-    { 0x0040001c, mmGB_TILE_MODE24                           },
-    { 0x01000020, mmGB_TILE_MODE25                           },
-    { 0x01000038, mmGB_TILE_MODE26                           },
-    { 0x02c00008, mmGB_TILE_MODE27                           },
-    { 0x02c00010, mmGB_TILE_MODE28                           },
-    { 0x06c00014, mmGB_TILE_MODE29                           },
-    { 0x00000000, mmGB_TILE_MODE30                           },
-    { 0x00000000, mmGB_TILE_MODE31                           },
-    { 0x000000a8, mmGB_MACROTILE_MODE0                       },
-    { 0x000000a4, mmGB_MACROTILE_MODE1                       },
-    { 0x00000090, mmGB_MACROTILE_MODE2                       },
-    { 0x00000090, mmGB_MACROTILE_MODE3                       },
-    { 0x00000090, mmGB_MACROTILE_MODE4                       },
-    { 0x00000090, mmGB_MACROTILE_MODE5                       },
-    { 0x00000090, mmGB_MACROTILE_MODE6                       },
-    { 0x00000000, mmGB_MACROTILE_MODE7                       },
-    { 0x000000ee, mmGB_MACROTILE_MODE8                       },
-    { 0x000000ea, mmGB_MACROTILE_MODE9                       },
-    { 0x000000e9, mmGB_MACROTILE_MODE10                      },
-    { 0x000000e5, mmGB_MACROTILE_MODE11                      },
-    { 0x000000e4, mmGB_MACROTILE_MODE12                      },
-    { 0x000000e0, mmGB_MACROTILE_MODE13                      },
-    { 0x00000090, mmGB_MACROTILE_MODE14                      },
-    { 0x00000000, mmGB_MACROTILE_MODE15                      },
-    { 0x00900000, mmHDP_NONSURFACE_BASE                      },
-    { 0x00008000, mmHDP_NONSURFACE_INFO                      },
-    { 0x3fffffff, mmHDP_NONSURFACE_SIZE                      },
-    { 0x00000003, mmBIF_FB_EN                                },
-    //{ 0x00000000, mmMC_VM_FB_OFFSET                          },
-    { 0x00000000, mmSRBM_CNTL                                },
-    { 0x00020000, mmSRBM_CNTL                                },
-    { 0x80000000, mmATC_VMID0_PASID_MAPPING                  },
-    { 0x00000000, mmATC_VMID_PASID_MAPPING_UPDATE_STATUS     },
-    { 0x00000000, mmRLC_CNTL                                 },
-    { 0x00000000, mmRLC_CNTL                                 },
-    { 0x00000000, mmRLC_CNTL                                 },
-    { 0xe0000000, mmGRBM_GFX_INDEX                           },
-    { 0x00000000, mmCGTS_TCC_DISABLE                         },
-    { 0x00000000, mmTCP_ADDR_CONFIG                          },
-    { 0x000000ff, mmTCP_ADDR_CONFIG                          },
-    { 0x76543210, mmTCP_CHAN_STEER_LO                        },
-    { 0xfedcba98, mmTCP_CHAN_STEER_HI                        },
-    { 0x00000000, mmDB_DEBUG2                                },
-    { 0x00000000, mmDB_DEBUG                                 },
-    { 0x00002b16, mmCP_QUEUE_THRESHOLDS                      },
-    { 0x00006030, mmCP_MEQ_THRESHOLDS                        },
-    { 0x01000104, mmSPI_CONFIG_CNTL_1                        },
-    { 0x98184020, mmPA_SC_FIFO_SIZE                          },
-    { 0x00000001, mmVGT_NUM_INSTANCES                        },
-    { 0x00000000, mmCP_PERFMON_CNTL                          },
-    { 0x01180000, mmSQ_CONFIG                                },
-    { 0x00000000, mmVGT_CACHE_INVALIDATION                   },
-    { 0x00000000, mmSQ_THREAD_TRACE_BASE                     },
-    { 0x0000df80, mmSQ_THREAD_TRACE_MASK                     },
-    { 0x02249249, mmSQ_THREAD_TRACE_MODE                     },
-    { 0x00000000, mmPA_SC_LINE_STIPPLE_STATE                 },
-    { 0x00000000, mmCB_PERFCOUNTER0_SELECT1                  },
-    { 0x06000100, mmCGTT_VGT_CLK_CTRL                        },
-    { 0x00000007, mmPA_CL_ENHANCE                            },
-    { 0x00000001, mmPA_SC_ENHANCE                            },
-    { 0x00ffffff, mmPA_SC_FORCE_EOV_MAX_CNTS                 },
-    { 0x00000000, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000010, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000020, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000030, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000040, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000050, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000060, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000070, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000080, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000090, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x000000a0, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x000000b0, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x000000c0, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x000000d0, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x000000e0, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x000000f0, mmSRBM_GFX_CNTL                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000000, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmRLC_PG_CNTL                              },
-    { 0x00000000, mmGRBM_STATUS2                             },
-    { 0x15000000, mmCP_ME_CNTL                               },
-    { 0x50000000, mmCP_MEC_CNTL                              },
-    { 0x00000000, mmSRBM_GFX_CNTL                            },
-    { 0x0000000e, mmSH_MEM_APE1_BASE                         },
-    { 0x0000020d, mmSH_MEM_APE1_LIMIT                        },
-    { 0x00000000, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmSH_MEM_CONFIG                            },
-    { 0x00000320, mmSH_MEM_CONFIG                            },
-    { 0x00000000, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_RB_VMID                               },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmRLC_CNTL                                 },
-    { 0x00000000, mmRLC_CNTL                                 },
-    { 0x00000000, mmRLC_SRM_CNTL                             },
-    { 0x00000002, mmRLC_SRM_CNTL                             },
-    { 0x00000000, mmCP_ME_CNTL                               },
-    { 0x15000000, mmCP_ME_CNTL                               },
-    { 0x00000000, mmCP_MEC_CNTL                              },
-    { 0x50000000, mmCP_MEC_CNTL                              },
-    { 0x80000004, mmCP_DFY_CNTL                              },
-    { 0x0840800a, mmCP_RB0_CNTL                              },
-    { 0xf30fff0f, mmTCC_CTRL                                 },
-    { 0x00000002, mmTCC_EXE_DISABLE                          },
-    { 0x000000ff, mmTCP_ADDR_CONFIG                          },
-    { 0x540ff000, mmCP_CPC_IC_BASE_LO                        },
-    { 0x000000b4, mmCP_CPC_IC_BASE_HI                        },
-    { 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR                   },
-    { 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00221408, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00591260, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00621387, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA                   },
-    { 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR                   },
-    { 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00221408, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00591260, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00621387, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA                   },
-    { 0x00000000, 0xFFFFFFFF                                 },
-};
-
-static const PWR_DFY_Section pwr_virus_section1 = {
-       .dfy_cntl = 0x80000004,
-       .dfy_addr_hi = 0x000000b4,
-       .dfy_addr_lo = 0x540fe800,
-       .dfy_data = {
-       0x7e000200, 0x7e020201, 0x7e040204, 0x7e060205, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
-       0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0xbf810000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x54106f00, 0x000400b4, 0x00004000, 0x00804fac, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       },
-       .dfy_size = 416
-};
-
-static const PWR_DFY_Section pwr_virus_section2 = {
-       .dfy_cntl = 0x80000004,
-       .dfy_addr_hi = 0x000000b4,
-       .dfy_addr_lo = 0x540fef00,
-       .dfy_data = {
-       0xc0031502, 0x00001e00, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       },
-       .dfy_size = 16
-};
-
-static const PWR_DFY_Section pwr_virus_section3 = {
-       .dfy_cntl = 0x80000004,
-       .dfy_addr_hi = 0x000000b4,
-       .dfy_addr_lo = 0x540ff000,
-       .dfy_data = {
-       0xc424000b, 0x80000145, 0x94800001, 0x94c00001, 0x95000001, 0x95400001, 0x95800001, 0xdc810000,
-       0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xc4080061, 0xd8400013, 0xd8000003, 0xc40c0001,
-       0x24ccffff, 0x3cd08000, 0x9500fffd, 0x1cd0ffcf, 0x7d018001, 0xc4140004, 0x050c0019, 0xd8400008,
-       0x84c00000, 0x80000023, 0x80000067, 0x8000006a, 0x8000006d, 0x80000079, 0x80000084, 0x8000008f,
-       0x80000099, 0x800000a0, 0x800000af, 0xd8400053, 0xc4080007, 0x388c0001, 0x08880002, 0x04100003,
-       0x94c00005, 0x98800003, 0x04100004, 0x8000002d, 0x04100005, 0x8c00003f, 0x8c000043, 0x28cc0000,
-       0xccc00050, 0x8c000055, 0x28080001, 0xcc000004, 0x7d808001, 0xd8400013, 0xd88130b8, 0xcd400008,
-       0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000, 0xcc800005, 0xdc080000, 0x80000168, 0xc40c000e,
-       0x28cc0008, 0xccc00013, 0x90000000, 0xcd013278, 0xc4113278, 0x95000001, 0x24cc0700, 0xd8400029,
-       0xc4113255, 0xcd01324f, 0xc4113254, 0x1d10ffdf, 0xcd013254, 0x10cc0014, 0x1d10c017, 0x7d0d000a,
-       0xd8400013, 0xd8400008, 0xcd0130b7, 0x14cc0010, 0x90000000, 0xd9c00036, 0x8000005d, 0xd8400013,
-       0xc00c4000, 0xccc130b5, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc40c0021, 0x14d00011, 0x9500fffe,
-       0xdc030000, 0xd800000c, 0xd800000d, 0xc40c005e, 0x94c01b10, 0xd8400013, 0x90000000, 0xc00e0080,
-       0xccc130b5, 0x8000013b, 0xc00e0800, 0xccc130b5, 0x8000013b, 0xd8400053, 0x04100006, 0x8c00003f,
-       0x8c000043, 0x28cc0000, 0xccc00050, 0x8c000055, 0x280c0008, 0xccc00052, 0xd8000021, 0x28180039,
-       0x80000034, 0xd8400053, 0x04100007, 0x8c00003f, 0x8c000043, 0x28cc0001, 0xccc00050, 0x8c000055,
-       0x280c0010, 0xccc00052, 0x28180039, 0x80000034, 0xd8400053, 0x04100008, 0x8c00003f, 0x8c000043,
-       0x28cc0003, 0xccc00050, 0x8c000055, 0x280c0020, 0xccc00052, 0x28180039, 0x80000034, 0xdc030000,
-       0xd8000069, 0x28080001, 0xc428000d, 0x7ca88004, 0xcc800079, 0x04280001, 0xcc00006f, 0x8000013b,
-       0x80000034, 0x04100010, 0x8c00003f, 0x8c000043, 0xccc00078, 0x8c000055, 0x28180080, 0x80000034,
-       0x04100001, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xcd013278, 0xc4113278, 0x95000001, 0xc00c4000,
-       0xc4113254, 0x1d10c017, 0xd8400013, 0xd8400008, 0xccc130b5, 0xcd0130b7, 0x8000013b, 0x95c00001,
-       0x96000001, 0x96400001, 0x96800001, 0x96c00001, 0x97000001, 0x97400001, 0x97800001, 0x97c00001,
-       0xdc810000, 0xc40c000c, 0xcd4c0380, 0xcdcc0388, 0x55dc0020, 0xcdcc038c, 0xce0c0390, 0x56200020,
-       0xce0c0394, 0xce4c0398, 0x56640020, 0xce4c039c, 0xce8c03a0, 0x56a80020, 0xce8c03a4, 0xcecc03a8,
-       0x56ec0020, 0xcecc03ac, 0xcf0c03b0, 0x57300020, 0xcf0c03b4, 0xcf4c03b8, 0x57740020, 0xcf4c03bc,
-       0xcf8c03c0, 0x57b80020, 0xcf8c03c4, 0xcfcc03c8, 0x57fc0020, 0xcfcc03cc, 0xd9000033, 0xc41c0009,
-       0x25dc0010, 0x95c0fffe, 0xd8400013, 0xc41c000c, 0x05dc002f, 0xcdc12009, 0xc41d200a, 0xd8400013,
-       0xcc012009, 0xd9000034, 0x25e01c00, 0x12200013, 0x25e40300, 0x12640008, 0x25e800c0, 0x12a80002,
-       0x25ec003f, 0x7e25c00a, 0x7eae400a, 0x7de5c00a, 0xddc10000, 0xc02ee000, 0xcec1c200, 0xc40c005f,
-       0xccc00037, 0x24d000ff, 0x31100006, 0x9500007b, 0x8c000190, 0xdc1c0000, 0xd8400013, 0xcdc1c200,
-       0xc40c000c, 0xc4df0388, 0xc4d7038c, 0x51540020, 0x7d5dc01a, 0xc4e30390, 0xc4d70394, 0x51540020,
-       0x7d62001a, 0xc4e70398, 0xc4d7039c, 0x51540020, 0x7d66401a, 0xc4eb03a0, 0xc4d703a4, 0x51540020,
-       0x7d6a801a, 0xc4ef03a8, 0xc4d703ac, 0x51540020, 0x7d6ec01a, 0xc4f303b0, 0xc4d703b4, 0x51540020,
-       0x7d73001a, 0xc4f703b8, 0xc4d703bc, 0x51540020, 0x7d77401a, 0xc4fb03c0, 0xc4d703c4, 0x51540020,
-       0x7d7b801a, 0xc4ff03c8, 0xc4d703cc, 0x51540020, 0x7d7fc01a, 0xdc080000, 0xcc800013, 0xc4d70380,
-       0xc4080001, 0x1c88001c, 0xcd400008, 0xc40c0083, 0x94c00010, 0xdc0e0000, 0x94c0000e, 0xc40c0082,
-       0x24d00001, 0x9900000b, 0x18cc01e3, 0x3cd00004, 0x95000008, 0xc40c0085, 0x18cc006a, 0x98c00005,
-       0xc40c0082, 0x18cc01e3, 0x3cd00004, 0x9900fffa, 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000,
-       0xcc800004, 0xdc080000, 0x90000000, 0xc4080001, 0x1c88001c, 0xcd400008, 0xdc180000, 0xdc140000,
-       0xdc100000, 0xdc0c0000, 0xcc800004, 0xdc080000, 0x90000000, 0xd8400051, 0xc428000c, 0x04180018,
-       0x32640002, 0x9a80001f, 0x9a40001e, 0xcd800013, 0xc4293265, 0x040c0000, 0x1aac0027, 0x2aa80080,
-       0xce813265, 0x9ac00017, 0xd80002f1, 0x04080002, 0x08880001, 0xd8080250, 0xd8080258, 0xd8080230,
-       0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270, 0xd8080278, 0xd8080280, 0xd8080228,
-       0xd8000367, 0x9880fff3, 0x04080010, 0x08880001, 0xd80c0309, 0xd80c0319, 0x04cc0001, 0x9880fffc,
-       0x7c408001, 0x88000000, 0xc00e0100, 0xd8400013, 0xd8400008, 0xccc130b5, 0x8000016e, 0xc4180032,
-       0x29980008, 0xcd800013, 0x95800001, 0x7c40c001, 0x18d0003f, 0x24d4001f, 0x24d80001, 0x155c0001,
-       0x05e80180, 0x9900000b, 0x202c003d, 0xcd800010, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x86800000,
-       0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0xc4200007, 0x0a200001, 0xce000010, 0x80001b70,
-       0x7c40c001, 0x8c000190, 0xc410001b, 0xd8000032, 0xd8000031, 0x9900091a, 0x7c408001, 0x88000000,
-       0x24d000ff, 0x05280196, 0x18d4fe04, 0x29540008, 0xcd400013, 0x86800000, 0x800001b4, 0x8000032b,
-       0x80000350, 0x80000352, 0x8000035f, 0x80000701, 0x8000047c, 0x8000019f, 0x80000800, 0xc419325b,
-       0x1d98001f, 0xcd81325b, 0x8c00003f, 0xc4140004, 0xd8400008, 0x04100002, 0x8c000043, 0x28cc0002,
-       0xccc00050, 0xc43c0044, 0x27fc0003, 0x9bc00002, 0x97c00006, 0xc00c4000, 0xccc130b5, 0x8c000055,
-       0xd8400013, 0xd88130b8, 0xcd400008, 0x90000000, 0xd8400008, 0xcd400013, 0x7d40c001, 0xd8400028,
-       0xd8400029, 0xd9400036, 0xc4193256, 0xc41d3254, 0x15540008, 0xcd400009, 0xcd40005b, 0xcd40005e,
-       0xcd40005d, 0xd840006d, 0xc421325a, 0xc42d3249, 0x11540015, 0x19a4003c, 0x1998003f, 0x1af0007d,
-       0x11dc000b, 0x1264001f, 0x15dc000d, 0x7d65400a, 0x13300018, 0x1a38003f, 0x7dd5c00a, 0x7df1c00a,
-       0xcd800045, 0xcdc00100, 0xc411326a, 0xc415326b, 0xc419326c, 0xc41d326d, 0xc425326e, 0xc4293279,
-       0xce800077, 0xcd000056, 0xcd400057, 0xcd800058, 0xcdc00059, 0xc4193265, 0x259c8000, 0x99c00004,
-       0xce40005a, 0x29988000, 0xcd813265, 0xc4113248, 0x2510000f, 0xcd000073, 0xc418000d, 0xc411326f,
-       0x17300019, 0x97000009, 0x25140fff, 0x95400007, 0xd800003a, 0x8c001b6d, 0xc4153279, 0xcd400077,
-       0xcd00005f, 0xd8000075, 0x26f00001, 0x15100010, 0x7d190004, 0xcd000035, 0x97000035, 0x1af07fe8,
-       0xd8800013, 0xd8400010, 0xd8400008, 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001,
-       0x04300010, 0xdf430000, 0x7c434001, 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078,
-       0xdf030000, 0xd4412e40, 0xd8400013, 0xcc41c030, 0xcc41c031, 0xc43dc031, 0xccc00013, 0x04343000,
-       0xc4113246, 0xc41d3245, 0xcf413267, 0x51100020, 0x7dd1c01a, 0xc4353267, 0x45dc0160, 0xc810001f,
-       0x1b4c0057, 0x1b700213, 0x1b740199, 0x7f4f400a, 0x7f73400a, 0x55180020, 0x2198003f, 0xd1c00025,
-       0xcf400024, 0xcd000026, 0xcd800026, 0xd8400027, 0x9bc00001, 0x248dfffe, 0xd8800013, 0xccc12e00,
-       0x7c434001, 0x7c434001, 0x8c00142b, 0xc43c000e, 0x1af4007d, 0x2bfc0008, 0x33740003, 0x26d80001,
-       0xcfc00013, 0x1ae8003e, 0x9680000c, 0xc4253277, 0x26680001, 0x96800009, 0x2a640002, 0xce413277,
-       0xd8400013, 0xc4253348, 0xce413348, 0xc4253348, 0x96400001, 0xcfc00013, 0x9b400003, 0x958000d8,
-       0x80000315, 0xc4253277, 0x04303000, 0x26680001, 0xcf013267, 0xc4193246, 0xc41d3245, 0xc4313267,
-       0x96800041, 0x51980020, 0x1b342010, 0x7d9d801a, 0x1714000c, 0x25540800, 0x1b30c012, 0x459801b0,
-       0x7d77400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0xd180001e, 0xd8400021, 0x04240010, 0x199c01e2,
-       0x7e5e4002, 0x3e5c0004, 0x3e540002, 0xc428000f, 0x9a80ffff, 0x95c00006, 0xc80c0011, 0xc8140011,
-       0x54d00020, 0x55580020, 0x80000282, 0x95400015, 0xc80c0011, 0x0a640002, 0x041c0001, 0x45980008,
-       0x54d00020, 0x96400004, 0xc8140011, 0x45980004, 0x041c0000, 0xcf00001c, 0xd180001e, 0xd8400021,
-       0xc428000f, 0x9a80ffff, 0x99c00003, 0xc8180011, 0x80000282, 0xc8140011, 0x55580020, 0x80000282,
-       0x45980004, 0xc80c0011, 0xcf00001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8100011,
-       0xc8140011, 0x55580020, 0xd8400013, 0xccc1334e, 0xcd01334f, 0xcd413350, 0xcd813351, 0xd881334d,
-       0xcfc00013, 0xc4193273, 0xc41d3275, 0xc40d3271, 0xc4113270, 0xc4153274, 0x50cc0020, 0x7cd0c01a,
-       0x7cdcc011, 0x05900008, 0xcd00006a, 0xcdc0006b, 0xc41d3272, 0x7d594002, 0x54d00020, 0xd8800013,
-       0xccc12e23, 0xcd012e24, 0xcdc12e25, 0xcfc00013, 0xc4193246, 0xc41d3245, 0xc4313267, 0x15540002,
-       0x51980020, 0x7d9d801a, 0xc81c001f, 0x1b340057, 0x1b280213, 0x1b300199, 0x45980198, 0x7f37000a,
-       0x7f2b000a, 0x55e40020, 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0xcd40000d,
-       0xcd40000a, 0xc40d3249, 0x20cc003c, 0xccc13249, 0xc4113274, 0xdd430000, 0xc01e0001, 0x29dc0002,
-       0x04280000, 0xd8000036, 0xcc400078, 0xcc400078, 0x2d540002, 0x95400022, 0x078c0000, 0x07d40000,
-       0x8c00120d, 0x8c001239, 0x8c001232, 0x04f80000, 0x057c0000, 0xcdc00013, 0xc414000d, 0xc41c0019,
-       0x7dd5c005, 0x25dc0001, 0xd840007c, 0xd8400074, 0xd8400069, 0xc40c005e, 0x94c018a6, 0xd4412e22,
-       0xd800007c, 0xc40c005e, 0x94c018a2, 0x95c00007, 0xc40c0019, 0x7cd4c005, 0x24cc0001, 0x94c00008,
-       0x9680fffc, 0x800002e3, 0xc40c0057, 0x7cd0c002, 0x94c00003, 0x9680fffd, 0x800002e3, 0xd8000069,
-       0xcfc00013, 0xcd013273, 0xcd013275, 0xd8000074, 0xc414005e, 0x9540188f, 0xcfc00013, 0xc40d3249,
-       0xc013cfff, 0x7cd0c009, 0xccc13249, 0x9680000b, 0xc40c0077, 0x38d00001, 0x99000006, 0x04cc0002,
-       0xdcc30000, 0xc40c005e, 0x94c01882, 0xd4400078, 0xd800000d, 0x80000304, 0x7c41c001, 0x7c41c001,
-       0xd840002f, 0xc41c0015, 0x95c0ffff, 0xd8400030, 0xc41c0016, 0x95c0ffff, 0xd8000030, 0xc41c0016,
-       0x99c0ffff, 0xd800002f, 0xc41c0015, 0x99c0ffff, 0xc81c001f, 0x49980198, 0x55e40020, 0x459801a0,
-       0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0x04302000, 0xcfc00013, 0xcf013267,
-       0xc4313267, 0x96800004, 0x97000001, 0xd8000036, 0x80000329, 0xd8800013, 0xcc812e00, 0x04302000,
-       0xcfc00013, 0xcf013267, 0xc4313267, 0x97000001, 0xc4193256, 0xc42d3249, 0x16ec001f, 0xd8000028,
-       0xd800002b, 0x1998003e, 0xcec00031, 0xd8000036, 0xd8000010, 0x97800004, 0xd8400010, 0xce00000a,
-       0x1a18003e, 0xcd800008, 0x90000000, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000,
-       0x7d43c001, 0xcd400013, 0xc4093249, 0x1888003e, 0x94800015, 0xd8400074, 0x8c000671, 0xcd400013,
-       0x9a400006, 0xc419324c, 0x259c0001, 0x1598001f, 0x95c0000d, 0x9580000c, 0x99000003, 0xd8400036,
-       0x04100001, 0xc40c0021, 0x14d80011, 0x24dc00ff, 0x31e00002, 0x31dc0003, 0x9580fff0, 0x9a000003,
-       0x99c00002, 0xd9c00036, 0x94800004, 0xd8000074, 0xc418005e, 0x95801827, 0xcf800008, 0x90000000,
-       0xd8800036, 0x90000000, 0xd8c00036, 0xc424000b, 0x32640002, 0x9a400004, 0xc4180014, 0x9580ffff,
-       0xd840002f, 0xc40c0021, 0x14dc0011, 0x95c0fffe, 0xccc00037, 0x8c000190, 0x90000000, 0xd8400008,
-       0xd800006d, 0xc41d3246, 0xc4193245, 0x51dc0020, 0x7d9d801a, 0xd8400028, 0xd8400029, 0xc420000b,
-       0x32200002, 0x9a0000ad, 0x04200032, 0xd9000010, 0xde030000, 0xd8400033, 0x04080000, 0xc43c0009,
-       0x27fc0002, 0x97c0fffe, 0xc42c0015, 0x96c0ffff, 0xd800002e, 0xc42d3249, 0x1af4003e, 0x9740004d,
-       0xc428000d, 0xc4080060, 0x7ca88005, 0x24880001, 0x7f4b4009, 0x97400046, 0xc4313274, 0xc4100057,
-       0x7d33400c, 0x97400009, 0x28240100, 0x7e6a4004, 0xce400079, 0x1eecffdd, 0xcec13249, 0xcf013273,
-       0xcf013275, 0x800003c3, 0xc429326f, 0x1aa80030, 0x96800006, 0x28240001, 0xc428000d, 0x06a80008,
-       0x7e6a8004, 0xce800035, 0xc41d3272, 0x25cc0001, 0x10cc0004, 0x19e80042, 0x25dc0006, 0x11dc0001,
-       0x7e8e800a, 0x7de9c00a, 0xc40d3271, 0xc4293270, 0x50cc0020, 0x7ce8c01a, 0x7cd30011, 0x11e80007,
-       0x2aa80000, 0xce80001c, 0xd300001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc4300011, 0x1b30003f,
-       0x33300000, 0xc4240059, 0x1660001f, 0x7e320009, 0xc0328000, 0x7e72400a, 0x0430000c, 0x9a000002,
-       0x04300008, 0xc02ac000, 0x7d310002, 0x17300002, 0x2aa87600, 0x7cd0c011, 0xcdc00024, 0xd0c00025,
-       0xce800026, 0x04280222, 0xce800026, 0x96000002, 0xce400026, 0xd8400027, 0xc4280058, 0x22ec003d,
-       0xcec13249, 0xcd013273, 0xce813275, 0xd800007b, 0xc8380018, 0x57b00020, 0x04343108, 0xc429325d,
-       0x040c3000, 0x13740008, 0x2374007e, 0x32a80003, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213,
-       0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0x94800003, 0xd4400078, 0x800003e7, 0x04200022, 0xde030000,
-       0xccc00024, 0xd1800025, 0xcf400026, 0xd4400026, 0xd8400027, 0x04200010, 0xde030000, 0xccc00024,
-       0x45980104, 0xd1800025, 0xd4400026, 0xcf800026, 0xcf000026, 0xd8400027, 0x49980104, 0x9a80000a,
-       0xc81c001f, 0x45980168, 0x55e00020, 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027,
-       0x800003f2, 0x8c000448, 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xc40d3249,
-       0x18cc003e, 0xd8400030, 0xc42c0016, 0x96c0ffff, 0xd8000030, 0xc42c0016, 0x9ac0ffff, 0xd800002f,
-       0xc42c0015, 0x9ac0ffff, 0xd8400034, 0xc4300025, 0xc4340024, 0xc4380081, 0xcf813279, 0xcf41326e,
-       0xcf01326d, 0x94c0000d, 0x254c0700, 0xc424001e, 0x10cc0010, 0x1a641fe8, 0x28cc0726, 0x2a640200,
-       0xd8400013, 0xccc1237b, 0x2264003f, 0xcd400013, 0xd8813260, 0xce41325b, 0xc4240033, 0xc4280034,
-       0xd9000036, 0xd8000010, 0x8c001427, 0x96400006, 0xde430000, 0xce40000c, 0xc40c005e, 0x94c01755,
-       0xd4400078, 0x9680000a, 0xce80000a, 0x06a80002, 0xd8400010, 0xde830000, 0xce80000d, 0xc40c005e,
-       0x94c0174c, 0xd4400078, 0xd8000010, 0x8c00142b, 0xc4393265, 0x2bb80040, 0xd8400032, 0xcf813265,
-       0xc4200012, 0x9a00ffff, 0xc4100044, 0x19180024, 0xc8100072, 0x551c003f, 0x99c00003, 0x95800010,
-       0x8000043d, 0xc00c8000, 0xd840006c, 0x28200000, 0x8000043f, 0xc00c4000, 0x282000f0, 0xcd400013,
-       0xd8400008, 0xc4113255, 0xcd01324f, 0xd8400013, 0xd88130b8, 0xccc130b5, 0xce000053, 0x90000000,
-       0x195c00e8, 0xc4100004, 0x2555fff0, 0xc0360001, 0x042c0000, 0x29540001, 0xd8400008, 0x04240000,
-       0x04280004, 0xc420000b, 0x32200002, 0x9a000009, 0xcd400013, 0xcec1c200, 0xc5e124dc, 0x0aa80001,
-       0x7ef6c001, 0x7e624001, 0x96000001, 0x9a80fff9, 0xc02ee000, 0xcd400013, 0x2555fff0, 0xcec1c200,
-       0x29540008, 0xc81c001f, 0xcd400013, 0x55e00020, 0xc42d3255, 0xc4353259, 0xd8013260, 0x45980158,
-       0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027, 0x49980158, 0x45980170, 0xc4200012,
-       0x16200010, 0x9a00fffe, 0xccc00024, 0xd1800025, 0xc429324f, 0xce400026, 0xce800026, 0xcec00026,
-       0xcf400026, 0xd8400027, 0xcd000008, 0x90000000, 0xc40d325b, 0x7d43c001, 0x195400e8, 0x1154000a,
-       0x18dc00e8, 0x05e80488, 0x18d0006c, 0x18f807f0, 0x18e40077, 0x18ec0199, 0x7e6e400a, 0x86800000,
-       0x8000048e, 0x80000494, 0x800004de, 0x80000685, 0x80000686, 0x800006ac, 0x1ccc001f, 0xccc1325b,
-       0xc411325d, 0x251001ef, 0xcd01325d, 0x90000000, 0xc4293254, 0x1264000a, 0xc4300004, 0x7d79400a,
-       0x7e7a400a, 0x52a8001e, 0x15180001, 0x7d69401a, 0x202c007d, 0xcec1325b, 0x95000008, 0x95800028,
-       0xc42d3267, 0xc4193246, 0xc41d3245, 0x1aec0028, 0xc40d325c, 0x800004cc, 0xc42d3256, 0xc419324e,
-       0x26e8003f, 0x1aec003e, 0x12f4000e, 0xc41d324d, 0xc40d324f, 0x7d75401a, 0x04100002, 0x7d290004,
-       0x7f8f4001, 0x7f52800f, 0x51980020, 0x7d9d801a, 0x50e00002, 0x51980008, 0x9a800002, 0x800004d1,
-       0x7d0dc002, 0x6665fc00, 0x7e5e401a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000,
-       0xce400002, 0x7f534002, 0x6665fc00, 0x7e76401a, 0xd1800002, 0xce400002, 0x800004d7, 0xc42d325a,
-       0xc4193258, 0x1aec003e, 0xc41d3257, 0xc4213259, 0x12f4000e, 0x7d75401a, 0x51980020, 0x52200002,
-       0x7d9d801a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000, 0xce400002, 0x202c003d,
-       0xcf000008, 0xcfc00013, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x90000000, 0xc4193260, 0x259c0007,
-       0x15980004, 0x05e804e3, 0x86800000, 0x800004e7, 0x800004f0, 0x80000505, 0x8000016a, 0xc4380004,
-       0xcfc00013, 0xd8400008, 0xc435325d, 0xd801325b, 0x277401ef, 0xcf41325d, 0xcf800008, 0x90000000,
-       0xc4380004, 0xd8400008, 0x8c000671, 0x9640fff4, 0x17e00008, 0xc418000d, 0xce000009, 0xd84131db,
-       0xcf800008, 0xcd800009, 0xc430001e, 0xcfc00013, 0xc42d325b, 0x1b301ff8, 0x2b300400, 0x2330003f,
-       0x26edf000, 0x7ef2c00a, 0xd8413260, 0xcec1325b, 0x90000000, 0x05a80507, 0x86800000, 0x8000050c,
-       0x80000528, 0x8000057d, 0x800005c2, 0x800005f3, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013,
-       0x9a400012, 0x1bd400e8, 0xc42c004a, 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000c, 0xc4100019,
-       0x7d150005, 0x25100001, 0x99000008, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277,
-       0xd801326f, 0x80000624, 0x04240012, 0x1be00fe4, 0xce413260, 0xce000066, 0xcf800008, 0x90000000,
-       0xd8400068, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013, 0x9a400013, 0x1bd400e8, 0xc42c004a,
-       0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000d, 0xc4100019, 0x7d150005, 0x25100001, 0x99000009,
-       0xd8400067, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277, 0xd801326f, 0x80000624,
-       0x1bd400e8, 0xc42c0060, 0x7ed6c005, 0x26ec0001, 0xc4113271, 0xc4153270, 0xc4193272, 0xc41d3273,
-       0x04280022, 0x51100020, 0x7d51401a, 0xc4113274, 0xc4213275, 0xc4253276, 0xc4313248, 0xd1400061,
-       0x2730000f, 0x13300010, 0x7db1800a, 0xcd800060, 0x96c00002, 0x05dc0008, 0xcdc00062, 0x042c3000,
-       0xcd000063, 0xce000064, 0xce400065, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xce813260,
-       0x52ec0020, 0x7ef2c01a, 0xc820001f, 0x1b700057, 0x1b680213, 0x1b740199, 0x46ec0188, 0x7f73400a,
-       0x7f6b400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027,
-       0xc418000d, 0x17e00008, 0xce000009, 0xcec13267, 0xc42d3267, 0x26e01000, 0x9a00fffe, 0xd8400013,
-       0xd9c131fc, 0xcd800009, 0xcf800008, 0x96c00001, 0x90000000, 0xc4380004, 0xd8400008, 0xc4113277,
-       0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0x29dc0001, 0x25140001, 0x191807e4,
-       0x192007ec, 0x95400004, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x9580000e, 0x09980001, 0x041c0001,
-       0x95800005, 0x09980001, 0x51dc0001, 0x69dc0001, 0x9980fffd, 0x7de20014, 0x561c0020, 0xd8400013,
-       0xce013344, 0xcdc13345, 0xcfc00013, 0x95400022, 0x042c3000, 0xcec13267, 0xc42d3246, 0xc4313245,
-       0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe, 0xc419334e, 0xc41d334f, 0xc4213350,
-       0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213, 0x1b740199, 0x46ec01b0, 0x7f6b400a,
-       0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026, 0xcdc00026, 0xce000026, 0xce400026,
-       0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001, 0x04280032, 0xce813260, 0xd8800068,
-       0xcf800008, 0x90000000, 0xc4380004, 0xd8400008, 0x2010007d, 0xcd01325b, 0xc411325b, 0x1910003e,
-       0x9500fffe, 0x04100040, 0xcd00001b, 0xd8400021, 0xc410000f, 0x9900ffff, 0x04100060, 0xcd00001b,
-       0xd8400021, 0xc410000f, 0x9900ffff, 0xcfc00013, 0x2010003d, 0xcd01325b, 0xc4113277, 0x25140001,
-       0x191807e4, 0x9540000b, 0x2511fffd, 0xcd013277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001,
-       0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x95800005, 0xd8400013, 0xd8013344, 0xd8013345,
-       0xcfc00013, 0xc4180050, 0xc41c0052, 0x04280042, 0xcd813273, 0xcdc13275, 0xce813260, 0xd9000068,
-       0xd8400067, 0xcf800008, 0x90000000, 0x07d40000, 0x8c00120d, 0x8c00124f, 0x8c001232, 0x057c0000,
-       0x042c3000, 0xc4380004, 0xcfc00013, 0xd8400008, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267,
-       0x52ec0020, 0x7ef2c01a, 0x1b680057, 0x1b700213, 0x1b740199, 0xc820001f, 0x46ec0190, 0x7f6b400a,
-       0x7f73400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027,
-       0xcfc00013, 0xcec13267, 0xc4153249, 0x2154003d, 0xc41c0019, 0x1bd800e8, 0x7dd9c005, 0x25dc0001,
-       0xc42c004a, 0xcd80005e, 0xc420004d, 0xcec0005e, 0x11dc0010, 0x7e1e000a, 0xcd413249, 0xce01326f,
-       0x28340001, 0x05980008, 0x7f598004, 0xcd800035, 0x1be800e8, 0xc42c004a, 0xce80005e, 0xd801327a,
-       0xd800005f, 0xd8000075, 0xd800007f, 0xc424004c, 0xce41326e, 0xcec0005e, 0x28240100, 0x7e6a4004,
-       0xce400079, 0xc435325d, 0x277401ef, 0x04240020, 0xce41325e, 0xd801325b, 0xd8013260, 0xcf41325d,
-       0xda000068, 0xcf800008, 0x90000000, 0xc4113277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001,
-       0x11dc0008, 0x29dc0001, 0x25140001, 0x9540002d, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x042c3000,
-       0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe,
-       0xc419334e, 0xc41d334f, 0xc4213350, 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213,
-       0x1b740199, 0x46ec01b0, 0x7f6b400a, 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026,
-       0xcdc00026, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001,
-       0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013,
-       0x90000000, 0xc430000b, 0x33300002, 0x04240000, 0x9b000010, 0x1be000e8, 0x042c0000, 0xc0360001,
-       0x04280004, 0xd8400013, 0xcec1c200, 0xc63124dc, 0x0aa80001, 0x7ef6c001, 0x7e724001, 0x97000001,
-       0x9a80fff9, 0xc02ee000, 0xd8400013, 0xcec1c200, 0x90000000, 0x90000000, 0xc4253260, 0x7fc14001,
-       0xc40d3249, 0x18cc003e, 0x98c00005, 0x194c1c03, 0xccc0003b, 0xc40c002d, 0x80000697, 0xc420004a,
-       0x194c00e8, 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x98c00003,
-       0x8c0007e0, 0x95c00008, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013, 0xcf01325b,
-       0x90000000, 0xcd400013, 0xd801325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x25100007, 0x31100005,
-       0x9900008e, 0xc40c0007, 0xd9000010, 0x8000075e, 0x202c007d, 0xcec1325b, 0xc4293265, 0xc4353254,
-       0x26a9feff, 0xc4380004, 0xd8400008, 0x1374000b, 0xc40c000d, 0xd8000009, 0x1774000d, 0xd8400013,
-       0xc41d30b8, 0xcfc00013, 0x95c00008, 0xc411325d, 0xd801325b, 0xccc00009, 0xcf800008, 0x251001ef,
-       0xcd01325d, 0x90000000, 0xce813265, 0xcf400100, 0xc00ac006, 0xc00e0000, 0x28880700, 0x28cc0014,
-       0x8c0006de, 0x14cc0010, 0x30d4000f, 0x04cc0001, 0x10cc0010, 0x28cc0014, 0x99400009, 0xd8400013,
-       0xc41530b8, 0xcfc00013, 0xc4193265, 0x19980028, 0x99400003, 0x99800002, 0x800006c8, 0xcfc00013,
-       0xc411325d, 0xd801325b, 0xcf800008, 0x251001ef, 0xcd01325d, 0x90000000, 0x15600008, 0xce000009,
-       0xc8380023, 0xc4180081, 0x11a00002, 0x7fa38011, 0xc4100026, 0x05980008, 0x7d1a0002, 0x282c2002,
-       0x3e280008, 0xcec00013, 0xc4300027, 0x042c0008, 0xd3800025, 0xcf000024, 0x202400d0, 0x7ca48001,
-       0xcc800026, 0xccc00026, 0x28240006, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800004, 0x32280000,
-       0x9a800002, 0x9a000000, 0xd8400027, 0x24d8003f, 0xd840003c, 0xcec0003a, 0xd8800013, 0xcd81a2a4,
-       0x90000000, 0xc41d325d, 0x25dc0007, 0xc40d3249, 0x18cc003e, 0x94c0000a, 0xc420004a, 0x194c00e8,
-       0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x80000712, 0x194c1c03,
-       0xccc0003b, 0xc40c002d, 0x05e80714, 0x86800000, 0x8000071c, 0x80000720, 0x80000747, 0x8000071d,
-       0x800007c4, 0x80000732, 0x80000745, 0x80000744, 0x90000000, 0x98c00006, 0x8000072e, 0x90000000,
-       0x98c00003, 0x8c0007e0, 0x95c0000c, 0xcd400013, 0xc4253265, 0x2a64008c, 0xce413265, 0xc430001e,
-       0x1b301fe8, 0x2b300400, 0x2330003f, 0xd8013260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010,
-       0x04240000, 0x8000075e, 0x98c0fff1, 0x8c0007e0, 0x95c00002, 0x80000723, 0xcd400013, 0xc41f02f1,
-       0x95c00004, 0xd8013247, 0xd801325d, 0x80000743, 0xd8813247, 0xd801325d, 0xc4100004, 0xd8400008,
-       0xd8400013, 0xd88130b8, 0xcd000008, 0x90000000, 0x04100001, 0x98c0ffde, 0x8000072e, 0x98c00003,
-       0x8c0007e0, 0x95c00012, 0xc4340004, 0xd8400008, 0x15600008, 0xc418000d, 0xce000009, 0xd8400013,
-       0xd84131db, 0xcf400008, 0xcd800009, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013,
-       0xd8413260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010, 0x04240000, 0xcd400013, 0x041c3000,
-       0xcdc13267, 0xc41d3267, 0xc41d3265, 0x25dc8000, 0x95c00007, 0xc41c004a, 0x195800e8, 0xcd80005e,
-       0xc418004c, 0xcd81326e, 0xcdc0005e, 0xc41d3265, 0x25dd7fff, 0xcdc13265, 0xc41d3246, 0xc4193245,
-       0xc42d3267, 0x51e00020, 0x7e1a001a, 0x46200200, 0x04283247, 0x04300033, 0x1af80057, 0x1af40213,
-       0x042c000c, 0x7f7b400a, 0x7f6f400a, 0xcf400024, 0xd2000025, 0xcd800026, 0xcdc00026, 0xc6990000,
-       0x329c325d, 0x99c00008, 0x329c3269, 0x99c00006, 0x329c3267, 0x95c00005, 0xc01defff, 0x7d9d8009,
-       0x8000078a, 0x25980000, 0x0b300001, 0x06a80001, 0xcd800026, 0x9b00fff2, 0xd8400027, 0xc43c0012,
-       0x9bc0ffff, 0xcd400013, 0xd801325b, 0xc431325a, 0xc03e7ff0, 0x7f3f0009, 0xcf01325a, 0xc4313249,
-       0x1f30001f, 0xcf013249, 0xc03e4000, 0xcfc13254, 0xcd400013, 0xd8013254, 0xc431325d, 0xd801324f,
-       0xd8013255, 0xd8013247, 0xd801325d, 0x1b300028, 0x8c00120d, 0x8c001219, 0x8c001232, 0xc4380004,
-       0xd8400008, 0xd8400013, 0x9900000d, 0xd88130b8, 0x9700000b, 0xc43d30b5, 0x1bf0003a, 0x9b000b80,
-       0x203c003a, 0xc430000e, 0x27300700, 0x13300014, 0x2b300001, 0xcf0130b7, 0xcfc130b5, 0x46200008,
-       0xcf400024, 0xd2000025, 0xd8000026, 0xd8400027, 0x043c2000, 0xcd400013, 0xcfc13267, 0xc43d3267,
-       0x9bc00001, 0xccc00010, 0xcf800008, 0x90000000, 0xc4080007, 0xd9000010, 0xc4193260, 0x259c0003,
-       0x31dc0003, 0x95c00014, 0x040c3000, 0xd8400008, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213,
-       0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0xc4193246, 0xc41d3245, 0x51980020, 0x7d9d801a, 0x8c000448,
-       0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xcc800010, 0xd801325d, 0x90000000,
-       0xc418000b, 0x31980002, 0x041c0000, 0x9980001c, 0x19580066, 0x15600008, 0x040c0000, 0xc0120001,
-       0x11980003, 0x04240004, 0x7da18001, 0xc4200007, 0xc4340004, 0xd9000010, 0xd8400008, 0xd8400013,
-       0xccc1c200, 0xc41d24db, 0x7cd0c001, 0x0a640001, 0x7dd9c005, 0x25dc0001, 0x99c00002, 0x9a40fff8,
-       0xc418005e, 0x9580137b, 0xc00ee000, 0xd8400013, 0xccc1c200, 0xce000010, 0xcf400008, 0x90000000,
-       0xd840004f, 0xc4113269, 0x19080070, 0x190c00e8, 0x2510003f, 0x2518000f, 0xcd813268, 0x05a80809,
-       0x86800000, 0x8000080e, 0x8000080f, 0x80000898, 0x80000946, 0x800009e1, 0x80000a5a, 0x04a80811,
-       0x86800000, 0x80000815, 0x80000834, 0x8000085e, 0x8000085e, 0x04341001, 0xcf400013, 0xc4380004,
-       0xd8400008, 0xc42d3045, 0xcec1c091, 0x31300021, 0x9700000b, 0xd84002f1, 0xd8400013, 0xc43130b8,
-       0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a, 0xcf800008, 0x9b000241, 0x8000084a, 0xcf400013,
-       0xd8400008, 0xc43130b6, 0x9b000003, 0xc02f0001, 0xcec130b6, 0xc4252087, 0x5668001a, 0x26a80005,
-       0x9a80fffd, 0xcf400013, 0xd80130b6, 0x8000084a, 0xc4380004, 0xd8400008, 0x04341001, 0xcf400013,
-       0xc431ecaa, 0x27300080, 0x9b000010, 0xc02e0001, 0xcec130b6, 0xcf400013, 0xd80130b6, 0x31300021,
-       0x9700000a, 0xd84002f1, 0xd8400013, 0xc43130b8, 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a,
-       0xcf800008, 0x9b00021d, 0xdd410000, 0x040c0005, 0xd84802e9, 0x8c001a41, 0xc43b02f1, 0x9b800006,
-       0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0xcf800008, 0xcec80278, 0x56f00020, 0xcf080280,
-       0x8c001608, 0xdc140000, 0xcd400013, 0xd8813247, 0xd80802e9, 0x8000085e, 0xcd400013, 0x31100011,
-       0x950001fa, 0xc02e0001, 0x2aec0008, 0xc01c0020, 0xc0180001, 0xc00c0007, 0x11a40006, 0x7de6000a,
-       0x10e40008, 0x7e26000a, 0x7e2e000a, 0xce000013, 0xc4113254, 0x1d10ffdf, 0x2110003e, 0xcd013254,
-       0xd801324f, 0xd8013255, 0x1d10ff9e, 0xcd013254, 0xd8013247, 0xd801325d, 0xd801325e, 0xc0245301,
-       0xce413249, 0xd801325f, 0xc425326c, 0xc0121fff, 0x29108eff, 0x7e524009, 0xce41326c, 0xc425325a,
-       0xc0127ff0, 0x7e524009, 0xce41325a, 0xc425325b, 0xc0131fff, 0x7e524009, 0xce41325b, 0xd801326d,
-       0xd801326e, 0xd8013279, 0x94c00003, 0x08cc0001, 0x80000866, 0xc00c0007, 0x95800003, 0x09980001,
-       0x80000866, 0xc0100010, 0x7dd2400c, 0x9a400004, 0xc0180003, 0x7dd1c002, 0x80000866, 0x80000a5a,
-       0x04a8089a, 0x86800000, 0x8000089e, 0x800008fa, 0x80000945, 0x80000945, 0x31300022, 0x97000007,
-       0xc4380004, 0xd8400008, 0xd8400013, 0xc43130b8, 0x27300001, 0xcf800008, 0xcd400013, 0x04183000,
-       0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f,
-       0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
-       0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000036, 0x45980008, 0xd180001e,
-       0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002f, 0xc43c0004, 0xd8400008, 0xd8400013,
-       0x13b80001, 0xc79d3300, 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e,
-       0x964012a4, 0x7c028009, 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x800008d2,
-       0xc4180006, 0x9980ffff, 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001,
-       0x9980fffd, 0xc02620c0, 0xce41c078, 0xce81c080, 0xcc01c081, 0xcf01c082, 0x57240020, 0xce41c083,
-       0xc0260400, 0x7e6e400a, 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x800008d2, 0xc4180006, 0x9980ffff,
-       0xcdf93300, 0xce393301, 0xcfc00008, 0xcd400013, 0xc43c0004, 0xd8400008, 0x04182000, 0xcd813267,
-       0xcfc00008, 0x80000903, 0x31240022, 0x96400008, 0x04100001, 0xc4380004, 0xd8400008, 0xd8400013,
-       0xc43130b8, 0x27300001, 0xcf800008, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x7ec30011,
-       0x32f80000, 0x9b800011, 0x043c0020, 0x04280000, 0x67180001, 0x0bfc0001, 0x57300001, 0x95800006,
-       0x8c001628, 0x9a400003, 0xd981325d, 0x80000915, 0xd9c1325d, 0x06a80001, 0x9bc0fff6, 0x7f818001,
-       0x8c001606, 0x7d838001, 0x94800010, 0xcd400013, 0xc41d3259, 0xc421325a, 0x16240014, 0x12640014,
-       0x1a2801f0, 0x12a80010, 0x2620ffff, 0x7e2a000a, 0x7de1c001, 0x7e5e400a, 0x9b800002, 0x2264003f,
-       0xce41325a, 0xd8013259, 0xc40c0007, 0xd9000010, 0x8c00075e, 0xc4af0228, 0x043c0000, 0x66d80001,
-       0x95800010, 0x04300002, 0x1330000d, 0x13f40014, 0x7f73400a, 0xcf400013, 0x04380040, 0xcf80001b,
-       0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff,
-       0x07fc0001, 0x56ec0001, 0x33e80010, 0x9680ffec, 0x80000a5a, 0x80000a5a, 0x04a80948, 0x86800000,
-       0x8000094c, 0x8000099b, 0x800009e0, 0x800009e0, 0xc43c0004, 0xd8400008, 0xcd400013, 0x04183000,
-       0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f,
-       0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
-       0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000033, 0x45980008, 0xd180001e,
-       0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002c, 0xd8400013, 0x13b80001, 0xc79d3300,
-       0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e, 0x964011fe, 0x7c028009,
-       0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x80000978, 0xc4180006, 0x9980ffff,
-       0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001, 0x9980fffd, 0xc0260010,
-       0xce41c078, 0xcf01c080, 0x57240020, 0xce41c081, 0xce81c082, 0xcc01c083, 0xc0260800, 0x7e6e400a,
-       0xce41c084, 0x7eae8001, 0x7f2f0011, 0x80000978, 0xc4180006, 0x9980ffff, 0xcdf93300, 0xce393301,
-       0x04182000, 0xcd813267, 0xcfc00008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020, 0x7dda801a,
-       0x7d41c001, 0x7e838011, 0xd84802e9, 0x8c001802, 0x469c0390, 0xc4313267, 0x04183000, 0xcd813267,
-       0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
-       0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011, 0x45dc0004, 0xd1c0001e,
-       0xd8400021, 0xc418000f, 0x9980ffff, 0xc4240011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f,
-       0x9980ffff, 0xc4280011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc42c0011,
-       0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4300011, 0x45dc0004, 0xd1c0001e,
-       0xd8400021, 0xc418000f, 0x9980ffff, 0xc4340011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f,
-       0x9980ffff, 0xc4380011, 0xcd400013, 0x04182000, 0xcd813267, 0x043c0001, 0x8c0014df, 0x80000a5a,
-       0x80000a5a, 0x31280014, 0xce8802ef, 0x9a800062, 0x31280034, 0x9a800060, 0x04a809e8, 0x86800000,
-       0x800009ec, 0x80000a45, 0x80000a59, 0x80000a59, 0xcd400013, 0xc4113246, 0xc4193245, 0x51100020,
-       0x7d91801a, 0x45980400, 0xc4b30258, 0xc4a70250, 0x53300020, 0x7e72401a, 0xc4313267, 0x1b342010,
-       0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0x042c0020,
-       0x66740001, 0x97400041, 0xcd400013, 0x04383000, 0xcf813267, 0xc4393267, 0x9b800001, 0xd180001e,
-       0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4300011, 0x1b38007e, 0x33b40003, 0x9b400003, 0x4598001c,
-       0x9740002f, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc40c0011, 0x45980004,
-       0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x45980004, 0xd180001e, 0xd8400021,
-       0xc438000f, 0x9b80ffff, 0xc4340011, 0xcf4002eb, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f,
-       0x9b80ffff, 0xc4340011, 0xcf4002ec, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff,
-       0xc4340011, 0xcf4002ed, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4340011,
-       0xcf4002ee, 0x45980004, 0xcd400013, 0x04382000, 0xcf813267, 0xd84802e9, 0x8c001715, 0xcd400013,
-       0x04382000, 0xcf813267, 0x56640001, 0x0aec0001, 0x9ac0ffbc, 0xc4380004, 0xd8400008, 0x04341001,
-       0xcf400013, 0x94800005, 0xc431ecaa, 0x27300080, 0x97000002, 0x80000a55, 0xc43130b6, 0x233c0032,
-       0xcfc130b6, 0xcf400013, 0xcf0130b6, 0xc49302ef, 0x99000003, 0xcd400013, 0xd8413247, 0xcf800008,
-       0x80000a5a, 0x80000a5a, 0xcd400013, 0x04180001, 0x5198001f, 0xcd813268, 0xc4193269, 0x2598000f,
-       0x9980fffe, 0xd80002f1, 0xcd400013, 0xd8013268, 0xd800004f, 0x90000000, 0xcd400013, 0x04380001,
-       0x53b8001f, 0x7db9801a, 0xcd813268, 0x80000a5e, 0xd8400029, 0xc40c005e, 0x94c01106, 0xd8800013,
-       0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xd8400029, 0xc40c005e, 0x94c010fd,
-       0x7c40c001, 0x50640020, 0x7ce4c01a, 0xd0c00072, 0xc80c0072, 0x58e801fc, 0x12a80009, 0x2aa80000,
-       0xd0c0001e, 0xce80001c, 0xd8400021, 0xc424000f, 0x9a40ffff, 0x04240010, 0x18dc01e2, 0x7e5e4002,
-       0x3e5c0003, 0x3e540002, 0x95c00006, 0xc8180011, 0xc8100011, 0xc8100011, 0x55140020, 0x80000aa2,
-       0x9540000a, 0xc8180011, 0x44cc0008, 0x55900020, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff,
-       0xc4140011, 0x80000aa2, 0x44cc0004, 0xc4180011, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff,
-       0xc8100011, 0x55140020, 0xd8800013, 0xcd812e01, 0xcd012e02, 0xcd412e03, 0xcc412e00, 0xc428000e,
-       0x2aa80008, 0xce800013, 0xc4253249, 0x2264003f, 0xce413249, 0xce800013, 0xc4253249, 0x96400001,
-       0xd800002a, 0xc410001a, 0xc40c0021, 0xc4140028, 0x95000005, 0x1e64001f, 0xce800013, 0xce413249,
-       0x80001b70, 0x14d00010, 0xc4180030, 0xc41c0007, 0x99000004, 0x99400009, 0x9980000c, 0x80000ab1,
-       0xccc00037, 0x8c000190, 0xc420001c, 0xd8000032, 0x9a0010ac, 0x80000aa7, 0xd880003f, 0x95c00002,
-       0xd8c0003f, 0x80001082, 0xd8800040, 0x95c00002, 0xd8c00040, 0x800010de, 0xc010ffff, 0x18d403f7,
-       0x7d0cc009, 0xc41b0367, 0x7d958004, 0x7d85800a, 0xdc1e0000, 0x90000000, 0xc424000b, 0x32640002,
-       0x7c40c001, 0x18d001fc, 0x05280adc, 0x86800000, 0x80000af1, 0x80000adf, 0x80000ae7, 0x8c000ace,
-       0xd8c00013, 0x96400002, 0xd8400013, 0xcd8d2000, 0x99c00010, 0x7c408001, 0x88000000, 0x18d803f7,
-       0xc010ffff, 0x7d0cc009, 0x04140000, 0x11940014, 0x29544001, 0x9a400002, 0x29544003, 0xcd400013,
-       0x80000af4, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44d2000, 0x7c408001, 0x88000000, 0xc424000b,
-       0x32640002, 0x7c40c001, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44dc000, 0x7c408001, 0x88000000,
-       0x7c40c001, 0x18d0003c, 0x95000006, 0x8c000ace, 0xd8800013, 0xcd8d2c00, 0x99c00003, 0x80000b0a,
-       0xd8800013, 0xd44d2c00, 0x7c408001, 0x88000000, 0x7c40c001, 0x28148004, 0x24d800ff, 0xccc00019,
-       0xcd400013, 0xd4593240, 0x7c408001, 0x88000000, 0xd8400029, 0xc40c005e, 0x94c0105e, 0x7c410001,
-       0x50540020, 0x7c418001, 0x2198003f, 0x199c0034, 0xc40c0007, 0x95c00028, 0xc428000e, 0x2aa80008,
-       0xce800013, 0xc42d324f, 0xc4313255, 0x7ef3400c, 0x9b400021, 0xd800002a, 0x80001b70, 0xc40c0007,
-       0x14e80001, 0x9a8000af, 0xd9000010, 0x041c0002, 0x042c01c8, 0x8c000d61, 0xccc00010, 0xd8400029,
-       0xc40c005e, 0x94c01043, 0x7c410001, 0x50540020, 0x7c418001, 0x18a01fe8, 0x3620005c, 0x9a00000e,
-       0x2464003f, 0xd8400013, 0xc6290ce7, 0x16ac001f, 0x96c00004, 0x26ac003f, 0x7ee6c00d, 0x96c00005,
-       0x06200001, 0x2620000f, 0x9a00fff8, 0x8000016a, 0xce000367, 0xc424005e, 0x9640102e, 0xc428000e,
-       0x199c0037, 0x19a00035, 0x2aa80008, 0xce800013, 0x95c0005d, 0xd800002a, 0xc42d3256, 0xc431325a,
-       0x2330003f, 0x16f8001f, 0x9780000d, 0xc4253248, 0xc035f0ff, 0x7e764009, 0x19b401f8, 0x13740008,
-       0x7e76400a, 0xce800013, 0xce413248, 0xcf01325a, 0xce800013, 0xc431325a, 0x97000001, 0x7d15001a,
-       0xd1000072, 0xc8100072, 0x55140020, 0x199c0034, 0xd8400010, 0xd8400029, 0x9b800004, 0x1ae4003e,
-       0xce400008, 0x80000b7c, 0xc4353254, 0x16a80008, 0x1aec003c, 0x19a4003f, 0x12a80015, 0x12ec001f,
-       0x1374000b, 0x7eae800a, 0xc02e4000, 0x1774000d, 0x7eae800a, 0xce400008, 0x7f6b400a, 0x95c00005,
-       0xc43d3248, 0x1bfc01e8, 0x13fc0018, 0x7dbd800a, 0x1d98ff15, 0x592c00fc, 0xcd80000a, 0x12e00016,
-       0x7da1800a, 0x592c007e, 0x12e00015, 0x7da1800a, 0xd1000001, 0xcd800001, 0x11a0000c, 0x1264001e,
-       0x1620000c, 0x7e26000a, 0x7e32000a, 0x12e4001b, 0x7e26000a, 0x5924007e, 0x12640017, 0x7e26000a,
-       0x19a4003c, 0x12640018, 0x7e26000a, 0xd800002a, 0xce01325a, 0xcd013257, 0xcd413258, 0xc429325a,
-       0xc40c005e, 0x94c00fdb, 0x96800001, 0x95c00003, 0x7c40c001, 0x7c410001, 0x9780f5ca, 0xcf400100,
-       0xc40c0007, 0xd9000010, 0x8c00120d, 0x8c001219, 0x8c001232, 0xccc00010, 0x8c001b6d, 0x7c408001,
-       0x88000000, 0xc42d324e, 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x52ec0008,
-       0x07740003, 0x04240002, 0x269c003f, 0x7e5e4004, 0x7f67000f, 0x97000003, 0x7f674002, 0x0b740001,
-       0x53740002, 0x7ef6c011, 0x1ab42010, 0x1ab8c006, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f7b400a,
-       0x7f6b400a, 0xcf40001c, 0xd2c0001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4180011, 0x9a000003,
-       0x8c000bec, 0x80000b47, 0xc42c001d, 0xc4313256, 0x1b34060b, 0x1b300077, 0x7f37000a, 0x13300017,
-       0x04340100, 0x26ec00ff, 0xc03a8004, 0x7ef6c00a, 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16,
-       0xc40c0032, 0xc410001d, 0x28cc0008, 0xccc00013, 0xc415325b, 0x7c418001, 0x7c418001, 0x18580037,
-       0x251000ff, 0xc421325d, 0x262001ef, 0xce01325d, 0x99800004, 0x7d15400a, 0xcd41325b, 0x80000168,
-       0x1d54001f, 0xcd41325b, 0x7c408001, 0x88000000, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004,
-       0x7eae800a, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0xcd280200, 0xcd680208,
-       0xcda80210, 0x9b00000c, 0x9b400014, 0x9b800017, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004,
-       0x7eae800a, 0xc6930200, 0xc6970208, 0xc69b0210, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037,
-       0x8c000190, 0xd8000032, 0x90000000, 0xd8000028, 0xd800002b, 0x80000168, 0xd900003f, 0x97c00002,
-       0xd940003f, 0x80001082, 0xd9000040, 0x97c00002, 0xd9400040, 0x800010de, 0xc40c0021, 0x14fc0011,
-       0x24f800ff, 0x33b80001, 0x97c0fffc, 0x9b800007, 0xccc00037, 0x8c000190, 0xd8000032, 0xd8000028,
-       0xd800002b, 0x80001b70, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000, 0x04140000,
-       0xc418000e, 0x29980008, 0x7d83c001, 0xcd800013, 0xc4093249, 0x1888003e, 0x94800020, 0xd8400074,
-       0x8c000671, 0x9a400009, 0xc418000e, 0x29980008, 0xcd800013, 0xc419324c, 0x259c0001, 0x1598001f,
-       0x95c00016, 0x95800015, 0x99000003, 0xd8400036, 0x04100001, 0xc40c0021, 0x14d80011, 0x24e000ff,
-       0x321c0002, 0x32200001, 0x9580ffee, 0x99c00014, 0x96000004, 0xccc00037, 0x04140001, 0x80000c30,
-       0x9480000a, 0xd8000074, 0xc418005e, 0x95800f29, 0xcf800008, 0x80000c16, 0x94800004, 0xd8000074,
-       0xc418005e, 0x95800f23, 0xd9c00036, 0x99400002, 0xccc00037, 0xcf800008, 0x80000c16, 0x94800004,
-       0xd8000074, 0xc418005e, 0x95800f1a, 0xccc00037, 0xd8800036, 0x80001b70, 0x041c0003, 0x042c01c8,
-       0x8c000d61, 0xc4200007, 0xc40c0077, 0x94c00001, 0x7c418001, 0xc428000e, 0x9600f502, 0x0a200001,
-       0x98c0f500, 0x2aa80008, 0xce000010, 0x9a000f05, 0xce800013, 0xc431325a, 0xc42d3256, 0x1f30001f,
-       0x16e4001f, 0xcf01325a, 0xc431325a, 0x97000001, 0x9640f4f4, 0xc434000b, 0x33740002, 0x9b40f4f1,
-       0xc4353254, 0x16a80008, 0x1aec003c, 0x12a80015, 0x12ec001f, 0x1374000b, 0x7eae800a, 0xc02e4000,
-       0x1774000d, 0x7eae800a, 0x7f6b400a, 0xcf400100, 0x12780001, 0x2bb80001, 0xc00ac005, 0xc00e0002,
-       0x28cc8000, 0x28884900, 0x28cc0014, 0x80000ff3, 0xc43c0007, 0x7c40c001, 0x17fc0001, 0xd8400013,
-       0x9bc00004, 0xd8400029, 0xc424005e, 0x96400ee1, 0xcc41c40a, 0xcc41c40c, 0xcc41c40d, 0x7c414001,
-       0x24d0007f, 0x15580010, 0x255400ff, 0xcd01c411, 0xcd81c40f, 0xcd41c40e, 0xcc41c410, 0x7c414001,
-       0x7c418001, 0x04200000, 0x18e80033, 0x18ec0034, 0xcc41c414, 0xcc41c415, 0xcd81c413, 0xcd41c412,
-       0x18dc0032, 0x7c030011, 0x7c038011, 0x95c00027, 0x96c00002, 0xc431c417, 0xc435c416, 0x96800004,
-       0x96c00002, 0xc439c419, 0xc43dc418, 0xc41c000e, 0x29dc0008, 0xcdc00013, 0xcf413261, 0x96c00002,
-       0xcf013262, 0x96800004, 0xcfc13263, 0x96c00002, 0xcf813264, 0x18dc0030, 0xc43c0007, 0x95c00017,
-       0x17fc0001, 0x9ac00005, 0x7d77000c, 0x9bc00015, 0x9700000a, 0x80000cd6, 0x51b80020, 0x53300020,
-       0x7f97801a, 0x7f37001a, 0x7f3b000c, 0x9bc0000d, 0x97800002, 0x80000cd6, 0x9a000018, 0xd8400013,
-       0x28200001, 0x80000ca7, 0x18dc0031, 0x95c00003, 0xc435c40b, 0x9740fffd, 0xd800002a, 0x80001b70,
-       0xc4280032, 0x2aa80008, 0xce800013, 0xc40d325b, 0x97000002, 0x800012c2, 0xc438001d, 0x1bb81ff0,
-       0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0xc428000e, 0xc43c0007,
-       0x2aa80008, 0xc438001d, 0xce800013, 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077,
-       0x7ff3c00a, 0x80000cf4, 0xc43d325a, 0x1bfc0677, 0x13fc0017, 0x04300100, 0x1bb81fe8, 0x7f73400a,
-       0xc032800b, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b, 0x80000c16, 0xc43c0007, 0x7c40c001,
-       0x18d42011, 0x17fc0001, 0x18d001e8, 0x24cc007f, 0x7cd4c00a, 0x9bc00004, 0xd8400029, 0xc428005e,
-       0x96800e6c, 0x7c414001, 0x50580020, 0x7d59401a, 0xd1400072, 0xc8140072, 0x596001fc, 0x12200009,
-       0x7ce0c00a, 0x7c418001, 0x505c0020, 0x7d9d801a, 0x7c41c001, 0x50600020, 0x7de1c01a, 0x7c420001,
-       0xccc0001b, 0xd140001d, 0xd180001f, 0xd1c00020, 0xd8400021, 0x95000010, 0x04300000, 0xc428000f,
-       0x9a80ffff, 0xc8240010, 0x7e5e800c, 0x9bc00015, 0x9a80000c, 0x9b000024, 0x28300001, 0x122c0004,
-       0x06ec0001, 0x0aec0001, 0x9ac0ffff, 0xd8400021, 0x80000d1f, 0xc428000f, 0x9a80ffff, 0xc8240010,
-       0x566c0020, 0xc428000e, 0x2aa80008, 0xce800013, 0xce413261, 0xcec13262, 0xd800002a, 0x80001b70,
-       0xc4340032, 0x2b740008, 0xcf400013, 0xc40d325b, 0x96800005, 0x566c0020, 0xce413261, 0xcec13262,
-       0x800012c2, 0xc438001d, 0x1bb81fe8, 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d,
-       0x80001b70, 0xc43c0007, 0xc438001d, 0xc428000e, 0x2aa80008, 0xce800013, 0x13f4000c, 0x9bc00006,
-       0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x80000d57, 0xc43d325a, 0x1bfc0677, 0x13fc0017,
-       0x04300100, 0x1bb81fe8, 0x7f73400a, 0xc0328009, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b,
-       0x80000c16, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0xc4253246, 0xc4113245, 0x04143000, 0xcd413267,
-       0x52640020, 0x7e51001a, 0xc4153267, 0x7d2d0011, 0x19640057, 0x19580213, 0x19600199, 0x7da6400a,
-       0x7e26400a, 0xd1000025, 0xce400024, 0xcdc00026, 0xd8400027, 0x04142000, 0xcfc00013, 0xcd413267,
-       0xc4153267, 0x99400001, 0x90000000, 0x7c40c001, 0x18d001e8, 0x18d40030, 0x18d80034, 0x05280d83,
-       0x7c420001, 0x7c424001, 0x86800000, 0x80000d8a, 0x8000016a, 0x80000d95, 0x80000db1, 0x8000016a,
-       0x80000d95, 0x80000dbc, 0x11540010, 0x7e010001, 0x8c00187c, 0x7d75400a, 0xcd400013, 0xd4610000,
-       0x9580f3d8, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0xd8000016, 0x526c0020, 0x18e80058,
-       0x7e2ec01a, 0xd2c00072, 0xc82c0072, 0x5ae0073a, 0x7ea2800a, 0x9940000a, 0xce800024, 0xd2c00025,
-       0xd4400026, 0xd8400027, 0x9580f3c6, 0xc4380012, 0x9b80ffff, 0x7c408001, 0x88000000, 0xdc3a0000,
-       0x0bb80001, 0xce800024, 0xd2c00025, 0xcc400026, 0xd8400027, 0x9b80fffb, 0x9980fff5, 0x7c408001,
-       0x88000000, 0xc02a0001, 0x2aa80001, 0x16200002, 0xce800013, 0xce01c405, 0xd441c406, 0x9580f3b1,
-       0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b, 0x32640002, 0x9a40000b, 0x11540010,
-       0x29540002, 0xcd400013, 0xd4610000, 0x9580f3a5, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001,
-       0x88000000, 0xd4400078, 0x80000168, 0xd8400029, 0xc40c005e, 0x94c00da7, 0x7c40c001, 0x50500020,
-       0x7cd0c01a, 0xd0c00072, 0xc8280072, 0x5aac007e, 0x12d80017, 0x7c41c001, 0x7d9d800a, 0x56a00020,
-       0x2620ffff, 0x7da1800a, 0x51980020, 0x7e82400a, 0x7e58c01a, 0x19d4003d, 0x28182002, 0x99400030,
-       0x8c00104f, 0xc430000d, 0xc4340035, 0xd800002a, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005,
-       0xc011000f, 0xc4240004, 0x11a00002, 0x7c908009, 0x12640004, 0x7d614011, 0xc4100026, 0x05980008,
-       0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008, 0x20880188, 0x54ec0020, 0x7cb4800a, 0xc4300027,
-       0x04380008, 0xd1400025, 0xcf000024, 0x20240090, 0x7ca48001, 0xcc800026, 0xccc00026, 0xcec00026,
-       0xcec00026, 0x28240004, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800005, 0x32280000, 0x9a800002,
-       0x9a000000, 0x7c018001, 0xd8400027, 0xd8000016, 0xcf80003a, 0xd901a2a4, 0x80001037, 0xc418000e,
-       0x29980008, 0xcd800013, 0xc421326c, 0x1624001f, 0x9a40fffe, 0xd841325f, 0xd8800033, 0xc43c0009,
-       0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xd8800034, 0xc429325f,
-       0x26ac0001, 0x9ac0fffe, 0x26ac0002, 0x96c00003, 0xd800002a, 0x80001b70, 0xc43c0007, 0xc430001e,
-       0xd8800033, 0x13f4000c, 0x1b301ff0, 0x2b300300, 0x2330003f, 0x7f37000a, 0x9680000b, 0xc43c0009,
-       0x27fc0004, 0x97c0fffe, 0xd8400039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xcf01325b, 0xd8800034,
-       0x80000c16, 0xd8800034, 0x8c0001a2, 0x80001b70, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a,
-       0x18ac0024, 0x2b304000, 0x7c40c001, 0xcec00008, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a,
-       0x29980008, 0xcd800013, 0xc4113249, 0x1910003e, 0x99000002, 0xd840003d, 0x7c410001, 0xd4400078,
-       0x51100020, 0xcf01326c, 0x7cd0c01a, 0xc421326c, 0x12a80014, 0x2220003f, 0x7e2a000a, 0xcd800013,
-       0xce01326c, 0xd8800033, 0xc43c0009, 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022,
-       0x9bc0ffff, 0xd8800034, 0x80001190, 0x7c40c001, 0x18dc003d, 0x95c00004, 0x041c0001, 0x042c01c8,
-       0x8c000d61, 0x18d40030, 0x18d001e8, 0x18fc0034, 0x24e8000f, 0x06a80e71, 0x7c418001, 0x7c41c001,
-       0x86800000, 0x80000edd, 0x80000e91, 0x80000e91, 0x80000ea1, 0x80000eaa, 0x80000e7c, 0x80000e7f,
-       0x80000e7f, 0x80000e87, 0x80000e8f, 0x8000016a, 0x51dc0020, 0x7d9e001a, 0x80000ee6, 0xc420000e,
-       0x2a200008, 0xce000013, 0xc4213262, 0xc4253261, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc420000e,
-       0x2a200008, 0xce000013, 0xc4213264, 0xc4253263, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc820001f,
-       0x80000ee6, 0x18e82005, 0x51e00020, 0x2aa80000, 0x7da1801a, 0xd1800072, 0xc8180072, 0x59a001fc,
-       0x12200009, 0x7ea2800a, 0xce80001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8200011,
-       0x80000ee6, 0x15980002, 0xd8400013, 0xcd81c400, 0xc421c401, 0x95400041, 0xc425c401, 0x52640020,
-       0x7e26001a, 0x80000ee6, 0x31ac2580, 0x9ac00011, 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d,
-       0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009, 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005,
-       0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004, 0xc4340004, 0xd8400008, 0x80000ede, 0x39ac7c06,
-       0x3db07c00, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002,
-       0x80000ebc, 0x39acc335, 0x3db0c336, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9002, 0x3db09001,
-       0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9012, 0x3db09011, 0x9ac00003, 0x97000002, 0x80000ebc,
-       0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000ebc, 0xc4340004, 0xd8400013, 0xc5a10000,
-       0x95400005, 0x05980001, 0xc5a50000, 0x52640020, 0x7e26001a, 0xcf400008, 0x05280eea, 0x7c418001,
-       0x7c41c001, 0x86800000, 0x80000ef1, 0x8000016a, 0x80000efe, 0x80000f11, 0x80000f2e, 0x80000efe,
-       0x80000f1f, 0xc4340004, 0xd8400013, 0xce190000, 0x95400005, 0x05980001, 0x56200020, 0xce190000,
-       0xcf400008, 0x97c0f26f, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x51ec0020, 0x18e80058,
-       0x7daec01a, 0xd2c00072, 0xc82c0072, 0x5af8073a, 0x7eba800a, 0xd2c00025, 0xce800024, 0xce000026,
-       0x95400003, 0x56240020, 0xce400026, 0xd8400027, 0x97c0f25c, 0xc4380012, 0x9b80ffff, 0x7c408001,
-       0x88000000, 0xc02a0001, 0x2aa80001, 0x15980002, 0xce800013, 0xcd81c405, 0xce01c406, 0x95400003,
-       0x56240020, 0xce41c406, 0x97c0f24e, 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b,
-       0x32640002, 0x9a40f247, 0xd8800013, 0xce190000, 0x95400004, 0x05980001, 0x56200020, 0xce190000,
-       0x97c0f240, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x31ac2580, 0x9ac00011,
-       0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d, 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009,
-       0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005, 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004,
-       0xc4340004, 0xd8400008, 0x80000ef2, 0x39ac7c06, 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000f40,
-       0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002, 0x80000f40, 0x39acc335, 0x3db0c336, 0x9ac00003,
-       0x97000002, 0x80000f40, 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9002,
-       0x3db09002, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9012, 0x3db09012, 0x9ac00003, 0x97000002,
-       0x80000f40, 0x80000ef1, 0xc40c0006, 0x98c0ffff, 0x7c40c001, 0x7c410001, 0x7c414001, 0x7c418001,
-       0x7c41c001, 0x7c43c001, 0x95c00001, 0xc434000e, 0x2b740008, 0x2b780001, 0xcf400013, 0xd8c1325e,
-       0xcf80001a, 0xd8400013, 0x7c034001, 0x7c038001, 0x18e0007d, 0x32240003, 0x9a400006, 0x32240000,
-       0x9a400004, 0xcd01c080, 0xcd41c081, 0x80000f88, 0x51640020, 0x7e52401a, 0xd2400072, 0xc8280072,
-       0xce81c080, 0x56ac0020, 0x26f0ffff, 0xcf01c081, 0x1af000fc, 0x1334000a, 0x24e02000, 0x7f63400a,
-       0x18e00074, 0x32240003, 0x9a400006, 0x32240000, 0x9a400004, 0xcd81c082, 0xcdc1c083, 0x80000f9d,
-       0x51e40020, 0x7e5a401a, 0xd2400072, 0xc8280072, 0xce81c082, 0x56ac0020, 0x26f0ffff, 0xcf01c083,
-       0x1af000fc, 0x13380016, 0x18e00039, 0x12200019, 0x7fa3800a, 0x7fb7800a, 0x18e0007d, 0x1220001d,
-       0x7fa3800a, 0x18e00074, 0x12200014, 0x7fa3800a, 0xcf81c078, 0xcfc1c084, 0x80000c16, 0x7c40c001,
-       0x18dc003d, 0x95c00004, 0x041c0000, 0x042c01c8, 0x8c000d61, 0x18d001e8, 0x31140005, 0x99400003,
-       0x31140006, 0x95400002, 0x8c00104f, 0x05280fb7, 0x28140002, 0xcd400013, 0x86800000, 0x80000fbe,
-       0x80000fbe, 0x80000fc2, 0x80000fbe, 0x80000fd1, 0x80000ff2, 0x80000ff2, 0x24cc003f, 0xccc1a2a4,
-       0x7c408001, 0x88000000, 0x7c414001, 0x18e80039, 0x52a8003b, 0x50580020, 0x24cc003f, 0x7d59401a,
-       0xd1400072, 0xc8140072, 0x7d69401a, 0xc41c0017, 0x99c0ffff, 0xd140004b, 0xccc1a2a4, 0x7c408001,
-       0x88000000, 0xc414000d, 0x04180001, 0x24cc003f, 0x7d958004, 0xcd800035, 0xccc1a2a4, 0xc43c000e,
-       0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x97c00002, 0xd8400074, 0xc4100019, 0x7d150005,
-       0x25100001, 0x9500000b, 0x97c0fffc, 0xc4180021, 0x159c0011, 0x259800ff, 0x31a00003, 0x31a40001,
-       0x7e25800a, 0x95c0fff5, 0x9580fff4, 0x80000fef, 0xc411326f, 0x1d100010, 0xcd01326f, 0x97c00002,
-       0xd8000074, 0x80001b70, 0x04380000, 0xc430000d, 0xc8140023, 0xc4180081, 0x13300005, 0xc011000f,
-       0xc4240004, 0x33b40003, 0x97400003, 0xc0340008, 0x80000ffe, 0xc4340035, 0x11a00002, 0x7c908009,
-       0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x282c2002,
-       0x208801a8, 0x3e280008, 0x7cb4800a, 0xcec00013, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024,
-       0x20240030, 0x7ca48001, 0xcc800026, 0xccc00026, 0x9b800013, 0xcc400026, 0x7c414001, 0x28340000,
-       0xcf400013, 0x507c0020, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013,
-       0xcd400026, 0xcfc00026, 0xd4400026, 0x9a80000e, 0x32280000, 0x9a80000b, 0x8000102f, 0xcc000026,
-       0xcc000026, 0xcc000026, 0xcc000026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000,
-       0x7c018001, 0xcc000026, 0xd8400027, 0x1cccfe08, 0xd8800013, 0xcec0003a, 0xccc1a2a4, 0xc43c000e,
-       0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x9bc00007, 0xc428000e, 0x16a80008, 0xce800009,
-       0xc42c005e, 0x96c00b33, 0xd840003c, 0xc4200025, 0x7da2400f, 0x7da28002, 0x7e1ac002, 0x0aec0001,
-       0x96400002, 0x7d2ac002, 0x3ef40010, 0x9b40f11d, 0x04380030, 0xcf81325e, 0x80000c16, 0xde410000,
-       0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xddc10000, 0xde010000, 0xc40c000e, 0x7c024001,
-       0x28cc0008, 0xccc00013, 0xc8100086, 0x5510003f, 0xc40d3249, 0x18cc003e, 0x98c00003, 0x99000011,
-       0x80001075, 0x9900000c, 0xc40c0026, 0xc4100081, 0xc4140025, 0x7d15800f, 0x7d15c002, 0x7d520002,
-       0x0a200001, 0x95800002, 0x7cde0002, 0x3e20001a, 0x9a000009, 0x040c0030, 0xccc1325e, 0x80001071,
-       0xd9c00036, 0xd8400029, 0xc40c005e, 0x94c00b01, 0x04240001, 0xdc200000, 0xdc1c0000, 0xdc180000,
-       0xdc140000, 0xdc100000, 0xdc0c0000, 0x96400004, 0xdc240000, 0xdc0c0000, 0x80000c16, 0xdc240000,
-       0x90000000, 0xcc40003f, 0xd8c00010, 0xc4080029, 0xcc80003b, 0xc418000e, 0x18a800e5, 0x1d980008,
-       0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0x18a400e5, 0x12500009, 0x248c0008, 0x94c00006,
-       0x200c006d, 0x7cd0c00a, 0xccc1326c, 0xc421326c, 0x96000001, 0xcd800013, 0x200c0228, 0x7cd0c00a,
-       0xccc1326c, 0xc421326c, 0x96000001, 0xc40c002a, 0xc410002b, 0x18881fe8, 0x18d4072c, 0x18cc00d1,
-       0x7cd4c00a, 0x3094000d, 0x38d80000, 0x311c0003, 0x99400006, 0x30940007, 0x1620001f, 0x9940001d,
-       0x9a000023, 0x800010c4, 0x9580001a, 0x99c00019, 0xccc00041, 0x25140001, 0xc418002c, 0x9940000d,
-       0x259c007f, 0x95c00013, 0x19a00030, 0xcdc0001b, 0xd8400021, 0xd8400022, 0xc430000f, 0x17300001,
-       0x9b00fffe, 0x9a000012, 0xd8400023, 0x800010cb, 0x199c0fe8, 0xcdc0001b, 0xd8400021, 0xd8400023,
-       0xc430000f, 0x17300001, 0x9b00fffe, 0x800010cb, 0xd8c00010, 0xd8000022, 0xd8000023, 0xc430005e,
-       0x97000aac, 0x7c408001, 0x88000000, 0xc43c000e, 0xc434002e, 0x2bfc0008, 0x2020002c, 0xcfc00013,
-       0xce01326c, 0x17780001, 0x27740001, 0x07a810d8, 0xcf400010, 0xc421326c, 0x96000001, 0x86800000,
-       0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0x8000104c, 0xcc400040, 0xd8800010, 0xc4180032,
-       0x29980008, 0xcd800013, 0x200c007d, 0xccc1325b, 0xc411325b, 0x95000001, 0x7c408001, 0x88000000,
-       0x28240007, 0xde430000, 0xd4400078, 0x80001190, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a,
-       0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0xc40d3249, 0x18cc003e,
-       0x98c00002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x7c40c001, 0x7c410001, 0x7c414001,
-       0x192400fd, 0x50580020, 0x7d59401a, 0x7c41c001, 0x06681110, 0x7c420001, 0xcc400078, 0x18ac0024,
-       0x19180070, 0x19100078, 0xcec00008, 0x18f40058, 0x5978073a, 0x7f7b400a, 0x97000001, 0x86800000,
-       0x80001117, 0x80001118, 0x80001122, 0x8000112d, 0x80001130, 0x80001133, 0x8000016a, 0x8000117b,
-       0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025, 0xcf400024, 0xcdc00026,
-       0xd8400027, 0x8000117b, 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025,
-       0xcf400024, 0xcdc00026, 0xce000026, 0xd8400027, 0x8000117b, 0xc81c001f, 0x55e00020, 0x80001122,
-       0xc81c0020, 0x55e00020, 0x80001122, 0x8c00116b, 0xd8400013, 0xc02a0200, 0x7e8e8009, 0x22a8003d,
-       0x22a80074, 0x2774001c, 0x13740014, 0x7eb6800a, 0x25ecffff, 0x55700020, 0x15f40010, 0x13740002,
-       0x275c001f, 0x95c00027, 0x7c018001, 0x7f41c001, 0x15dc0002, 0x39e00008, 0x25dc0007, 0x7dc1c01e,
-       0x05dc0001, 0x96000004, 0x05e40008, 0x8c00116e, 0x80001168, 0x7dc2001e, 0x06200001, 0x05e40008,
-       0x7e62000e, 0x9a000004, 0x7da58001, 0x8c00116e, 0x80001165, 0x7dc2001e, 0x06200001, 0x7e1a0001,
-       0x05cc0008, 0x7e0d000e, 0x95000007, 0x7e02401e, 0x06640001, 0x06640008, 0x05d80008, 0x8c00116e,
-       0x80001168, 0x7dc2401e, 0x06640001, 0x7da58001, 0x8c00116e, 0x05e00008, 0x7da2000c, 0x9600ffe6,
-       0x17640002, 0x8c00116e, 0x80001190, 0xc4200006, 0x9a00ffff, 0x90000000, 0x8c00116b, 0xc420000e,
-       0x2a200001, 0xce00001a, 0xce81c078, 0xcec1c080, 0xcc01c081, 0xcd41c082, 0xcf01c083, 0x12640002,
-       0x22640435, 0xce41c084, 0x90000000, 0x0528117e, 0x312c0003, 0x86800000, 0x80001190, 0x80001185,
-       0x80001182, 0x80001182, 0xc4300012, 0x9b00ffff, 0x9ac0000c, 0xc03a0400, 0xc4340004, 0xd8400013,
-       0xd8400008, 0xc418000e, 0x15980008, 0x1198001c, 0x7d81c00a, 0xcdc130b7, 0xcf8130b5, 0xcf400008,
-       0x04240008, 0xc418000e, 0xc41c0049, 0x19a000e8, 0x29a80008, 0x7de2c00c, 0xce800013, 0xc421325e,
-       0x26200010, 0xc415326d, 0x9a000006, 0xc420007d, 0x96000004, 0x96c00003, 0xce40003e, 0x800011a3,
-       0x7d654001, 0xcd41326d, 0x7c020001, 0x96000005, 0xc4100026, 0xc4240081, 0xc4140025, 0x800011b6,
-       0xc4253279, 0xc415326d, 0xc431326c, 0x2730003f, 0x3b380006, 0x97800004, 0x3f38000b, 0x9b800004,
-       0x800011b4, 0x04300006, 0x800011b4, 0x0430000b, 0x04380002, 0x7fb10004, 0x7e57000f, 0x7e578002,
-       0x7d67c002, 0x0be40001, 0x97000002, 0x7d3a4002, 0x202c002c, 0xc421325e, 0x04280020, 0xcec1326c,
-       0x26200010, 0x3e640010, 0x96000003, 0x96400002, 0xce81325e, 0xc4300028, 0xc434002e, 0x17780001,
-       0x27740001, 0x07a811cf, 0x9b00feb8, 0xcf400010, 0xc414005e, 0x954009a7, 0x86800000, 0x80000168,
-       0x80000aa7, 0x80000bfc, 0x800012e9, 0x80000168, 0x8c00120d, 0x7c40c001, 0xccc1c07c, 0xcc41c07d,
-       0xcc41c08c, 0x7c410001, 0xcc41c079, 0xcd01c07e, 0x7c414001, 0x18f0012f, 0x18f40612, 0x18cc00c1,
-       0x7f73400a, 0x7cf7400a, 0x39600004, 0x9a000002, 0xc0140004, 0x11600001, 0x18fc003e, 0x9740001c,
-       0xcf400041, 0xc425c07f, 0x97c00003, 0x166c001f, 0x800011ee, 0x1a6c003e, 0x96c00006, 0x04200002,
-       0x0a200001, 0x9a00ffff, 0xd8400013, 0x800011e8, 0xc428002c, 0x96800010, 0x26ac007f, 0xcec0001b,
-       0xd8400021, 0x1ab00030, 0x1aac0fe8, 0xc434000f, 0x9b40ffff, 0x97000008, 0xcec0001b, 0xd8400021,
-       0xc434000f, 0x9b40ffff, 0x80001205, 0x0a200001, 0x9a00ffff, 0xd8400013, 0xc425c07f, 0x166c001f,
-       0x11600001, 0x9ac0fffa, 0x8c001232, 0x7c408001, 0x88000000, 0xd8000033, 0xc438000b, 0xc43c0009,
-       0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078, 0x7ffbc00c, 0x97c0fffd,
-       0x90000000, 0xc03a2800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380040,
-       0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f,
-       0x9b80ffff, 0x04380002, 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010,
-       0x9bc0fffa, 0x90000000, 0xd8400013, 0xd801c07f, 0xd8400013, 0xc43dc07f, 0xcfc00078, 0xd8000034,
-       0x90000000, 0xc03ae000, 0xcf81c200, 0xc03a0800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079,
-       0xcc01c07e, 0x04380040, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380002, 0x0bb80001,
-       0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000, 0xc03ae000,
-       0xcf81c200, 0xc03a4000, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380002,
-       0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000,
-       0xc40c0007, 0x30d00002, 0x99000052, 0xd8400029, 0xc424005e, 0x9640090f, 0x7c410001, 0xc428000e,
-       0x1514001f, 0x19180038, 0x2aa80008, 0x99400030, 0x30dc0001, 0xce800013, 0x99c0000a, 0xc42d324e,
-       0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x1ab0c006, 0x52ec0008, 0x8000127f,
-       0xc42d3258, 0xc4313257, 0x52ec0020, 0x7ef2c01a, 0xc4353259, 0xc429325a, 0x1ab0c012, 0x07740001,
-       0x04240002, 0x26a0003f, 0x7e624004, 0x7f67800f, 0x97800002, 0x04340000, 0x53740002, 0x7ef6c011,
-       0x1ab42010, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f73400a, 0x7f6b400a, 0xcf40001c, 0xd2c0001e,
-       0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x1514001f, 0x99400006, 0x9980000a, 0x8c0012e1,
-       0xc40c0007, 0x04100000, 0x80001267, 0xd800002a, 0xc424005e, 0x964008d7, 0xd9800036, 0x80000c16,
-       0xc42c001d, 0x95c00005, 0xc431325a, 0x1b300677, 0x11dc000c, 0x800012aa, 0xc4313256, 0x1b34060b,
-       0x1b300077, 0x7f37000a, 0x13300017, 0x04340100, 0x26ec00ff, 0xc03a8002, 0x7ef6c00a, 0x7edec00a,
-       0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16, 0xc4140032, 0xc410001d, 0x29540008, 0xcd400013,
-       0xc40d325b, 0x1858003f, 0x251000ff, 0x99800007, 0x7d0cc00a, 0xccc1325b, 0xc411325d, 0x251001ef,
-       0xcd01325d, 0x80000168, 0x18d0006c, 0x18d407f0, 0x9900000e, 0x04100002, 0xc4193256, 0xc41d324f,
-       0x2598003f, 0x7d190004, 0x7d5d4001, 0x7d52000f, 0x9a000003, 0xcd41324f, 0x800012d8, 0x7d514002,
-       0xcd41324f, 0x800012d8, 0xc4193259, 0xc41d325a, 0x7d958001, 0x7dd5c002, 0xcd813259, 0xcdc1325a,
-       0xc411325d, 0x251001ef, 0xcd01325d, 0x1ccc001e, 0xccc1325b, 0xc40d325b, 0x94c00001, 0x7c408001,
-       0x88000000, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0x9b000004, 0x9b40000c,
-       0x9b80000f, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037, 0x8c000190, 0xd8000032, 0x90000000,
-       0xd8000028, 0xd800002b, 0x80000168, 0xd980003f, 0x97c00002, 0xd9c0003f, 0x80001082, 0xd9800040,
-       0x97c00002, 0xd9c00040, 0x800010de, 0xc43c0007, 0x33f80003, 0x97800051, 0xcc80003b, 0x24b00008,
-       0xc418000e, 0x1330000a, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013,
-       0xc4353249, 0x1b74003e, 0x9b400002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x97000001,
-       0x7c434001, 0x1b4c00f8, 0x7c410001, 0x7c414001, 0x50700020, 0x04e81324, 0x18ac0024, 0x7c41c001,
-       0x50600020, 0xcc400078, 0x30e40004, 0x9a400007, 0x7d71401a, 0x596401fc, 0x12640009, 0x1b74008d,
-       0x7e76400a, 0x2a640000, 0xcec00008, 0x86800000, 0x8000016a, 0x8000016a, 0x8000016a, 0x8000016a,
-       0x8000132c, 0x8000133b, 0x80001344, 0x8000016a, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42530b5,
-       0x1a68003a, 0x9a80fffe, 0x2024003a, 0xc418000e, 0x25980700, 0x11980014, 0x7d19000a, 0xcd0130b7,
-       0xce4130b5, 0xcf400008, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f, 0x9a80ffff,
-       0xc4240011, 0x7de6800f, 0x9a80ffea, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f,
-       0x9a80ffff, 0xc8240011, 0x7de1c01a, 0x7de6800f, 0x9a80ffe0, 0x80001190, 0x8c00104f, 0x28182002,
-       0xc430000d, 0xc4340035, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005, 0xc4240004, 0x11a00002,
-       0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008,
-       0x7cb4800a, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024, 0x20240030, 0x7ca48001, 0xcc800026,
-       0x7c434001, 0x1b4c00f8, 0xcf400026, 0xcc400026, 0x28340000, 0xcf400013, 0x7c414001, 0x507c0020,
-       0x30e40004, 0x9a400005, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013,
-       0xcd400026, 0xcfc00026, 0xd4400026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000,
-       0x7c018001, 0xd8400027, 0xd8800013, 0x04380028, 0xcec0003a, 0xcf81a2a4, 0x80001037, 0xd8400029,
-       0xc40c005e, 0x94c007eb, 0x7c40c001, 0x50500020, 0x7d0d001a, 0xd1000072, 0xc8100072, 0x591c01fc,
-       0x11dc0009, 0x45140210, 0x595801fc, 0x11980009, 0x29dc0000, 0xcdc0001c, 0xd140001e, 0xd8400021,
-       0xc418000f, 0x9980ffff, 0xc4200011, 0x1624001f, 0x96400069, 0xc40c000e, 0x28cc0008, 0xccc00013,
-       0xce013249, 0x1a307fe8, 0xcf00000a, 0x23304076, 0xd1000001, 0xcf000001, 0xc41d3254, 0xc4253256,
-       0x18cc00e8, 0x10cc0015, 0x4514020c, 0xd140001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011,
-       0xce013248, 0x1a2001e8, 0x12200014, 0x2a204001, 0xce000013, 0x1a64003c, 0x1264001f, 0x11dc0009,
-       0x15dc000b, 0x7dcdc00a, 0x7e5dc00a, 0xcdc00100, 0xd8800013, 0xd8400010, 0xd800002a, 0xd8400008,
-       0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001, 0x04300010, 0xdf430000, 0x7c434001,
-       0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078, 0xdf030000, 0xd4412e40, 0xd8400013,
-       0xcc41c030, 0xcc41c031, 0x248dfffe, 0xccc12e00, 0xd8800013, 0xcc812e00, 0x7c434001, 0x7c434001,
-       0x8c00142b, 0xd8000010, 0xc40c000e, 0x28cc0008, 0xccc00013, 0x45140248, 0xd140001e, 0xd8400021,
-       0xc418000f, 0x9980ffff, 0xc8200011, 0xce013257, 0x56200020, 0xce013258, 0x0434000c, 0xdb000024,
-       0xd1400025, 0xd8000026, 0xd8000026, 0xd8400027, 0x45540008, 0xd140001e, 0xd8400021, 0xc418000f,
-       0x9980ffff, 0xc8200011, 0xce013259, 0x56200020, 0xc0337fff, 0x7f220009, 0xce01325a, 0x55300020,
-       0x7d01c001, 0x042c01d0, 0x8c000d61, 0x06ec0004, 0x7f01c001, 0x8c000d61, 0x041c0002, 0x042c01c8,
-       0x8c000d61, 0xc4380012, 0x9b80ffff, 0xd800002a, 0x80000aa7, 0xd800002a, 0x7c408001, 0x88000000,
-       0xd8400029, 0x7c40c001, 0x50500020, 0x8c001427, 0x7cd0c01a, 0xc4200007, 0xd0c00072, 0xc8240072,
-       0xd240001e, 0x7c414001, 0x19682011, 0x5a6c01fc, 0x12ec0009, 0x7eeac00a, 0x2aec0000, 0xcec0001c,
-       0xd8400021, 0xc430000f, 0x9b00ffff, 0xc4180011, 0x7c438001, 0x99800007, 0xdf830000, 0xcfa0000c,
-       0x8c00142b, 0xd4400078, 0xd800002a, 0x80001b70, 0x8c00142b, 0xd800002a, 0x80001b70, 0xd8000012,
-       0xc43c0008, 0x9bc0ffff, 0x90000000, 0xd8400012, 0xc43c0008, 0x97c0ffff, 0x90000000, 0xc4380007,
-       0x7c40c001, 0x17b80001, 0x18d40038, 0x7c410001, 0x9b800004, 0xd8400029, 0xc414005e, 0x9540073d,
-       0x18c80066, 0x7c414001, 0x30880001, 0x7c418001, 0x94800008, 0x8c00187c, 0xcf400013, 0xc42c0004,
-       0xd8400008, 0xcd910000, 0xcec00008, 0x7d410001, 0x043c0000, 0x7c41c001, 0x7c420001, 0x04240001,
-       0x06200001, 0x4220000c, 0x0a640001, 0xcc000078, 0x9a40fffe, 0x24e80007, 0x24ec0010, 0xd8400013,
-       0x9ac00006, 0xc42c0004, 0xd8400008, 0xc5310000, 0xcec00008, 0x80001465, 0x51540020, 0x7d15001a,
-       0xd1000072, 0xc82c0072, 0xd2c0001e, 0x18f02011, 0x5aec01fc, 0x12ec0009, 0x7ef2c00a, 0x2aec0000,
-       0xcec0001c, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc4300011, 0x96800012, 0x12a80001, 0x0aa80001,
-       0x06a8146a, 0x7f1f0009, 0x86800000, 0x7f1b400f, 0x80001478, 0x7f1b400e, 0x80001478, 0x7f1b400c,
-       0x8000147a, 0x7f1b400d, 0x8000147a, 0x7f1b400f, 0x8000147a, 0x7f1b400e, 0x8000147a, 0x7f334002,
-       0x97400014, 0x8000147b, 0x9b400012, 0x9b800005, 0x9bc0001f, 0x7e024001, 0x043c0001, 0x8000144a,
-       0xc40c0032, 0xc438001d, 0x28cc0008, 0xccc00013, 0xc43d325b, 0x1bb81ff0, 0x7fbfc00a, 0xcfc1325b,
-       0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0x94800007, 0x8c00187c, 0xcf400013, 0xc42c0004,
-       0xd8400008, 0xcd910000, 0xcec00008, 0x9b800003, 0xd800002a, 0x80001b70, 0xc40c0032, 0x28cc0008,
-       0xccc00013, 0xc40d325b, 0x800012c2, 0xc40c000e, 0xc43c0007, 0xc438001d, 0x28cc0008, 0xccc00013,
-       0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x800014a9, 0xc43d325a,
-       0x1bfc0677, 0x04300100, 0x1bb81ff0, 0x7f73400a, 0xc0328007, 0x7fb7800a, 0x13fc0017, 0x7ff3c00a,
-       0x7ffbc00a, 0xcfc1325b, 0xc03a0002, 0xc4340004, 0xd8400013, 0xd8400008, 0xcf8130b5, 0xcf400008,
-       0x80000c16, 0x043c0000, 0xc414000e, 0x29540008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020,
-       0x7dd9c01a, 0x45dc0390, 0xc4313267, 0x04183000, 0xcd813267, 0x1b380057, 0x1b340213, 0x1b300199,
-       0x7f7b400a, 0x7f73400a, 0xcf400024, 0xd1c00025, 0xcc800026, 0x7c420001, 0xce000026, 0x7c424001,
-       0xce400026, 0x7c428001, 0xce800026, 0x7c42c001, 0xcec00026, 0x7c430001, 0xcf000026, 0x7c434001,
-       0xcf400026, 0x7c438001, 0xcf800026, 0xd8400027, 0xcd400013, 0x04182000, 0xcd813267, 0xd840004f,
-       0x1a0800fd, 0x109c000a, 0xc4193265, 0x7dd9c00a, 0xcdc13265, 0x2620ffff, 0xce080228, 0x9880000e,
-       0xce480250, 0xce880258, 0xd8080230, 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270,
-       0xd8080278, 0xd8080280, 0xd800004f, 0x97c0ec75, 0x90000000, 0x040c0000, 0x041c0010, 0x26180001,
-       0x09dc0001, 0x16200001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80230, 0xd8080238, 0xd8080240,
-       0xd8080248, 0x040c0000, 0xce480250, 0xce880258, 0x52a80020, 0x7e6a401a, 0x041c0020, 0x66580001,
-       0x09dc0001, 0x56640001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80260, 0xd8080268, 0xd8080270,
-       0xd8080278, 0xd8080280, 0x040c0000, 0xcec80288, 0xcf080290, 0xcec80298, 0xcf0802a0, 0x040c0000,
-       0x041c0010, 0xcf4802a8, 0x27580001, 0x09dc0001, 0x17740001, 0x95800002, 0x04cc0001, 0x99c0fffb,
-       0xccc802b0, 0xd80802b8, 0x178c000b, 0x27b8003f, 0x7cf8c001, 0xcf8802c0, 0xccc802c8, 0xcf8802d0,
-       0xcf8802d8, 0xd800004f, 0x97c00002, 0x90000000, 0x7c408001, 0x88000000, 0xc40c000e, 0x28cc0008,
-       0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c418001, 0x25b8ffff, 0xc4930240, 0xc48f0238, 0x04cc0001,
-       0x24cc000f, 0x7cd2800c, 0x9a80000b, 0xc5230309, 0x2620ffff, 0x7e3a400c, 0x9a400004, 0x05100001,
-       0x2510000f, 0x80001539, 0xcd08034b, 0xd4400078, 0x80000168, 0xc48f0230, 0xc4930240, 0x98c00004,
-       0xcd880353, 0x8c00163f, 0xc49b0353, 0xc4930238, 0xc48f0228, 0x05100001, 0x2510000f, 0x7cd14005,
-       0x25540001, 0x99400004, 0x05100001, 0x2510000f, 0x8000154f, 0xc48f0230, 0x7c41c001, 0xcd080238,
-       0xcd08034b, 0x08cc0001, 0x2598ffff, 0x3d200008, 0xccc80230, 0xcd900309, 0xd8100319, 0x04340801,
-       0x2198003f, 0xcf400013, 0xcd910ce7, 0xc4190ce6, 0x7d918005, 0x25980001, 0x9580fffd, 0x7d918004,
-       0xcd810ce6, 0x9a000003, 0xcdd1054f, 0x8000156e, 0x090c0008, 0xcdcd050e, 0x040c0000, 0x110c0014,
-       0x28cc4001, 0xccc00013, 0xcc41230a, 0xcc41230b, 0xcc41230c, 0xcc41230d, 0xcc480329, 0xcc48032a,
-       0xcc4802e0, 0xd8000055, 0xc48f02e0, 0x24d8003f, 0x09940001, 0x44100001, 0x9580002c, 0x95400005,
-       0x09540001, 0x51100001, 0x69100001, 0x8000157f, 0x24cc003f, 0xc4970290, 0xc49b0288, 0x51540020,
-       0x7d59401a, 0xc49b02a0, 0xc49f0298, 0x51980020, 0x7d9d801a, 0x041c0040, 0x04200000, 0x7dcdc002,
-       0x7d924019, 0x7d26400c, 0x09dc0001, 0x9a400008, 0x51100001, 0x06200001, 0x99c0fffa, 0xc48f0230,
-       0xc4930240, 0x8c00163f, 0x80001579, 0x7d010021, 0x7d914019, 0xc4930238, 0x55580020, 0xcd480298,
-       0xcd8802a0, 0x10d40010, 0x12180016, 0xc51f0309, 0x7d95800a, 0x7d62000a, 0x7dd9c00a, 0xd8400013,
-       0xcdd00309, 0xce113320, 0xc48f02e0, 0xc49b02b0, 0x18dc01e8, 0x7dd9400e, 0xc48f0230, 0xc4930240,
-       0x95c0001d, 0x95400003, 0x8c00163f, 0x800015aa, 0xc48f0238, 0xc4a302b8, 0x12240004, 0x7e5e400a,
-       0xc4ab02a8, 0x04100000, 0xce4c0319, 0x7d9d8002, 0x7ea14005, 0x25540001, 0x99400004, 0x06200001,
-       0x2620000f, 0x800015bc, 0x09dc0001, 0x04240001, 0x7e624004, 0x06200001, 0x7d25000a, 0x2620000f,
-       0x99c0fff4, 0xd8400013, 0xcd0d3330, 0xce0802b8, 0xcd8802b0, 0xc4ab02e0, 0x1aa807f0, 0xc48f02d0,
-       0xc49702d8, 0xc49b02c8, 0xc49f02c0, 0x96800028, 0x7d4e000f, 0x9600000b, 0x7d964002, 0x7e6a000f,
-       0x96000003, 0x7d694001, 0x800015e9, 0x7cde4002, 0x7e6a000f, 0x96000008, 0x7de94001, 0x800015e9,
-       0x7cd64002, 0x7e6a000e, 0x96000003, 0x7d694001, 0x800015e9, 0xc48f0230, 0xc4930240, 0x8c00163f,
-       0x800015cd, 0xc4930238, 0x7d698002, 0xcd4802d8, 0x129c0008, 0xc50f0319, 0x11a0000e, 0x11140001,
-       0xc4340004, 0xd8400008, 0xd8400013, 0x7e1e000a, 0x1198000a, 0xcd953300, 0x7e0e000a, 0x12a8000a,
-       0xce953301, 0xce100319, 0xcf400008, 0xc4b70280, 0xc4b30278, 0x7f73800a, 0x536c0020, 0x7ef2c01a,
-       0x9780eb68, 0x8c001608, 0xd8080278, 0xd8080280, 0x7c408001, 0x88000000, 0x043c0003, 0x80001609,
-       0x043c0001, 0x30b40000, 0x9b400011, 0xc4b70258, 0xc4b30250, 0x53780020, 0x7fb3801a, 0x7faf8019,
-       0x04300020, 0x04280000, 0x67b40001, 0x0b300001, 0x57b80001, 0x97400002, 0x06a80001, 0x9b00fffb,
-       0xc4bb0260, 0x7fab8001, 0xcf880260, 0x04300020, 0x04280000, 0x66f40001, 0x0b300001, 0x56ec0001,
-       0x97400005, 0x8c001628, 0xc4353247, 0x7f7f4009, 0x9b40fffe, 0x06a80001, 0x9b00fff7, 0x90000000,
-       0x269c0007, 0x11dc0008, 0x29dc0008, 0x26a00018, 0x12200003, 0x7de1c00a, 0x26a00060, 0x06200020,
-       0x16200001, 0x7de1c00a, 0xcdc00013, 0x90000000, 0x269c0018, 0x26a00007, 0x26a40060, 0x11dc0006,
-       0x12200006, 0x16640001, 0x29dc0008, 0x7de1c00a, 0x7de5c00a, 0xcdc00013, 0x90000000, 0xc4b70228,
-       0x05100001, 0x04cc0001, 0x2510000f, 0xccc80230, 0x7f514005, 0x25540001, 0x99400004, 0x05100001,
-       0x2510000f, 0x80001644, 0xc4b30248, 0xcd080240, 0x7f130005, 0x27300001, 0x9b000002, 0x8c001688,
-       0x8c00120d, 0x8c001219, 0x8c001232, 0x04300001, 0x04340801, 0x7f130004, 0xcf400013, 0xcf01051e,
-       0xc42d051f, 0x7ed2c005, 0x26ec0001, 0x96c0fffd, 0xcf01051f, 0xd8000055, 0xc5170309, 0x195c07f0,
-       0x196007f6, 0x04340000, 0x95c00008, 0x09dc0001, 0x04340001, 0x95c00005, 0x09dc0001, 0x53740001,
-       0x6b740001, 0x80001665, 0xc4a702a0, 0xc4ab0298, 0x52640020, 0x7e6a401a, 0x7f634014, 0x7e76401a,
-       0xc4300004, 0xd8400008, 0xd8400013, 0x56680020, 0xd8113320, 0xce480298, 0xce8802a0, 0xc5170319,
-       0xc4b702b0, 0x255c000f, 0x7f5f4001, 0xd8113330, 0xcf4802b0, 0x11340001, 0x195c07e8, 0x196007ee,
-       0xd8353300, 0x7e1e4001, 0xd8353301, 0xce4802d0, 0xd8100309, 0xd8100319, 0xcf000008, 0x90000000,
-       0xc4970258, 0xc48f0250, 0x51540020, 0x7cd4c01a, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a,
-       0x04140020, 0x04280000, 0x64d80001, 0x09540001, 0x54cc0001, 0x95800060, 0x8c001628, 0xc4193247,
-       0x25980001, 0x9580005c, 0x7dc24001, 0xc41d3248, 0x25dc000f, 0x7dd2000c, 0x96000057, 0xc41d3255,
-       0xc435324f, 0x7df5c00c, 0x99c00004, 0xc4193265, 0x25980040, 0x9580fffe, 0xc439325b, 0x1bb0003f,
-       0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260, 0x1bb000e4,
-       0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x800016f1, 0xce400013, 0xc033ffff,
-       0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe, 0xd8c00033,
-       0xc4300009, 0x27300008, 0x9700fffe, 0x1a7003e6, 0x27380003, 0x13b80004, 0x27300003, 0x13300003,
-       0x7fb38001, 0x1a7000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013, 0x1a700064,
-       0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
-       0x0b300003, 0x800016df, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005,
-       0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xce400013, 0xc431325d,
-       0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xd841325d, 0x2030007b, 0xcf01325b,
-       0x800016f2, 0xd841325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9c, 0x8c001608,
-       0xd8080278, 0xd8080280, 0x90000000, 0xd840004f, 0xc414000e, 0x29540008, 0xcd400013, 0xc43d3265,
-       0x1bc800ea, 0xd80802e9, 0x7c40c001, 0x18fc0064, 0x9bc00042, 0xc4193246, 0xc41d3245, 0x51980020,
-       0x7dd9801a, 0x45980400, 0xc4313267, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x9bc00001, 0x1b380057,
-       0x1b340213, 0x1b300199, 0x7f7b400a, 0x7f73400a, 0xcf400024, 0x14f4001d, 0xc4bf02e9, 0x9bc0001c,
-       0x7c410001, 0x192807fa, 0xc4bf0258, 0xc4a70250, 0x53fc0020, 0x7e7e401a, 0x042c0000, 0x04300000,
-       0x667c0001, 0x56640001, 0x06ec0001, 0x97c0fffd, 0x07300001, 0x0aec0001, 0x7eebc00c, 0x06ec0001,
-       0x97c0fff8, 0x0b300001, 0x43300007, 0x53300002, 0x7db30011, 0xd3000025, 0xc03ec005, 0x2bfca200,
-       0xcfc00026, 0xccc00026, 0xcd000026, 0x192807fa, 0xc01f007f, 0x7d1d0009, 0x2110007d, 0x8c001628,
-       0x203c003f, 0xcfc13256, 0x8c0017f5, 0xcd013254, 0x18fc01e8, 0xcfc13248, 0x8c00185b, 0xd8413247,
-       0x0b740001, 0x9b40ffd5, 0xd800004f, 0xc4bf02e9, 0x97c0ea24, 0x90000000, 0x14d4001d, 0xc4930260,
-       0x7d52400e, 0xc49f0258, 0xc4a30250, 0x51dc0020, 0x7de1801a, 0x96400017, 0x7d534002, 0xc4af0270,
-       0x7dae4005, 0x26640001, 0x32e0001f, 0x9a400006, 0x06ec0001, 0x96000002, 0x042c0000, 0xcec80270,
-       0x8000174f, 0x0b740001, 0x8c00178a, 0x05100001, 0x9b40fff3, 0xc4af0280, 0xc4b30278, 0x52ec0020,
-       0x7ef2c01a, 0x8c001608, 0xd8080278, 0xd8080280, 0xc4ab0268, 0x7daa4005, 0x26640001, 0x32a0001f,
-       0x9a400005, 0x06a80001, 0x96000002, 0x24280000, 0x80001765, 0x7c410001, 0xc01f007f, 0x09540001,
-       0x7d1d0009, 0x2110007d, 0x8c001628, 0xd8013256, 0x8c0017f2, 0xcd013254, 0xc4113248, 0x15100004,
-       0x11100004, 0xc4b3034b, 0x7f13000a, 0xcf013248, 0xc4930260, 0x8c001855, 0x32a4001f, 0xd8413247,
-       0xd800004f, 0x09100001, 0x06a80001, 0x96400002, 0x24280000, 0xcd080260, 0xce880268, 0x9940ffc0,
-       0x7c408001, 0x88000000, 0x7ec28001, 0x8c001628, 0x32e0001f, 0xc4253247, 0x26640001, 0x9640005e,
-       0xc4293265, 0xc4253255, 0xc431324f, 0x7e72400c, 0x26a80040, 0x9a400002, 0x9680fff7, 0xc429325b,
-       0x1aa4003f, 0x96400049, 0x1aa400e8, 0x32680003, 0x9a800046, 0x32640002, 0x9640000a, 0xc4293260,
-       0x1aa400e4, 0x32640004, 0x96400040, 0xc425325d, 0x26640010, 0x9a40fffe, 0x800017e2, 0xcdc00013,
-       0xc027ffff, 0x2e6400ff, 0xc429325b, 0x7e6a4009, 0xce41325b, 0xc429325b, 0x26a800ff, 0x9a80fffe,
-       0xd8c00033, 0xc4240009, 0x26640008, 0x9640fffe, 0x19e403e6, 0x26680003, 0x12a80004, 0x26640003,
-       0x12640003, 0x7ea68001, 0x19e400e8, 0x7ea68001, 0x12640001, 0x7ea68001, 0x06a80002, 0xd8400013,
-       0x19e40064, 0x32640002, 0x96400009, 0x16a40005, 0x06640003, 0xce412082, 0xcc01203f, 0xd8400013,
-       0xcc01203f, 0x0a640003, 0x800017d0, 0x16a40005, 0xce412082, 0xcc01203f, 0xd8400013, 0xcc01203f,
-       0x12640005, 0x7ea64002, 0xc4292083, 0x7ea68005, 0x26a80001, 0x9a80ffdf, 0xd8c00034, 0xcdc00013,
-       0xc425325d, 0x26640010, 0x9a40fffe, 0xc429325b, 0x26a400ff, 0x9a40ffca, 0xd841325d, 0x2024007b,
-       0xce41325b, 0x800017e3, 0xd841325d, 0xc4a70280, 0xc4ab0278, 0x52640020, 0x7e6a401a, 0x04280001,
-       0x7eae8014, 0x7e6a401a, 0x56680020, 0xce480278, 0xce880280, 0x06ec0001, 0x96000002, 0x042c0000,
-       0xcec80270, 0x90000000, 0x7c438001, 0x7c420001, 0x800017fe, 0xc4bf02e9, 0x9bc00006, 0x7c438001,
-       0x7c420001, 0xcf800026, 0xce000026, 0x800017fe, 0xc43b02eb, 0xc42302ec, 0xcf813245, 0xce013246,
-       0x52200020, 0x7fa3801a, 0x47b8020c, 0x15e00008, 0x1220000a, 0x2a206032, 0x513c001e, 0x7e3e001a,
-       0xc4bf02e9, 0x9bc00005, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x8000180f, 0xcd400013, 0xc4313267,
-       0x1b3c0077, 0x1b300199, 0x7ff3000a, 0x1330000a, 0x2b300032, 0x043c3000, 0xcfc13267, 0xc43d3267,
-       0xd200000b, 0xc4200007, 0xd3800002, 0xcf000002, 0xd8000040, 0x96000002, 0xd8400040, 0xd8400018,
-       0x043c2000, 0xcfc13267, 0xd8000018, 0xd8800010, 0xcdc00013, 0x7dc30001, 0xdc1e0000, 0x04380032,
-       0xcf80000e, 0x8c001427, 0xcc413248, 0xc43d3269, 0x27fc000f, 0x33fc0003, 0x97c00011, 0x043c001f,
-       0xdfc30000, 0xd4413249, 0x7c43c001, 0x7c43c001, 0x043c0024, 0x0bfc0021, 0xdfc30000, 0xd441326a,
-       0x173c0008, 0x1b300303, 0x7f3f0001, 0x043c0001, 0x7ff3c004, 0xcfc13084, 0x80001842, 0x043c0024,
-       0xdfc30000, 0xd4413249, 0x7c43c001, 0x23fc003f, 0xcfc1326d, 0x0bb80026, 0xdf830000, 0xd441326e,
-       0x7c438001, 0x7c438001, 0xc4393265, 0x1fb8ffc6, 0xddc30000, 0xcf813265, 0x9a000003, 0xcdc0000c,
-       0x80001852, 0xcdc0000d, 0xce000010, 0x8c00142b, 0x90000000, 0x7c41c001, 0x7c420001, 0xcdc13252,
-       0xce013253, 0x8c001628, 0x80001878, 0xc49f02e9, 0x99c00018, 0x7c41c001, 0x7c420001, 0xcdc13252,
-       0xce013253, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x97c0ffff,
-       0xcdc00026, 0xce000026, 0xd8400027, 0xc41c0012, 0x99c0ffff, 0xc43c000e, 0x2bfc0008, 0xcfc00013,
-       0x043c2000, 0xcfc13267, 0x8c001628, 0x80001878, 0xc41f02ed, 0xc42302ee, 0xcdc13252, 0xce013253,
-       0x04200001, 0x7e2a0004, 0xce013084, 0x90000000, 0x28340001, 0x313c0bcc, 0x9bc00010, 0x393c051f,
-       0x9bc00004, 0x3d3c050e, 0x9bc0000c, 0x97c0000c, 0x393c0560, 0x9bc00004, 0x3d3c054f, 0x9bc00007,
-       0x97c00007, 0x393c1538, 0x9bc00005, 0x3d3c1537, 0x9bc00002, 0x97c00002, 0x2b740800, 0x90000000,
-       0xc40c000e, 0x28cc0008, 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e8007c, 0x7c42c001,
-       0x06a8189a, 0x86800000, 0x8000189e, 0x800018c5, 0x800018f2, 0x8000016a, 0x7c414001, 0x18d0007e,
-       0x50580020, 0x09200001, 0x7d59401a, 0xd1400072, 0xc8140072, 0x09240002, 0x7c418001, 0x7c41c001,
-       0x99000011, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42130b5, 0x1a24002c, 0x9a40fffe, 0x2020002c,
-       0xc418000d, 0x1198001c, 0x10cc0004, 0x14cc0004, 0x7cd8c00a, 0xccc130b7, 0xce0130b5, 0xcf400008,
-       0x80000168, 0xd1400025, 0x5978073a, 0x2bb80002, 0xcf800024, 0xcd800026, 0xcdc00026, 0xd8400027,
-       0x9600e8a8, 0xc4300012, 0x9b00ffff, 0x9640e8a5, 0x800018a9, 0x04140000, 0xc55b0309, 0x3d5c0010,
-       0x05540001, 0x2598ffff, 0x09780001, 0x7dad800c, 0x99c0ffd2, 0x9580fff9, 0xc4970258, 0xc4930250,
-       0x51540020, 0x7d15001a, 0x04140020, 0x04280000, 0x442c0000, 0x65180001, 0x09540001, 0x55100001,
-       0x9580000b, 0x8c001628, 0xc41d3248, 0x04300001, 0x7f2b0014, 0x25dc000f, 0x7df9c00c, 0x95c00004,
-       0x7ef2c01a, 0xd8c13260, 0xd901325d, 0x06a80001, 0x9940fff1, 0x04140020, 0x04280000, 0x66d80001,
-       0x09540001, 0x56ec0001, 0x95800005, 0x8c001628, 0xc421325d, 0x26240007, 0x9a40fffe, 0x06a80001,
-       0x9940fff7, 0x8000189e, 0x04140020, 0x04280000, 0x09540001, 0x8c001628, 0xc41d3254, 0xc023007f,
-       0x19e4003e, 0x7de1c009, 0x7dee000c, 0x96400008, 0x96000007, 0xd8c13260, 0xd901325d, 0xc421325d,
-       0x261c0007, 0x99c0fffe, 0x8000189e, 0x06a80001, 0x9940fff0, 0x8000189e, 0xc40c000e, 0x28cc0008,
-       0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e00064, 0x06281911, 0x14f4001d, 0x24cc0003,
-       0x86800000, 0x80001915, 0x800019af, 0x80001a2b, 0x8000016a, 0xcc48032b, 0xcc480333, 0xcc48033b,
-       0xcc480343, 0x98800011, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267,
-       0x04203000, 0xce013267, 0xc4213267, 0x9a000001, 0x1b3c0057, 0x1b200213, 0x1b300199, 0x7e3e000a,
-       0x7e32000a, 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278,
-       0x52ec0020, 0x7ef2c01a, 0x04180000, 0x04140020, 0x04280000, 0x7f438001, 0x8c001628, 0xc41d3247,
-       0x25dc0001, 0x95c00068, 0xc4213254, 0x1a1c003e, 0x95c00065, 0xc01f007f, 0x7e1e0009, 0x97800062,
-       0x0bb80001, 0x43bc0008, 0x7fcbc001, 0xc7df032b, 0x7e1fc00c, 0x97c0fffa, 0x043c0101, 0x94c00002,
-       0x043c0102, 0xc439325b, 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002,
-       0x97000009, 0xc4393260, 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe,
-       0x80001994, 0x8c001628, 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b,
-       0x27b800ff, 0x9b80fffe, 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003,
-       0x13b80004, 0x27300003, 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001,
-       0x07b80002, 0xd8400013, 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082,
-       0xcc01203f, 0xd8400013, 0xcc01203f, 0x0b300003, 0x80001982, 0x17b00005, 0xcf012082, 0xcc01203f,
-       0xd8400013, 0xcc01203f, 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf,
-       0xd8c00034, 0xcdc00013, 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffcb,
-       0xcfc1325d, 0x2030007b, 0xcf01325b, 0x80001995, 0xcfc1325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a,
-       0x98800009, 0x41bc0007, 0x53fc0002, 0x7e7fc011, 0xd3c00025, 0xd8000026, 0xd8400027, 0xc43c0012,
-       0x9bc0ffff, 0x653c0001, 0x7dbd8001, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff8f, 0xc43c000e,
-       0x2bfc0008, 0xcfc00013, 0x043c2000, 0xcfc13267, 0xd8080278, 0xd8080280, 0x80000168, 0x7c410001,
-       0x04140000, 0xc55b0309, 0x3d5c0010, 0x2598ffff, 0x05540001, 0x7d91800c, 0x95c00003, 0xd4400078,
-       0x80000168, 0x9580fff8, 0x09780001, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280,
-       0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x09540001, 0x55100001,
-       0x9580005d, 0x8c001628, 0xc4253247, 0x26640001, 0x04200101, 0x96400058, 0x7dc24001, 0xc41d3248,
-       0x25dc000f, 0x7df9c00c, 0x95c00053, 0x94c00002, 0x04200102, 0x7e41c001, 0xc425325b, 0x1a70003f,
-       0x97000049, 0x1a7000e8, 0x33240003, 0x9a400046, 0x33300002, 0x9700000a, 0xc4253260, 0x1a7000e4,
-       0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001a21, 0xcdc00013, 0xc033ffff,
-       0x2f3000ff, 0xc425325b, 0x7f270009, 0xcf01325b, 0xc425325b, 0x266400ff, 0x9a40fffe, 0xd8c00033,
-       0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27240003, 0x12640004, 0x27300003, 0x13300003,
-       0x7e724001, 0x19f000e8, 0x7e724001, 0x13300001, 0x7e724001, 0x06640002, 0xd8400013, 0x19f00064,
-       0x33300002, 0x97000009, 0x16700005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
-       0x0b300003, 0x80001a0f, 0x16700005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005,
-       0x7e730002, 0xc4252083, 0x7e724005, 0x26640001, 0x9a40ffdf, 0xd8c00034, 0xcdc00013, 0xc431325d,
-       0x27300010, 0x9b00fffe, 0xc425325b, 0x267000ff, 0x9b00ffca, 0xce01325d, 0x2030007b, 0xcf01325b,
-       0x80001a22, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9f, 0xd4400078,
-       0xd8080278, 0xd8080280, 0x80000168, 0x8c001a31, 0xd4400078, 0xd8080278, 0xd8080280, 0x7c408001,
-       0x88000000, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267, 0x04203000,
-       0xce013267, 0xc4213267, 0x9a000001, 0x1b180057, 0x1b200213, 0x1b300199, 0x7e1a000a, 0x7e32000a,
-       0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278, 0x52ec0020,
-       0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x95800060, 0x8c001628, 0xc4193247, 0x25980001,
-       0x04200101, 0x94c00005, 0x30f00005, 0x04200005, 0x9b000002, 0x04200102, 0x95800056, 0xc439325b,
-       0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260,
-       0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001aa2, 0xcdc00013,
-       0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe,
-       0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003, 0x13b80004, 0x27300003,
-       0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013,
-       0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013,
-       0xcc01203f, 0x0b300003, 0x80001a90, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
-       0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xcdc00013,
-       0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xce01325d, 0x2030007b,
-       0xcf00325b, 0x80001aa3, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0xc49b02e9, 0x99800005,
-       0xd2400025, 0x4664001c, 0xd8000026, 0xd8400027, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff9c,
-       0xc49b02e9, 0x99800008, 0xc430000e, 0x2b300008, 0xcf000013, 0x04302000, 0xcf013267, 0xc4313267,
-       0x97000001, 0x90000000, 0x244c00ff, 0xcc4c0200, 0x7c408001, 0x88000000, 0xc44f0200, 0xc410000b,
-       0xc414000c, 0x7d158010, 0x059cc000, 0xd8400013, 0xccdd0000, 0x7c408001, 0x88000000, 0xc40c0037,
-       0x94c0ffff, 0xcc000049, 0xc40c003a, 0x94c0ffff, 0x7c40c001, 0x24d00001, 0x9500e69a, 0x18d0003b,
-       0x18d40021, 0x99400006, 0xd840004a, 0xc40c003c, 0x94c0ffff, 0x14cc0001, 0x94c00028, 0xd8000033,
-       0xc438000b, 0xc43c0009, 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078,
-       0x7ffbc00c, 0x97c0fffd, 0x99000004, 0xc0120840, 0x282c0040, 0x80001ae8, 0xc0121841, 0x282c001a,
-       0xcd01c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04200004, 0xcec0001b, 0xd8400021,
-       0x0a200001, 0x9a00ffff, 0xc425c07f, 0x166c001f, 0x04200004, 0x9ac0fffb, 0xc434000f, 0x9b40ffff,
-       0xd801c07f, 0xd8400013, 0xc425c07f, 0xce400078, 0xd8000034, 0x9940e66b, 0xd800004a, 0x7c408001,
-       0x88000000, 0xc40c0036, 0x24d00001, 0x9900fffe, 0x18cc0021, 0xccc00047, 0xcc000046, 0xc40c0039,
-       0x94c0ffff, 0xc40c003d, 0x98c0ffff, 0x7c40c001, 0x24d003ff, 0x18d47fea, 0x18d87ff4, 0xcd00004c,
-       0xcd40004e, 0xcd80004d, 0xd8400013, 0xcd41c405, 0xc02a0001, 0x2aa80001, 0xce800013, 0xcd01c406,
-       0xcc01c406, 0xcc01c406, 0xc40c0006, 0x98c0ffff, 0xc414000e, 0x29540008, 0x295c0001, 0xcd400013,
-       0xd8c1325e, 0xcdc0001a, 0x11980002, 0x4110000c, 0xc0160800, 0x7d15000a, 0xc0164010, 0xd8400013,
-       0xcd41c078, 0xcc01c080, 0xcc01c081, 0xcd81c082, 0xcc01c083, 0xcd01c084, 0xc40c0006, 0x98c0ffff,
-       0xd8400048, 0xc40c003b, 0x94c0ffff, 0x80000c16, 0xd8400013, 0xd801c40a, 0xd901c40d, 0xd801c410,
-       0xd801c40e, 0xd801c40f, 0xc40c0040, 0x04140001, 0x09540001, 0x9940ffff, 0x04140096, 0xd8400013,
-       0xccc1c400, 0xc411c401, 0x9500fffa, 0xc424003e, 0x04d00001, 0x11100002, 0xcd01c40c, 0xc0180034,
-       0xcd81c411, 0xd841c414, 0x0a540001, 0xcd41c412, 0x2468000f, 0xc419c416, 0x41980003, 0xc41c003f,
-       0x7dda0001, 0x12200002, 0x10cc0002, 0xccc1c40c, 0xd901c411, 0xce41c412, 0xd8800013, 0xce292e40,
-       0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xc43c0007, 0xdc120000, 0x31144000,
-       0x95400005, 0xdc030000, 0xd800002a, 0xcc3c000c, 0x80001b70, 0x33f80003, 0xd4400078, 0x9780e601,
-       0x188cfff0, 0x04e40002, 0x80001190, 0x7c408001, 0x88000000, 0xc424005e, 0x96400006, 0x90000000,
-       0xc424005e, 0x96400003, 0x7c408001, 0x88000000, 0x80001b74, 0x80000168, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
-       0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       },
-       .dfy_size = 7440
-};
-
-static const PWR_DFY_Section pwr_virus_section4 = {
-       .dfy_cntl = 0x80000004,
-       .dfy_addr_hi = 0x000000b4,
-       .dfy_addr_lo = 0x54106500,
-       .dfy_data = {
-       0x7e000200, 0x7e020204, 0xc00a0505, 0x00000000, 0xbf8c007f, 0xb8900904, 0xb8911a04, 0xb8920304,
-       0xb8930b44, 0x921c0d0c, 0x921c1c13, 0x921d0c12, 0x811c1d1c, 0x811c111c, 0x921cff1c, 0x00000400,
-       0x921dff10, 0x00000100, 0x81181d1c, 0x7e040218, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0701000, 0x80050002, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0701000, 0x80050102, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
-       0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       },
-       .dfy_size = 240
-};
-
-static const PWR_DFY_Section pwr_virus_section5 = {
-       .dfy_cntl = 0x80000004,
-       .dfy_addr_hi = 0x000000b4,
-       .dfy_addr_lo = 0x54106900,
-       .dfy_data = {
-       0x7e080200, 0x7e100204, 0xbefc00ff, 0x00010000, 0x24200087, 0x262200ff, 0x000001f0, 0x20222282,
-       0x28182111, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
-       0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
-       0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
-       0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
-       0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
-       0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
-       0x1100000c, 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       },
-       .dfy_size = 384
-};
-
-static const PWR_DFY_Section pwr_virus_section6 = {
-       .dfy_cntl = 0x80000004,
-       .dfy_addr_hi = 0x000000b4,
-       .dfy_addr_lo = 0x54116f00,
-       .dfy_data = {
-       0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4540fe8, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000041, 0x0000000c, 0x00000000, 0x07808000, 0xffffffff,
-       0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
-       0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x54116f00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
-       0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
-       0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb454105e, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x000000c0, 0x00000010, 0x00000000, 0x07808000, 0xffffffff,
-       0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
-       0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x54117300, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
-       0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
-       0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541065, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000500, 0x0000001c, 0x00000000, 0x07808000, 0xffffffff,
-       0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
-       0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x54117700, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
-       0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
-       0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541069, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000444, 0x0000008a, 0x00000000, 0x07808000, 0xffffffff,
-       0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
-       0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x54117b00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
-       0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
-       0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       },
-       .dfy_size = 1024
-};
-
-static const PWR_Command_Table PwrVirusTable_post[] = {
-    { 0x00000000, mmCP_MEC_CNTL                              },
-    { 0x00000000, mmCP_MEC_CNTL                              },
-    { 0x00000004, mmSRBM_GFX_CNTL                            },
-    { 0x54116f00, mmCP_MQD_BASE_ADDR                         },
-    { 0x000000b4, mmCP_MQD_BASE_ADDR_HI                      },
-    { 0xb4540fef, mmCP_HQD_PQ_BASE                           },
-    { 0x00000000, mmCP_HQD_PQ_BASE_HI                        },
-    { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR                 },
-    { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI              },
-    { 0x00005301, mmCP_HQD_PERSISTENT_STATE                  },
-    { 0x00010000, mmCP_HQD_VMID                              },
-    { 0xc8318509, mmCP_HQD_PQ_CONTROL                        },
-    { 0x00000005, mmSRBM_GFX_CNTL                            },
-    { 0x54117300, mmCP_MQD_BASE_ADDR                         },
-    { 0x000000b4, mmCP_MQD_BASE_ADDR_HI                      },
-    { 0xb4540fef, mmCP_HQD_PQ_BASE                           },
-    { 0x00000000, mmCP_HQD_PQ_BASE_HI                        },
-    { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR                 },
-    { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI              },
-    { 0x00005301, mmCP_HQD_PERSISTENT_STATE                  },
-    { 0x00010000, mmCP_HQD_VMID                              },
-    { 0xc8318509, mmCP_HQD_PQ_CONTROL                        },
-    { 0x00000006, mmSRBM_GFX_CNTL                            },
-    { 0x54117700, mmCP_MQD_BASE_ADDR                         },
-    { 0x000000b4, mmCP_MQD_BASE_ADDR_HI                      },
-    { 0xb4540fef, mmCP_HQD_PQ_BASE                           },
-    { 0x00000000, mmCP_HQD_PQ_BASE_HI                        },
-    { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR                 },
-    { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI              },
-    { 0x00005301, mmCP_HQD_PERSISTENT_STATE                  },
-    { 0x00010000, mmCP_HQD_VMID                              },
-    { 0xc8318509, mmCP_HQD_PQ_CONTROL                        },
-    { 0x00000007, mmSRBM_GFX_CNTL                            },
-    { 0x54117b00, mmCP_MQD_BASE_ADDR                         },
-    { 0x000000b4, mmCP_MQD_BASE_ADDR_HI                      },
-    { 0xb4540fef, mmCP_HQD_PQ_BASE                           },
-    { 0x00000000, mmCP_HQD_PQ_BASE_HI                        },
-    { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR                 },
-    { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI              },
-    { 0x00005301, mmCP_HQD_PERSISTENT_STATE                  },
-    { 0x00010000, mmCP_HQD_VMID                              },
-    { 0xc8318509, mmCP_HQD_PQ_CONTROL                        },
-    { 0x00000004, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000104, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000204, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000304, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000404, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000504, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000604, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000704, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000005, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000105, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000205, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000305, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000405, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000505, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000605, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000705, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000006, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000106, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000206, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000306, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000406, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000506, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000606, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000706, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000007, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000107, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000207, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000307, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000407, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000507, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000607, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000707, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000008, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000108, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000208, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000308, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000408, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000508, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000608, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000708, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000009, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000109, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000209, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000309, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000409, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000509, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000609, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000709, mmSRBM_GFX_CNTL                            },
-    { 0x00000000, mmCP_HQD_ACTIVE                            },
-    { 0x00000000, mmCP_HQD_PQ_RPTR                           },
-    { 0x00000000, mmCP_HQD_PQ_WPTR                           },
-    { 0x00000001, mmCP_HQD_ACTIVE                            },
-    { 0x00000004, mmSRBM_GFX_CNTL                            },
-    { 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1                    },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, mmGRBM_STATUS                              },
-    { 0x00000000, 0xffffffff                                 },
-};
-
-#endif
index 126b44d47a99beab4672fa548c96220bd28f2a2a..004a40e88bdef05a675812e049ab805f3dbe4de0 100644 (file)
@@ -363,6 +363,12 @@ struct pp_hwmgr_func {
        int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
        int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
        int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
+       int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
+                                       uint32_t virtual_addr_low,
+                                       uint32_t virtual_addr_hi,
+                                       uint32_t mc_addr_low,
+                                       uint32_t mc_addr_hi,
+                                       uint32_t size);
 };
 
 struct pp_table_func {
index 7c9aba81cd6a246acee264d52b327a0ca221d88e..b1b27b2128f68aec08346dbbfc090eb064357622 100644 (file)
@@ -75,6 +75,11 @@ enum SMU_MEMBER {
        VceBootLevel,
        SamuBootLevel,
        LowSclkInterruptThreshold,
+       DRAM_LOG_ADDR_H,
+       DRAM_LOG_ADDR_L,
+       DRAM_LOG_PHY_ADDR_H,
+       DRAM_LOG_PHY_ADDR_L,
+       DRAM_LOG_BUFF_SIZE,
 };
 
 
index cb070ebc7de196f185415c6c02afd27c66e24a6d..d06ece4ac47df0dbea52532df2df8a82051b6f3c 100644 (file)
@@ -124,6 +124,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_NumOfDisplays                  0x56
 #define PPSMC_MSG_ReadSerialNumTop32             0x58
 #define PPSMC_MSG_ReadSerialNumBottom32          0x59
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh   0x5A
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow    0x5B
 #define PPSMC_MSG_RunAcgBtc                      0x5C
 #define PPSMC_MSG_RunAcgInClosedLoop             0x5D
 #define PPSMC_MSG_RunAcgInOpenLoop               0x5E
index 4e2988825ff6547a9ba15fcafd0b73935356d4a6..b24b0f203a51575667fc55486f111caf7e9eb634 100644 (file)
@@ -2,9 +2,9 @@
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
 
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
-         polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
-         smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o ci_smc.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+         polaris10_smumgr.o iceland_smumgr.o \
+         smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c
deleted file mode 100644 (file)
index 0017b9e..0000000
+++ /dev/null
@@ -1,2808 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-#include <linux/types.h>
-
-#include "smumgr.h"
-#include "pp_debug.h"
-#include "ci_smumgr.h"
-#include "ppsmc.h"
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "pppcielanes.h"
-
-#include "smu/smu_7_0_1_d.h"
-#include "smu/smu_7_0_1_sh_mask.h"
-
-#include "dce/dce_8_0_d.h"
-#include "dce/dce_8_0_sh_mask.h"
-
-#include "bif/bif_4_1_d.h"
-#include "bif/bif_4_1_sh_mask.h"
-
-#include "gca/gfx_7_2_d.h"
-#include "gca/gfx_7_2_sh_mask.h"
-
-#include "gmc/gmc_7_1_d.h"
-#include "gmc/gmc_7_1_sh_mask.h"
-
-#include "processpptables.h"
-
-#define MC_CG_ARB_FREQ_F0           0x0a
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define MC_CG_ARB_FREQ_F2           0x0c
-#define MC_CG_ARB_FREQ_F3           0x0d
-
-#define SMC_RAM_END 0x40000
-
-#define VOLTAGE_SCALE               4
-#define VOLTAGE_VID_OFFSET_SCALE1    625
-#define VOLTAGE_VID_OFFSET_SCALE2    100
-#define CISLAND_MINIMUM_ENGINE_CLOCK 800
-#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
-
-static const struct ci_pt_defaults defaults_hawaii_xt = {
-       1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
-       { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
-       { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-static const struct ci_pt_defaults defaults_hawaii_pro = {
-       1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
-       { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
-       { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-static const struct ci_pt_defaults defaults_bonaire_xt = {
-       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
-       { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
-       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
-};
-
-
-static const struct ci_pt_defaults defaults_saturn_xt = {
-       1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
-       { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
-       { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-
-static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
-                                       uint32_t smc_addr, uint32_t limit)
-{
-       if ((0 != (3 & smc_addr))
-               || ((smc_addr + 3) >= limit)) {
-               pr_err("smc_addr invalid \n");
-               return -EINVAL;
-       }
-
-       cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
-       PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-       return 0;
-}
-
-static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
-                               const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
-       int result;
-       uint32_t data = 0;
-       uint32_t original_data;
-       uint32_t addr = 0;
-       uint32_t extra_shift;
-
-       if ((3 & smc_start_address)
-               || ((smc_start_address + byte_count) >= limit)) {
-               pr_err("smc_start_address invalid \n");
-               return -EINVAL;
-       }
-
-       addr = smc_start_address;
-
-       while (byte_count >= 4) {
-       /* Bytes are written into the SMC address space with the MSB first. */
-               data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
-               result = ci_set_smc_sram_address(hwmgr, addr, limit);
-
-               if (0 != result)
-                       return result;
-
-               cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
-
-               src += 4;
-               byte_count -= 4;
-               addr += 4;
-       }
-
-       if (0 != byte_count) {
-
-               data = 0;
-
-               result = ci_set_smc_sram_address(hwmgr, addr, limit);
-
-               if (0 != result)
-                       return result;
-
-
-               original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
-
-               extra_shift = 8 * (4 - byte_count);
-
-               while (byte_count > 0) {
-                       /* Bytes are written into the SMC addres space with the MSB first. */
-                       data = (0x100 * data) + *src++;
-                       byte_count--;
-               }
-
-               data <<= extra_shift;
-
-               data |= (original_data & ~((~0UL) << extra_shift));
-
-               result = ci_set_smc_sram_address(hwmgr, addr, limit);
-
-               if (0 != result)
-                       return result;
-
-               cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
-       }
-
-       return 0;
-}
-
-
-static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
-{
-       static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
-
-       ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
-
-       return 0;
-}
-
-bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
-       return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
-                       CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
-       && (0x20100 <= cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
-                               uint32_t *value, uint32_t limit)
-{
-       int result;
-
-       result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
-
-       if (result)
-               return result;
-
-       *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
-       return 0;
-}
-
-static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
-       int ret;
-
-       if (!ci_is_smc_ram_running(hwmgr))
-               return -EINVAL;
-
-       cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
-
-       PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
-
-       ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
-
-       if (ret != 1)
-               pr_info("\n failed to send message %x ret is %d\n",  msg, ret);
-
-       return 0;
-}
-
-static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-                                       uint16_t msg, uint32_t parameter)
-{
-       cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
-       return ci_send_msg_to_smc(hwmgr, msg);
-}
-
-static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       struct cgs_system_info sys_info = {0};
-       uint32_t dev_id;
-
-       sys_info.size = sizeof(struct cgs_system_info);
-       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
-       cgs_query_system_info(hwmgr->device, &sys_info);
-       dev_id = (uint32_t)sys_info.value;
-
-       switch (dev_id) {
-       case 0x67BA:
-       case 0x66B1:
-               smu_data->power_tune_defaults = &defaults_hawaii_pro;
-               break;
-       case 0x67B8:
-       case 0x66B0:
-               smu_data->power_tune_defaults = &defaults_hawaii_xt;
-               break;
-       case 0x6640:
-       case 0x6641:
-       case 0x6646:
-       case 0x6647:
-               smu_data->power_tune_defaults = &defaults_saturn_xt;
-               break;
-       case 0x6649:
-       case 0x6650:
-       case 0x6651:
-       case 0x6658:
-       case 0x665C:
-       case 0x665D:
-       case 0x67A0:
-       case 0x67A1:
-       case 0x67A2:
-       case 0x67A8:
-       case 0x67A9:
-       case 0x67AA:
-       case 0x67B9:
-       case 0x67BE:
-       default:
-               smu_data->power_tune_defaults = &defaults_bonaire_xt;
-               break;
-       }
-}
-
-static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-       struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
-       uint32_t clock, uint32_t *vol)
-{
-       uint32_t i = 0;
-
-       if (allowed_clock_voltage_table->count == 0)
-               return -EINVAL;
-
-       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
-               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
-                       *vol = allowed_clock_voltage_table->entries[i].v;
-                       return 0;
-               }
-       }
-
-       *vol = allowed_clock_voltage_table->entries[i - 1].v;
-       return 0;
-}
-
-static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-               uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
-{
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       uint32_t ref_clock;
-       uint32_t ref_divider;
-       uint32_t fbdiv;
-       int result;
-
-       /* get the engine clock dividers for this clock value */
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error retrieving Engine Clock dividers from VBIOS.",
-                       return result);
-
-       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
-       ref_clock = atomctrl_get_reference_clock(hwmgr);
-       ref_divider = 1 + dividers.uc_pll_ref_div;
-
-       /* low 14 bits is fraction and high 12 bits is divider */
-       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
-       /* SPLL_FUNC_CNTL setup */
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_REF_DIV, dividers.uc_pll_ref_div);
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_PDIV_A,  dividers.uc_pll_post_div);
-
-       /* SPLL_FUNC_CNTL_3 setup*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
-                       SPLL_FB_DIV, fbdiv);
-
-       /* set to use fractional accumulation*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
-                       SPLL_DITHEN, 1);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
-               struct pp_atomctrl_internal_ss_info ss_info;
-               uint32_t vco_freq = clock * dividers.uc_pll_post_div;
-
-               if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
-                               vco_freq, &ss_info)) {
-                       uint32_t clk_s = ref_clock * 5 /
-                                       (ref_divider * ss_info.speed_spectrum_rate);
-                       uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
-                                       fbdiv / (clk_s * 10000);
-
-                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
-                                       CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
-                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
-                                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
-                       cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
-                                       CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
-               }
-       }
-
-       sclk->SclkFrequency        = clock;
-       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
-       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
-       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
-       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
-       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
-
-       return 0;
-}
-
-static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
-                               const struct phm_phase_shedding_limits_table *pl,
-                                       uint32_t sclk, uint32_t *p_shed)
-{
-       unsigned int i;
-
-       /* use the minimum phase shedding */
-       *p_shed = 1;
-
-       for (i = 0; i < pl->count; i++) {
-               if (sclk < pl->entries[i].Sclk) {
-                       *p_shed = i;
-                       break;
-               }
-       }
-}
-
-static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
-                       uint32_t clock_insr)
-{
-       uint8_t i;
-       uint32_t temp;
-       uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
-
-       if (clock < min) {
-               pr_info("Engine clock can't satisfy stutter requirement!\n");
-               return 0;
-       }
-       for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
-               temp = clock >> i;
-
-               if (temp >= min || i == 0)
-                       break;
-       }
-       return i;
-}
-
-static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, uint16_t sclk_al_threshold,
-               struct SMU7_Discrete_GraphicsLevel *level)
-{
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-
-       result = ci_calculate_sclk_params(hwmgr, clock, level);
-
-       /* populate graphics levels */
-       result = ci_get_dependency_volt_by_clk(hwmgr,
-                       hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
-                       (uint32_t *)(&level->MinVddc));
-       if (result) {
-               pr_err("vdd_dep_on_sclk table is NULL\n");
-               return result;
-       }
-
-       level->SclkFrequency = clock;
-       level->MinVddcPhases = 1;
-
-       if (data->vddc_phase_shed_control)
-               ci_populate_phase_value_based_on_sclk(hwmgr,
-                               hwmgr->dyn_state.vddc_phase_shed_limits_table,
-                               clock,
-                               &level->MinVddcPhases);
-
-       level->ActivityLevel = sclk_al_threshold;
-       level->CcPwrDynRm = 0;
-       level->CcPwrDynRm1 = 0;
-       level->EnabledForActivity = 0;
-       /* this level can be used for throttling.*/
-       level->EnabledForThrottle = 1;
-       level->UpH = 0;
-       level->DownH = 0;
-       level->VoltageDownH = 0;
-       level->PowerThrottle = 0;
-
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep))
-               level->DeepSleepDivId =
-                               ci_get_sleep_divider_id_from_clock(clock,
-                                               CISLAND_MINIMUM_ENGINE_CLOCK);
-
-       /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
-       level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       if (0 == result) {
-               level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
-               CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
-               CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
-               CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
-               CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
-               CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
-               CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
-               CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-       }
-
-       return result;
-}
-
-static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       int result = 0;
-       uint32_t array = smu_data->dpm_table_start +
-                       offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
-       uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
-                       SMU7_MAX_LEVELS_GRAPHICS;
-       struct SMU7_Discrete_GraphicsLevel *levels =
-                       smu_data->smc_state_table.GraphicsLevel;
-       uint32_t i;
-
-       for (i = 0; i < dpm_table->sclk_table.count; i++) {
-               result = ci_populate_single_graphic_level(hwmgr,
-                               dpm_table->sclk_table.dpm_levels[i].value,
-                               (uint16_t)smu_data->activity_target[i],
-                               &levels[i]);
-               if (result)
-                       return result;
-               if (i > 1)
-                       smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
-               if (i == (dpm_table->sclk_table.count - 1))
-                       smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
-                               PPSMC_DISPLAY_WATERMARK_HIGH;
-       }
-
-       smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
-       smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
-       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-               phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-       result = ci_copy_bytes_to_smc(hwmgr, array,
-                                  (u8 *)levels, array_size,
-                                  SMC_RAM_END);
-
-       return result;
-
-}
-
-static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
-       smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
-       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
-       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-       return 0;
-}
-
-static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-       uint16_t tdc_limit;
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
-       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
-                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-                       defaults->tdc_vddc_throttle_release_limit_perc;
-       smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
-
-       return 0;
-}
-
-static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
-       uint32_t temp;
-
-       if (ci_read_smc_sram_dword(hwmgr,
-                       fuse_table_offset +
-                       offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
-                       (uint32_t *)&temp, SMC_RAM_END))
-               PP_ASSERT_WITH_CODE(false,
-                               "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
-                               return -EINVAL);
-       else
-               smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
-
-       return 0;
-}
-
-static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-       uint16_t tmp;
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-
-       if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
-               || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
-               tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
-       else
-               tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
-       smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
-
-       return 0;
-}
-
-static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
-       uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
-       uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
-
-       PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
-                           "The CAC Leakage table does not exist!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
-                           "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
-                           "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
-
-       for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
-                       lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
-                       hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
-                       hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
-               } else {
-                       lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
-                       hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
-               }
-       }
-
-       return 0;
-}
-
-static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       uint8_t *vid = smu_data->power_tune_table.VddCVid;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
-               "There should never be more than 8 entries for VddcVid!!!",
-               return -EINVAL);
-
-       for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
-               vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
-
-       return 0;
-}
-
-static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
-       u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
-       int i, min, max;
-
-       min = max = hi_vid[0];
-       for (i = 0; i < 8; i++) {
-               if (0 != hi_vid[i]) {
-                       if (min > hi_vid[i])
-                               min = hi_vid[i];
-                       if (max < hi_vid[i])
-                               max = hi_vid[i];
-               }
-
-               if (0 != lo_vid[i]) {
-                       if (min > lo_vid[i])
-                               min = lo_vid[i];
-                       if (max < lo_vid[i])
-                               max = lo_vid[i];
-               }
-       }
-
-       if ((min == 0) || (max == 0))
-               return -EINVAL;
-       smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
-       smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
-
-       return 0;
-}
-
-static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-       uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
-       struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
-
-       HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-       LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
-       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
-       return 0;
-}
-
-static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       uint32_t pm_fuse_table_offset;
-       int ret = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               if (ci_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU7_Firmware_Header, PmFuseTable),
-                               &pm_fuse_table_offset, SMC_RAM_END)) {
-                       pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
-                       return -EINVAL;
-               }
-
-               /* DW0 - DW3 */
-               ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
-               /* DW4 - DW5 */
-               ret |= ci_populate_vddc_vid(hwmgr);
-               /* DW6 */
-               ret |= ci_populate_svi_load_line(hwmgr);
-               /* DW7 */
-               ret |= ci_populate_tdc_limit(hwmgr);
-               /* DW8 */
-               ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
-
-               ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
-
-               ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
-
-               ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
-               if (ret)
-                       return ret;
-
-               ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
-                               (uint8_t *)&smu_data->power_tune_table,
-                               sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
-       }
-       return ret;
-}
-
-static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
-       SMU7_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
-       struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
-       struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
-       const uint16_t *def1, *def2;
-       int i, j, k;
-
-       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
-       dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
-       dpm_table->DTETjOffset = 0;
-       dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
-       dpm_table->GpuTjHyst = 8;
-
-       dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
-       if (ppm) {
-               dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
-               dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
-       } else {
-               dpm_table->PPM_PkgPwrLimit = 0;
-               dpm_table->PPM_TemperatureLimit = 0;
-       }
-
-       CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
-       CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
-
-       dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
-       def1 = defaults->bapmti_r;
-       def2 = defaults->bapmti_rc;
-
-       for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
-               for (j = 0; j < SMU7_DTE_SOURCES; j++) {
-                       for (k = 0; k < SMU7_DTE_SINKS; k++) {
-                               dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
-                               dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
-                               def1++;
-                               def2++;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
-               pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
-               uint16_t *lo)
-{
-       uint16_t v_index;
-       bool vol_found = false;
-       *hi = tab->value * VOLTAGE_SCALE;
-       *lo = tab->value * VOLTAGE_SCALE;
-
-       PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
-                       "The SCLK/VDDC Dependency Table does not exist.\n",
-                       return -EINVAL);
-
-       if (NULL == hwmgr->dyn_state.cac_leakage_table) {
-               pr_warn("CAC Leakage Table does not exist, using vddc.\n");
-               return 0;
-       }
-
-       for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
-               if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
-                       vol_found = true;
-                       if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
-                               *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
-                               *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
-                       } else {
-                               pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
-                               *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
-                               *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
-                       }
-                       break;
-               }
-       }
-
-       if (!vol_found) {
-               for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
-                       if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
-                               vol_found = true;
-                               if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
-                                       *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
-                                       *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
-                               } else {
-                                       pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
-                                       *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
-                                       *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
-                               }
-                               break;
-                       }
-               }
-
-               if (!vol_found)
-                       pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
-       }
-
-       return 0;
-}
-
-static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
-               pp_atomctrl_voltage_table_entry *tab,
-               SMU7_Discrete_VoltageLevel *smc_voltage_tab)
-{
-       int result;
-
-       result = ci_get_std_voltage_value_sidd(hwmgr, tab,
-                       &smc_voltage_tab->StdVoltageHiSidd,
-                       &smc_voltage_tab->StdVoltageLoSidd);
-       if (result) {
-               smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
-               smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
-       }
-
-       smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
-       CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
-       CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
-
-       return 0;
-}
-
-static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
-                       SMU7_Discrete_DpmTable *table)
-{
-       unsigned int count;
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       table->VddcLevelCount = data->vddc_voltage_table.count;
-       for (count = 0; count < table->VddcLevelCount; count++) {
-               result = ci_populate_smc_voltage_table(hwmgr,
-                               &(data->vddc_voltage_table.entries[count]),
-                               &(table->VddcLevel[count]));
-               PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
-
-               /* GPIO voltage control */
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
-                       table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
-               else
-                       table->VddcLevel[count].Smio = 0;
-       }
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
-
-       return 0;
-}
-
-static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
-                       SMU7_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t count;
-       int result;
-
-       table->VddciLevelCount = data->vddci_voltage_table.count;
-
-       for (count = 0; count < table->VddciLevelCount; count++) {
-               result = ci_populate_smc_voltage_table(hwmgr,
-                               &(data->vddci_voltage_table.entries[count]),
-                               &(table->VddciLevel[count]));
-               PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
-               else
-                       table->VddciLevel[count].Smio |= 0;
-       }
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
-       return 0;
-}
-
-static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
-                       SMU7_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t count;
-       int result;
-
-       table->MvddLevelCount = data->mvdd_voltage_table.count;
-
-       for (count = 0; count < table->MvddLevelCount; count++) {
-               result = ci_populate_smc_voltage_table(hwmgr,
-                               &(data->mvdd_voltage_table.entries[count]),
-                               &table->MvddLevel[count]);
-               PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
-                       table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
-               else
-                       table->MvddLevel[count].Smio |= 0;
-       }
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
-
-       return 0;
-}
-
-
-static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-       SMU7_Discrete_DpmTable *table)
-{
-       int result;
-
-       result = ci_populate_smc_vddc_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "can not populate VDDC voltage table to SMC", return -EINVAL);
-
-       result = ci_populate_smc_vdd_ci_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "can not populate VDDCI voltage table to SMC", return -EINVAL);
-
-       result = ci_populate_smc_mvdd_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "can not populate MVDD voltage table to SMC", return -EINVAL);
-
-       return 0;
-}
-
-static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
-               struct SMU7_Discrete_Ulv *state)
-{
-       uint32_t voltage_response_time, ulv_voltage;
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       state->CcPwrDynRm = 0;
-       state->CcPwrDynRm1 = 0;
-
-       result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
-       PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
-
-       if (ulv_voltage == 0) {
-               data->ulv_supported = false;
-               return 0;
-       }
-
-       if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
-               /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
-               if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
-                       state->VddcOffset = 0;
-               else
-                       /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
-                       state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
-       } else {
-               /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
-               if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
-                       state->VddcOffsetVid = 0;
-               else  /* used in SVI2 Mode */
-                       state->VddcOffsetVid = (uint8_t)(
-                                       (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
-                                               * VOLTAGE_VID_OFFSET_SCALE2
-                                               / VOLTAGE_VID_OFFSET_SCALE1);
-       }
-       state->VddcPhase = 1;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
-       return 0;
-}
-
-static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
-                SMU7_Discrete_Ulv *ulv_level)
-{
-       return ci_populate_ulv_level(hwmgr, ulv_level);
-}
-
-static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       uint32_t i;
-
-/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
-       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-               table->LinkLevel[i].PcieGenSpeed  =
-                       (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-               table->LinkLevel[i].PcieLaneCount =
-                       (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
-               table->LinkLevel[i].EnabledForActivity = 1;
-               table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
-               table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
-       }
-
-       smu_data->smc_state_table.LinkLevelCount =
-               (uint8_t)dpm_table->pcie_speed_table.count;
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-               phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-       return 0;
-}
-
-static int ci_calculate_mclk_params(
-               struct pp_hwmgr *hwmgr,
-               uint32_t memory_clock,
-               SMU7_Discrete_MemoryLevel *mclk,
-               bool strobe_mode,
-               bool dllStateOn
-               )
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
-       uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-       uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
-       uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
-       uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
-       uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
-       uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
-       uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
-       uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
-       pp_atomctrl_memory_clock_param mpll_param;
-       int result;
-
-       result = atomctrl_get_memory_pll_dividers_si(hwmgr,
-                               memory_clock, &mpll_param, strobe_mode);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Error retrieving Memory Clock Parameters from VBIOS.", return result);
-
-       mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
-
-       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-                                                       MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
-       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-                                                       MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
-       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
-                                                       MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
-
-       mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
-                                                       MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-
-       if (data->is_memory_gddr5) {
-               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
-                                                               MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
-               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
-                                                               MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
-               pp_atomctrl_internal_ss_info ss_info;
-               uint32_t freq_nom;
-               uint32_t tmp;
-               uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
-               /* for GDDR5 for all modes and DDR3 */
-               if (1 == mpll_param.qdr)
-                       freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
-               else
-                       freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
-               /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
-               tmp = (freq_nom / reference_clock);
-               tmp = tmp * tmp;
-
-               if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
-                       uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-                       uint32_t clkv =
-                               (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
-                                                       ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
-                       mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
-                       mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
-               }
-       }
-
-       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
-       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
-       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
-
-       mclk->MclkFrequency   = memory_clock;
-       mclk->MpllFuncCntl    = mpll_func_cntl;
-       mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
-       mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
-       mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
-       mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
-       mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
-       mclk->DllCntl         = dll_cntl;
-       mclk->MpllSs1         = mpll_ss1;
-       mclk->MpllSs2         = mpll_ss2;
-
-       return 0;
-}
-
-static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
-               bool strobe_mode)
-{
-       uint8_t mc_para_index;
-
-       if (strobe_mode) {
-               if (memory_clock < 12500)
-                       mc_para_index = 0x00;
-               else if (memory_clock > 47500)
-                       mc_para_index = 0x0f;
-               else
-                       mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
-       } else {
-               if (memory_clock < 65000)
-                       mc_para_index = 0x00;
-               else if (memory_clock > 135000)
-                       mc_para_index = 0x0f;
-               else
-                       mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
-       }
-
-       return mc_para_index;
-}
-
-static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
-       uint8_t mc_para_index;
-
-       if (memory_clock < 10000)
-               mc_para_index = 0;
-       else if (memory_clock >= 80000)
-               mc_para_index = 0x0f;
-       else
-               mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
-
-       return mc_para_index;
-}
-
-static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
-                                       uint32_t memory_clock, uint32_t *p_shed)
-{
-       unsigned int i;
-
-       *p_shed = 1;
-
-       for (i = 0; i < pl->count; i++) {
-               if (memory_clock < pl->entries[i].Mclk) {
-                       *p_shed = i;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int ci_populate_single_memory_level(
-               struct pp_hwmgr *hwmgr,
-               uint32_t memory_clock,
-               SMU7_Discrete_MemoryLevel *memory_level
-               )
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       int result = 0;
-       bool dll_state_on;
-       struct cgs_display_info info = {0};
-       uint32_t mclk_edc_wr_enable_threshold = 40000;
-       uint32_t mclk_edc_enable_threshold = 40000;
-       uint32_t mclk_strobe_mode_threshold = 40000;
-
-       if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
-               result = ci_get_dependency_volt_by_clk(hwmgr,
-                       hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
-       }
-
-       if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
-               result = ci_get_dependency_volt_by_clk(hwmgr,
-                               hwmgr->dyn_state.vddci_dependency_on_mclk,
-                               memory_clock,
-                               &memory_level->MinVddci);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
-       }
-
-       if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
-               result = ci_get_dependency_volt_by_clk(hwmgr,
-                               hwmgr->dyn_state.mvdd_dependency_on_mclk,
-                               memory_clock,
-                               &memory_level->MinMvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result);
-       }
-
-       memory_level->MinVddcPhases = 1;
-
-       if (data->vddc_phase_shed_control) {
-               ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
-                               memory_clock, &memory_level->MinVddcPhases);
-       }
-
-       memory_level->EnabledForThrottle = 1;
-       memory_level->EnabledForActivity = 1;
-       memory_level->UpH = 0;
-       memory_level->DownH = 100;
-       memory_level->VoltageDownH = 0;
-
-       /* Indicates maximum activity level for this performance level.*/
-       memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-       memory_level->StutterEnable = 0;
-       memory_level->StrobeEnable = 0;
-       memory_level->EdcReadEnable = 0;
-       memory_level->EdcWriteEnable = 0;
-       memory_level->RttEnable = 0;
-
-       /* default set to low watermark. Highest level will be set to high later.*/
-       memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-       data->display_timing.num_existing_displays = info.display_count;
-
-       /* stutter mode not support on ci */
-
-       /* decide strobe mode*/
-       memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
-               (memory_clock <= mclk_strobe_mode_threshold);
-
-       /* decide EDC mode and memory clock ratio*/
-       if (data->is_memory_gddr5) {
-               memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
-                                       memory_level->StrobeEnable);
-
-               if ((mclk_edc_enable_threshold != 0) &&
-                               (memory_clock > mclk_edc_enable_threshold)) {
-                       memory_level->EdcReadEnable = 1;
-               }
-
-               if ((mclk_edc_wr_enable_threshold != 0) &&
-                               (memory_clock > mclk_edc_wr_enable_threshold)) {
-                       memory_level->EdcWriteEnable = 1;
-               }
-
-               if (memory_level->StrobeEnable) {
-                       if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
-                                       ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
-                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
-                       else
-                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
-               } else
-                       dll_state_on = data->dll_default_on;
-       } else {
-               memory_level->StrobeRatio =
-                       ci_get_ddr3_mclk_frequency_ratio(memory_clock);
-               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
-       }
-
-       result = ci_calculate_mclk_params(hwmgr,
-               memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
-
-       if (0 == result) {
-               memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
-               memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
-               memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
-               /* MCLK frequency in units of 10KHz*/
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
-               /* Indicates maximum activity level for this performance level.*/
-               CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
-       }
-
-       return result;
-}
-
-static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       int result;
-       struct cgs_system_info sys_info = {0};
-       uint32_t dev_id;
-
-       uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
-       uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
-       SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
-       uint32_t i;
-
-       memset(levels, 0x00, level_array_size);
-
-       for (i = 0; i < dpm_table->mclk_table.count; i++) {
-               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-                       "can not populate memory level as memory clock is zero", return -EINVAL);
-               result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
-                       &(smu_data->smc_state_table.MemoryLevel[i]));
-               if (0 != result)
-                       return result;
-       }
-
-       smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
-       sys_info.size = sizeof(struct cgs_system_info);
-       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
-       cgs_query_system_info(hwmgr->device, &sys_info);
-       dev_id = (uint32_t)sys_info.value;
-
-       if ((dpm_table->mclk_table.count >= 2)
-               && ((dev_id == 0x67B0) ||  (dev_id == 0x67B1))) {
-               smu_data->smc_state_table.MemoryLevel[1].MinVddci =
-                               smu_data->smc_state_table.MemoryLevel[0].MinVddci;
-               smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
-                               smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
-       }
-       smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
-       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
-       smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-       smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       result = ci_copy_bytes_to_smc(hwmgr,
-               level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
-               SMC_RAM_END);
-
-       return result;
-}
-
-static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
-                                       SMU7_Discrete_VoltageLevel *voltage)
-{
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       uint32_t i = 0;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-               /* find mvdd value which clock is more than request */
-               for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
-                       if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
-                               /* Always round to higher voltage. */
-                               voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
-                               break;
-                       }
-               }
-
-               PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
-                       "MVDD Voltage is outside the supported range.", return -EINVAL);
-
-       } else {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
-       SMU7_Discrete_DpmTable *table)
-{
-       int result = 0;
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_clock_dividers_vi dividers;
-
-       SMU7_Discrete_VoltageLevel voltage_level;
-       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-       uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
-       uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
-
-       /* The ACPI state should not do DPM on DC (or ever).*/
-       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-       if (data->acpi_vddc)
-               table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
-       else
-               table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
-
-       table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
-       /* assign zero for now*/
-       table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
-       /* get the engine clock dividers for this clock value*/
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
-               table->ACPILevel.SclkFrequency,  &dividers);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-               "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-       /* divider ID for required SCLK*/
-       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
-       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-       table->ACPILevel.DeepSleepDivId = 0;
-
-       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
-                                                       CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
-       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
-                                                       CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
-       spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
-                                                       CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
-
-       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
-       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
-       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       table->ACPILevel.CcPwrDynRm = 0;
-       table->ACPILevel.CcPwrDynRm1 = 0;
-
-       /* For various features to be enabled/disabled while this level is active.*/
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-       /* SCLK frequency in units of 10KHz*/
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-
-       /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
-       table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
-       table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
-               table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
-       else {
-               if (data->acpi_vddci != 0)
-                       table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
-               else
-                       table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
-       }
-
-       if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
-               table->MemoryACPILevel.MinMvdd =
-                       PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
-       else
-               table->MemoryACPILevel.MinMvdd = 0;
-
-       /* Force reset on DLL*/
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
-       /* Disable DLL in ACPIState*/
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
-       /* Enable DLL bypass signal*/
-       dll_cntl            = PHM_SET_FIELD(dll_cntl,
-               DLL_CNTL, MRDCK0_BYPASS, 0);
-       dll_cntl            = PHM_SET_FIELD(dll_cntl,
-               DLL_CNTL, MRDCK1_BYPASS, 0);
-
-       table->MemoryACPILevel.DllCntl            =
-               PP_HOST_TO_SMC_UL(dll_cntl);
-       table->MemoryACPILevel.MclkPwrmgtCntl     =
-               PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
-       table->MemoryACPILevel.MpllAdFuncCntl     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
-       table->MemoryACPILevel.MpllDqFuncCntl     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
-       table->MemoryACPILevel.MpllFuncCntl       =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
-       table->MemoryACPILevel.MpllFuncCntl_1     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
-       table->MemoryACPILevel.MpllFuncCntl_2     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
-       table->MemoryACPILevel.MpllSs1            =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
-       table->MemoryACPILevel.MpllSs2            =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
-       table->MemoryACPILevel.EnabledForThrottle = 0;
-       table->MemoryACPILevel.EnabledForActivity = 0;
-       table->MemoryACPILevel.UpH = 0;
-       table->MemoryACPILevel.DownH = 100;
-       table->MemoryACPILevel.VoltageDownH = 0;
-       /* Indicates maximum activity level for this performance level.*/
-       table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-       table->MemoryACPILevel.StutterEnable = 0;
-       table->MemoryACPILevel.StrobeEnable = 0;
-       table->MemoryACPILevel.EdcReadEnable = 0;
-       table->MemoryACPILevel.EdcWriteEnable = 0;
-       table->MemoryACPILevel.RttEnable = 0;
-
-       return result;
-}
-
/*
 * ci_populate_smc_uvd_level - fill the SMC DPM table's UVD clock levels.
 *
 * Copies each entry of the pptable UVD clock/voltage dependency table into
 * @table, looks up the VCLK and DCLK PLL post-dividers through atomctrl,
 * then byte-swaps the 32-bit frequencies and 16-bit voltage into SMC
 * (big-endian) order.
 *
 * Returns 0 on success, or the atomctrl error code if a divider lookup fails.
 */
static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
					SMU7_Discrete_DpmTable *table)
{
	int result = 0;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	table->UvdLevelCount = (uint8_t)(uvd_table->count);

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
					uvd_table->entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
					uvd_table->entries[count].dclk;
		/* Voltage is stored in the SMC as value * VOLTAGE_SCALE. */
		table->UvdLevel[count].MinVddc =
					uvd_table->entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].VclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Vclk clock", return result);

		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].DclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Dclk clock", return result);

		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
		/* Convert to SMC endianness last, after the dividers were
		 * computed from the host-order frequencies above. */
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
	}

	return result;
}
-
/*
 * ci_populate_smc_vce_level - fill the SMC DPM table's VCE clock levels.
 *
 * Mirrors ci_populate_smc_uvd_level for the VCE engine clock: one SMC level
 * per entry of the VCE clock/voltage dependency table, with the PLL divider
 * looked up via atomctrl and frequency/voltage byte-swapped for the SMC.
 *
 * NOTE(review): 'result' starts at -EINVAL, so an empty VCE table returns
 * -EINVAL rather than 0 — presumably intentional (at least one level is
 * required), but confirm against the caller's expectations.
 */
static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
		SMU7_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_vce_clock_voltage_dependency_table *vce_table =
				hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	table->VceLevelCount = (uint8_t)(vce_table->count);
	table->VceBootLevel = 0;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
		table->VceLevel[count].MinVoltage =
				vce_table->entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->VceLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for VCE engine clock",
				return result);

		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		/* Byte-swap after the divider lookup used the host-order value. */
		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
	}
	return result;
}
-
/*
 * ci_populate_smc_acp_level - fill the SMC DPM table's ACP clock levels.
 *
 * Same pattern as the VCE populate function, for the ACP block.
 *
 * NOTE(review): unlike the UVD/VCE/SAMU siblings, MinVoltage here is stored
 * raw (no VOLTAGE_SCALE multiply) — verify whether that is intentional for
 * the ACP table layout or a latent inconsistency.
 *
 * NOTE(review): as with VCE, an empty ACP table returns the initial -EINVAL.
 */
static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
					SMU7_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_acp_clock_voltage_dependency_table *acp_table =
				hwmgr->dyn_state.acp_clock_voltage_dependency_table;

	table->AcpLevelCount = (uint8_t)(acp_table->count);
	table->AcpBootLevel = 0;

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
		table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->AcpLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for engine clock", return result);

		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
	}
	return result;
}
-
/*
 * ci_populate_smc_samu_level - fill the SMC DPM table's SAMU clock levels.
 *
 * One SMC level per entry of the SAMU clock/voltage dependency table;
 * divider via atomctrl, frequency and scaled voltage byte-swapped for SMC.
 *
 * NOTE(review): as with VCE/ACP, an empty SAMU table returns the initial
 * -EINVAL rather than 0.
 */
static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
					SMU7_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_samu_clock_voltage_dependency_table *samu_table =
				hwmgr->dyn_state.samu_clock_voltage_dependency_table;

	table->SamuBootLevel = 0;
	table->SamuLevelCount = (uint8_t)(samu_table->count);

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
		table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		/* retrieve divider value for VBIOS */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->SamuLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for samu clock", return result);

		table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
	}
	return result;
}
-
/*
 * ci_populate_memory_timing_parameters - capture MC arbiter timings for one
 * (engine clock, memory clock) pair.
 *
 * Asks the VBIOS (via atomctrl) to program DRAM timings for the given
 * clocks, then reads back the resulting MC_ARB_DRAM_TIMING registers and
 * the STATE0 burst time, storing them in @arb_regs in SMC byte order
 * (the one-byte burst time needs no swap).
 *
 * Returns 0 on success, or the atomctrl error code.
 */
static int ci_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* Read back what the VBIOS just programmed. */
	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
-
/*
 * ci_program_memory_timing_parameters - build and upload the full MC
 * arbiter DRAM timing table.
 *
 * Populates one arbiter entry for every (sclk level, mclk level) pair in
 * the DPM tables, then uploads the whole table to SMC RAM at
 * arb_table_start if every entry succeeded.
 *
 * NOTE(review): on a failing entry only the inner loop is exited; the next
 * outer iteration reassigns 'result', so an early failure can be masked by
 * a later success and a partially-populated table still uploaded — confirm
 * whether this is intended.
 */
static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	int result = 0;
	SMU7_Discrete_MCArbDramTimingTable  arb_regs;
	uint32_t i, j;

	memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
			result = ci_populate_memory_timing_parameters
				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
				 data->dpm_table.mclk_table.dpm_levels[j].value,
				 &arb_regs.entries[i][j]);

			if (0 != result)
				break;
		}
	}

	if (0 == result) {
		result = ci_copy_bytes_to_smc(
				hwmgr,
				smu_data->arb_table_start,
				(uint8_t *)&arb_regs,
				sizeof(SMU7_Discrete_MCArbDramTimingTable),
				SMC_RAM_END
				);
	}

	return result;
}
-
-static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-                       SMU7_Discrete_DpmTable *table)
-{
-       int result = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-
-       table->GraphicsBootLevel = 0;
-       table->MemoryBootLevel = 0;
-
-       /* find boot level from dpm table*/
-       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
-                       data->vbios_boot_state.sclk_bootup_value,
-                       (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
-
-       if (0 != result) {
-               smu_data->smc_state_table.GraphicsBootLevel = 0;
-               pr_err("VBIOS did not find boot engine clock value \
-                       in dependency table. Using Graphics DPM level 0!");
-               result = 0;
-       }
-
-       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
-               data->vbios_boot_state.mclk_bootup_value,
-               (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
-
-       if (0 != result) {
-               smu_data->smc_state_table.MemoryBootLevel = 0;
-               pr_err("VBIOS did not find boot engine clock value \
-                       in dependency table. Using Memory DPM level 0!");
-               result = 0;
-       }
-
-       table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
-       table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
-       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
-       return result;
-}
-
/*
 * ci_populate_mc_reg_address - build the compacted MC register address list
 * for the SMC.
 *
 * Walks the driver-side mc_reg_table and copies only the registers whose
 * bit is set in validflag, compacting them into mc_reg_table->address[]
 * with each 16-bit address pair byte-swapped to SMC order.  The compacted
 * count is written to mc_reg_table->last.
 *
 * Returns 0, or -EINVAL if more valid registers exist than
 * SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE can hold.
 */
static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
				 SMU7_Discrete_MCRegisters *mc_reg_table)
{
	const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;

	uint32_t i, j;

	/* i: compacted output index, j: source index over all registers. */
	for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
		if (smu_data->mc_reg_table.validflag & 1<<j) {
			PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
				"Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
			mc_reg_table->address[i].s0 =
				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 =
				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (uint8_t)i;

	return 0;
}
-
-static void ci_convert_mc_registers(
-       const struct ci_mc_reg_entry *entry,
-       SMU7_Discrete_MCRegisterSet *data,
-       uint32_t num_entries, uint32_t valid_flag)
-{
-       uint32_t i, j;
-
-       for (i = 0, j = 0; j < num_entries; j++) {
-               if (valid_flag & 1<<j) {
-                       data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
-                       i++;
-               }
-       }
-}
-
/*
 * ci_convert_mc_reg_table_entry_to_smc - select and compact the MC register
 * set for a given memory clock.
 *
 * Finds the first mc_reg_table entry whose mclk_max covers @memory_clock;
 * if the clock exceeds every entry's range, the highest entry is used.
 * The chosen entry's valid registers are then compacted into
 * @mc_reg_table_data.
 *
 * Always returns 0.
 */
static int ci_convert_mc_reg_table_entry_to_smc(
		struct pp_hwmgr *hwmgr,
		const uint32_t memory_clock,
		SMU7_Discrete_MCRegisterSet *mc_reg_table_data
		)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t i = 0;

	for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
		if (memory_clock <=
			smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
			break;
		}
	}

	/* Clock above all ranges: fall back to the last (highest) entry. */
	if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, smu_data->mc_reg_table.last,
				smu_data->mc_reg_table.validflag);

	return 0;
}
-
/*
 * ci_convert_mc_reg_table_to_smc - build one SMC MC register set per MCLK
 * DPM level.
 *
 * NOTE(review): the loop continues after a failure and 'result' keeps only
 * the last non-zero error code, so earlier errors can be hidden behind
 * later ones — with the current helper (which always returns 0) this is
 * moot, but worth confirming if the helper ever grows failure paths.
 */
static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
		SMU7_Discrete_MCRegisters *mc_regs)
{
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int res;
	uint32_t i;

	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		res = ci_convert_mc_reg_table_entry_to_smc(
				hwmgr,
				data->dpm_table.mclk_table.dpm_levels[i].value,
				&mc_regs->data[i]
				);

		if (0 != res)
			result = res;
	}

	return result;
}
-
/*
 * ci_update_and_upload_mc_reg_table - refresh the per-MCLK MC register sets
 * in SMC RAM after an overdrive MCLK change.
 *
 * No-op (returns 0) unless DPMTABLE_OD_UPDATE_MCLK is pending.  Only the
 * 'data' array of register sets is re-uploaded; the address list written
 * at init time is left untouched.
 */
static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t address;
	int32_t result;

	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;


	memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));

	result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));

	if (result != 0)
		return result;

	/* Start of the data[] array inside the SMC-resident structure. */
	address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);

	return  ci_copy_bytes_to_smc(hwmgr, address,
				 (uint8_t *)&smu_data->mc_regs.data[0],
				sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
				SMC_RAM_END);
}
-
/*
 * ci_populate_initial_mc_reg_table - first-time upload of the MC register
 * table to the SMC.
 *
 * Builds the compacted address list and one register set per MCLK DPM
 * level in the cached mc_regs structure, then copies the whole structure
 * to SMC RAM at mc_reg_table_start.
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
	result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for the MC register addresses!", return result;);

	result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize MCRegTable for driver state!", return result;);

	return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
			(uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
}
-
-static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       uint8_t count, level;
-
-       count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
-
-       for (level = 0; level < count; level++) {
-               if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
-                        >= data->vbios_boot_state.sclk_bootup_value) {
-                       smu_data->smc_state_table.GraphicsBootLevel = level;
-                       break;
-               }
-       }
-
-       count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
-
-       for (level = 0; level < count; level++) {
-               if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
-                       >= data->vbios_boot_state.mclk_bootup_value) {
-                       smu_data->smc_state_table.MemoryBootLevel = level;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
-                                           SMU7_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
-               table->SVI2Enable = 1;
-       else
-               table->SVI2Enable = 0;
-       return 0;
-}
-
/*
 * ci_start_smc - release the SMC microcontroller and let it run.
 *
 * Points the SMC's program counter at address 0, ungates its clock,
 * deasserts reset, then busy-waits until the firmware flags report that
 * SMC interrupts are enabled (i.e. the firmware is up).
 *
 * Always returns 0; the wait macro handles its own timeout behavior.
 */
static int ci_start_smc(struct pp_hwmgr *hwmgr)
{
	/* set smc instruct start point at 0x0 */
	ci_program_jump_on_start(hwmgr);

	/* enable smc clock */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);

	/* take the SMC out of reset */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);

	/* wait for the firmware to signal readiness */
	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
				 INTERRUPTS_ENABLED, 1);

	return 0;
}
-
/*
 * ci_init_smc_table - build the complete SMC DPM state table and upload it.
 *
 * Master initialization routine: populates every section of the
 * SMU7_Discrete_DpmTable (voltage tables, graphics/memory/link/ACPI/UVD/
 * VCE/ACP/SAMU levels, boot state, BAPM, fixed intervals and thermal
 * limits), byte-swaps the multi-byte fields into SMC order, uploads the
 * table (minus the trailing PID controllers) to SMC RAM, then writes the
 * MC register table and PM fuses and finally starts the SMC.
 *
 * The population order matters: levels are built in host byte order first
 * and converted to SMC order only at the end, and the SMC is started only
 * after everything has been uploaded.
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	SMU7_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
	u32 i;

	ci_initialize_power_tune_defaults(hwmgr);
	memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));

	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
		ci_populate_smc_voltage_tables(hwmgr, table);

	/* System flags reflect the platform capabilities. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;


	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (data->is_memory_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (data->ulv_supported) {
		result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
		PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ULV state!", return result);

		/* 0x40035: ULV parameter value used by this ASIC family. */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_ULV_PARAMETER, 0x40035);
	}

	/* Populate each clock-domain section of the DPM table in turn. */
	result = ci_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Graphics Level!", return result);

	result = ci_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Memory Level!", return result);

	result = ci_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Link Level!", return result);

	result = ci_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACPI Level!", return result);

	result = ci_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize VCE Level!", return result);

	result = ci_populate_smc_acp_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize ACP Level!", return result);

	result = ci_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize SAMU Level!", return result);

	/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
	/* need to populate the  ARB settings for the initial state. */
	result = ci_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to Write ARB settings for the initial state.", return result);

	result = ci_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize UVD Level!", return result);

	table->UvdBootLevel  = 0;
	table->VceBootLevel  = 0;
	table->AcpBootLevel  = 0;
	table->SamuBootLevel  = 0;

	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	result = ci_populate_smc_boot_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to initialize Boot Level!", return result);

	result = ci_populate_smc_initial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);

	result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);

	/* Fixed update intervals and enables for the SMC's internal loops. */
	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable  = 1;
	table->GraphicsThermThrottleEnable  = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval  = 1;
	table->ThermalInterval  = 1;

	/* Thermal limits are converted to the SMC's Q8.8 fixed-point form. */
	table->TemperatureLimitHigh =
		(data->thermal_temp_setting.temperature_high *
		 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	table->TemperatureLimitLow =
		(data->thermal_temp_setting.temperature_low *
		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	table->MemoryVoltageChangeEnable  = 1;
	table->MemoryInterval  = 1;
	table->VoltageResponseTime  = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime  = 0;
	table->MemoryThermThrottleEnable  = 1;

	PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
			"There must be 1 or more PCIE levels defined in PPTable.",
			return -EINVAL);

	table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
	table->PCIeGenInterval = 1;

	ci_populate_smc_svi2_config(hwmgr, table);

	for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
		CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);

	table->ThermGpio  = 17;
	table->SclkStepSize = 0x4000;
	/* VRHot GPIO: enable the RegulatorHot cap only when the pin exists. */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	} else {
		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	}

	table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;

	/* Convert multi-byte fields to SMC (big-endian) order before upload. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
	/* The trailing 3 PID controller structs are excluded from the copy. */
	result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
					offsetof(SMU7_Discrete_DpmTable, SystemFlags),
					(uint8_t *)&(table->SystemFlags),
					sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
					SMC_RAM_END);

	PP_ASSERT_WITH_CODE(0 == result,
		"Failed to upload dpm data to SMC memory!", return result;);

	result = ci_populate_initial_mc_reg_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
		"Failed to populate initialize MC Reg table!", return result);

	result = ci_populate_pm_fuses(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to  populate PM fuses to SMC memory!", return result);

	/* Everything is uploaded: let the SMC run. */
	ci_start_smc(hwmgr);

	return 0;
}
-
-static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
-       struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-       uint32_t duty100;
-       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-       uint16_t fdo_min, slope1, slope2;
-       uint32_t reference_clock;
-       int res;
-       uint64_t tmp64;
-
-       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
-               return 0;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       if (0 == ci_data->fan_table_start) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (0 == duty100) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
-       do_div(tmp64, 10000);
-       fdo_min = (uint16_t)tmp64;
-
-       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
-       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
-       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-       fan_table.Slope1 = cpu_to_be16(slope1);
-       fan_table.Slope2 = cpu_to_be16(slope2);
-
-       fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
-       fan_table.HystUp = cpu_to_be16(1);
-
-       fan_table.HystSlope = cpu_to_be16(1);
-
-       fan_table.TempRespLim = cpu_to_be16(5);
-
-       reference_clock = smu7_get_xclk(hwmgr);
-
-       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
-       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-       res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
-
-       return 0;
-}
-
-static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (data->need_update_smu7_dpm_table &
-                       (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
-               return ci_program_memory_timing_parameters(hwmgr);
-
-       return 0;
-}
-
/*
 * ci_update_sclk_threshold - push runtime DPM updates to the SMC.
 *
 * If the low-SCLK interrupt threshold changed (and the notification cap is
 * enabled), writes the new value into the SMC-resident DPM table; then
 * re-uploads the MC register table and re-programs memory timings when
 * those are pending.
 *
 * NOTE(review): the result of the threshold upload is overwritten by the
 * MC register upload result before it is ever checked — confirm whether a
 * threshold-write failure is meant to be ignored.
 */
static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	int result = 0;
	uint32_t low_sclk_interrupt_threshold = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification)
		&& (hwmgr->gfx_arbiter.sclk_threshold !=
				data->low_sclk_interrupt_threshold)) {
		data->low_sclk_interrupt_threshold =
				hwmgr->gfx_arbiter.sclk_threshold;
		low_sclk_interrupt_threshold =
				data->low_sclk_interrupt_threshold;

		/* SMC expects the value in its own (big-endian) byte order. */
		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);

		result = ci_copy_bytes_to_smc(
				hwmgr,
				smu_data->dpm_table_start +
				offsetof(SMU7_Discrete_DpmTable,
					LowSclkInterruptT),
				(uint8_t *)&low_sclk_interrupt_threshold,
				sizeof(uint32_t),
				SMC_RAM_END);
	}

	result = ci_update_and_upload_mc_reg_table(hwmgr);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);

	result = ci_program_mem_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((result == 0),
			"Failed to program memory timing parameters!",
			);

	return result;
}
-
-static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
-{
-       switch (type) {
-       case SMU_SoftRegisters:
-               switch (member) {
-               case HandshakeDisables:
-                       return offsetof(SMU7_SoftRegisters, HandshakeDisables);
-               case VoltageChangeTimeout:
-                       return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
-               case AverageGraphicsActivity:
-                       return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
-               case PreVBlankGap:
-                       return offsetof(SMU7_SoftRegisters, PreVBlankGap);
-               case VBlankTimeout:
-                       return offsetof(SMU7_SoftRegisters, VBlankTimeout);
-               }
-       case SMU_Discrete_DpmTable:
-               switch (member) {
-               case LowSclkInterruptThreshold:
-                       return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
-               }
-       }
-       pr_debug("can't get the offset of type %x member %x\n", type, member);
-       return 0;
-}
-
-static uint32_t ci_get_mac_definition(uint32_t value)
-{
-       switch (value) {
-       case SMU_MAX_LEVELS_GRAPHICS:
-               return SMU7_MAX_LEVELS_GRAPHICS;
-       case SMU_MAX_LEVELS_MEMORY:
-               return SMU7_MAX_LEVELS_MEMORY;
-       case SMU_MAX_LEVELS_LINK:
-               return SMU7_MAX_LEVELS_LINK;
-       case SMU_MAX_ENTRIES_SMIO:
-               return SMU7_MAX_ENTRIES_SMIO;
-       case SMU_MAX_LEVELS_VDDC:
-               return SMU7_MAX_LEVELS_VDDC;
-       case SMU_MAX_LEVELS_VDDCI:
-               return SMU7_MAX_LEVELS_VDDCI;
-       case SMU_MAX_LEVELS_MVDD:
-               return SMU7_MAX_LEVELS_MVDD;
-       }
-
-       pr_debug("can't get the mac of %x\n", value);
-       return 0;
-}
-
-static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
-{
-       uint32_t byte_count, start_addr;
-       uint8_t *src;
-       uint32_t data;
-
-       struct cgs_firmware_info info = {0};
-
-       cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
-
-       hwmgr->is_kicker = info.is_kicker;
-       byte_count = info.image_size;
-       src = (uint8_t *)info.kptr;
-       start_addr = info.ucode_start_address;
-
-       if  (byte_count > SMC_RAM_END) {
-               pr_err("SMC address is beyond the SMC RAM area.\n");
-               return -EINVAL;
-       }
-
-       cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
-       PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
-       for (; byte_count >= 4; byte_count -= 4) {
-               data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-               cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
-               src += 4;
-       }
-       PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-
-       if (0 != byte_count) {
-               pr_err("SMC size must be divisible by 4\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
-{
-       if (ci_is_smc_ram_running(hwmgr)) {
-               pr_info("smc is running, no need to load smc firmware\n");
-               return 0;
-       }
-       PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
-                       boot_seq_done, 1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
-                       pre_fetcher_en, 1);
-
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
-       return ci_load_smc_ucode(hwmgr);
-}
-
-static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-
-       uint32_t tmp = 0;
-       int result;
-       bool error = false;
-
-       if (ci_upload_firmware(hwmgr))
-               return -EINVAL;
-
-       result = ci_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU7_Firmware_Header, DpmTable),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result)
-               ci_data->dpm_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = ci_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU7_Firmware_Header, SoftRegisters),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result) {
-               data->soft_regs_start = tmp;
-               ci_data->soft_regs_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = ci_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU7_Firmware_Header, mcRegisterTable),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result)
-               ci_data->mc_reg_table_start = tmp;
-
-       result = ci_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU7_Firmware_Header, FanTable),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result)
-               ci_data->fan_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = ci_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result)
-               ci_data->arb_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = ci_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU7_Firmware_Header, Version),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result)
-               hwmgr->microcode_version_info.SMC = tmp;
-
-       error |= (0 != result);
-
-       return error ? 1 : 0;
-}
-
-static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
-       return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
-{
-       bool result = true;
-
-       switch (in_reg) {
-       case  mmMC_SEQ_RAS_TIMING:
-               *out_reg = mmMC_SEQ_RAS_TIMING_LP;
-               break;
-
-       case  mmMC_SEQ_DLL_STBY:
-               *out_reg = mmMC_SEQ_DLL_STBY_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CMD0:
-               *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CMD1:
-               *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CTRL:
-               *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
-               break;
-
-       case mmMC_SEQ_CAS_TIMING:
-               *out_reg = mmMC_SEQ_CAS_TIMING_LP;
-               break;
-
-       case mmMC_SEQ_MISC_TIMING:
-               *out_reg = mmMC_SEQ_MISC_TIMING_LP;
-               break;
-
-       case mmMC_SEQ_MISC_TIMING2:
-               *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
-               break;
-
-       case mmMC_SEQ_PMG_DVS_CMD:
-               *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
-               break;
-
-       case mmMC_SEQ_PMG_DVS_CTL:
-               *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
-               break;
-
-       case mmMC_SEQ_RD_CTL_D0:
-               *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
-               break;
-
-       case mmMC_SEQ_RD_CTL_D1:
-               *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_D0:
-               *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_D1:
-               *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
-               break;
-
-       case mmMC_PMG_CMD_EMRS:
-               *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS1:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
-               break;
-
-       case mmMC_SEQ_PMG_TIMING:
-               *out_reg = mmMC_SEQ_PMG_TIMING_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS2:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_2:
-               *out_reg = mmMC_SEQ_WR_CTL_2_LP;
-               break;
-
-       default:
-               result = false;
-               break;
-       }
-
-       return result;
-}
-
-static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
-{
-       uint32_t i;
-       uint16_t address;
-
-       for (i = 0; i < table->last; i++) {
-               table->mc_reg_address[i].s0 =
-                       ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
-                       ? address : table->mc_reg_address[i].s1;
-       }
-       return 0;
-}
-
-static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
-                                       struct ci_mc_reg_table *ni_table)
-{
-       uint8_t i, j;
-
-       PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-               "Invalid VramInfo table.", return -EINVAL);
-       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
-               "Invalid VramInfo table.", return -EINVAL);
-
-       for (i = 0; i < table->last; i++)
-               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-
-       ni_table->last = table->last;
-
-       for (i = 0; i < table->num_entries; i++) {
-               ni_table->mc_reg_table_entry[i].mclk_max =
-                       table->mc_reg_table_entry[i].mclk_max;
-               for (j = 0; j < table->last; j++) {
-                       ni_table->mc_reg_table_entry[i].mc_data[j] =
-                               table->mc_reg_table_entry[i].mc_data[j];
-               }
-       }
-
-       ni_table->num_entries = table->num_entries;
-
-       return 0;
-}
-
-static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
-                                       struct ci_mc_reg_table *table)
-{
-       uint8_t i, j, k;
-       uint32_t temp_reg;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       for (i = 0, j = table->last; i < table->last; i++) {
-               PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                       "Invalid VramInfo table.", return -EINVAL);
-
-               switch (table->mc_reg_address[i].s1) {
-
-               case mmMC_SEQ_MISC1:
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       ((temp_reg & 0xffff0000)) |
-                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       (temp_reg & 0xffff0000) |
-                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
-                               if (!data->is_memory_gddr5)
-                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-
-                       if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
-                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
-                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
-                               for (k = 0; k < table->num_entries; k++) {
-                                       table->mc_reg_table_entry[k].mc_data[j] =
-                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
-                               }
-                               j++;
-                               PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                                       "Invalid VramInfo table.", return -EINVAL);
-                       }
-
-                       break;
-
-               case mmMC_SEQ_RESERVE_M:
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       (temp_reg & 0xffff0000) |
-                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-                       break;
-
-               default:
-                       break;
-               }
-
-       }
-
-       table->last = j;
-
-       return 0;
-}
-
-static int ci_set_valid_flag(struct ci_mc_reg_table *table)
-{
-       uint8_t i, j;
-
-       for (i = 0; i < table->last; i++) {
-               for (j = 1; j < table->num_entries; j++) {
-                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
-                               table->mc_reg_table_entry[j].mc_data[i]) {
-                               table->validflag |= (1 << i);
-                               break;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
-       pp_atomctrl_mc_reg_table *table;
-       struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
-       uint8_t module_index = ci_get_memory_modile_index(hwmgr);
-
-       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       /* Program additional LP registers that are no longer programmed by VBIOS */
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
-       memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
-       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
-       if (0 == result)
-               result = ci_copy_vbios_smc_reg_table(table, ni_table);
-
-       if (0 == result) {
-               ci_set_s0_mc_reg_index(ni_table);
-               result = ci_set_mc_special_registers(hwmgr, ni_table);
-       }
-
-       if (0 == result)
-               ci_set_valid_flag(ni_table);
-
-       kfree(table);
-
-       return result;
-}
-
-static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       return ci_is_smc_ram_running(hwmgr);
-}
-
-static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
-                                               struct amd_pp_profile *request)
-{
-       struct ci_smumgr *smu_data = (struct ci_smumgr *)
-                       (hwmgr->smu_backend);
-       struct SMU7_Discrete_GraphicsLevel *levels =
-                       smu_data->smc_state_table.GraphicsLevel;
-       uint32_t array = smu_data->dpm_table_start +
-                       offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
-       uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
-                       SMU7_MAX_LEVELS_GRAPHICS;
-       uint32_t i;
-
-       for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
-               levels[i].ActivityLevel =
-                               cpu_to_be16(request->activity_threshold);
-               levels[i].EnabledForActivity = 1;
-               levels[i].UpH = request->up_hyst;
-               levels[i].DownH = request->down_hyst;
-       }
-
-       return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
-                               array_size, SMC_RAM_END);
-}
-
-
-static int ci_smu_init(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct ci_smumgr *ci_priv = NULL;
-
-       ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
-
-       if (ci_priv == NULL)
-               return -ENOMEM;
-
-       for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
-               ci_priv->activity_target[i] = 30;
-
-       hwmgr->smu_backend = ci_priv;
-
-       return 0;
-}
-
-static int ci_smu_fini(struct pp_hwmgr *hwmgr)
-{
-       kfree(hwmgr->smu_backend);
-       hwmgr->smu_backend = NULL;
-       cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
-       return 0;
-}
-
-static int ci_start_smu(struct pp_hwmgr *hwmgr)
-{
-       return 0;
-}
-
-const struct pp_smumgr_func ci_smu_funcs = {
-       .smu_init = ci_smu_init,
-       .smu_fini = ci_smu_fini,
-       .start_smu = ci_start_smu,
-       .check_fw_load_finish = NULL,
-       .request_smu_load_fw = NULL,
-       .request_smu_load_specific_fw = NULL,
-       .send_msg_to_smc = ci_send_msg_to_smc,
-       .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
-       .download_pptable_settings = NULL,
-       .upload_pptable_settings = NULL,
-       .get_offsetof = ci_get_offsetof,
-       .process_firmware_header = ci_process_firmware_header,
-       .init_smc_table = ci_init_smc_table,
-       .update_sclk_threshold = ci_update_sclk_threshold,
-       .thermal_setup_fan_table = ci_thermal_setup_fan_table,
-       .populate_all_graphic_levels = ci_populate_all_graphic_levels,
-       .populate_all_memory_levels = ci_populate_all_memory_levels,
-       .get_mac_definition = ci_get_mac_definition,
-       .initialize_mc_reg_table = ci_initialize_mc_reg_table,
-       .is_dpm_running = ci_is_dpm_running,
-       .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
-};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
new file mode 100644 (file)
index 0000000..4d672cd
--- /dev/null
@@ -0,0 +1,2818 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "linux/delay.h"
+#include <linux/types.h>
+
+#include "smumgr.h"
+#include "pp_debug.h"
+#include "ci_smumgr.h"
+#include "ppsmc.h"
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "processpptables.h"
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE               4
+#define VOLTAGE_VID_OFFSET_SCALE1    625
+#define VOLTAGE_VID_OFFSET_SCALE2    100
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+
+static const struct ci_pt_defaults defaults_hawaii_xt = {
+       1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+       { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+       { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro = {
+       1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+       { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+       { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_xt = {
+       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+       { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
+       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+
+static const struct ci_pt_defaults defaults_saturn_xt = {
+       1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+       { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
+       { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+
+static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
+                                       uint32_t smc_addr, uint32_t limit)
+{
+       if ((0 != (3 & smc_addr))
+               || ((smc_addr + 3) >= limit)) {
+               pr_err("smc_addr invalid \n");
+               return -EINVAL;
+       }
+
+       cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
+       PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+       return 0;
+}
+
+static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
+                               const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+       int result;
+       uint32_t data = 0;
+       uint32_t original_data;
+       uint32_t addr = 0;
+       uint32_t extra_shift;
+
+       if ((3 & smc_start_address)
+               || ((smc_start_address + byte_count) >= limit)) {
+               pr_err("smc_start_address invalid \n");
+               return -EINVAL;
+       }
+
+       addr = smc_start_address;
+
+       while (byte_count >= 4) {
+       /* Bytes are written into the SMC address space with the MSB first. */
+               data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+               result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+               if (0 != result)
+                       return result;
+
+               cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+
+               src += 4;
+               byte_count -= 4;
+               addr += 4;
+       }
+
+       if (0 != byte_count) {
+
+               data = 0;
+
+               result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+               if (0 != result)
+                       return result;
+
+
+               original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+
+               extra_shift = 8 * (4 - byte_count);
+
+               while (byte_count > 0) {
+                       /* Bytes are written into the SMC addres space with the MSB first. */
+                       data = (0x100 * data) + *src++;
+                       byte_count--;
+               }
+
+               data <<= extra_shift;
+
+               data |= (original_data & ~((~0UL) << extra_shift));
+
+               result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+               if (0 != result)
+                       return result;
+
+               cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+       }
+
+       return 0;
+}
+
+
+static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
+{
+       static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+       ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
+
+       return 0;
+}
+
+bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+       return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+       && (0x20100 <= cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
+                               uint32_t *value, uint32_t limit)
+{
+       int result;
+
+       result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
+
+       if (result)
+               return result;
+
+       *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+       return 0;
+}
+
+static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+       int ret;
+
+       if (!ci_is_smc_ram_running(hwmgr))
+               return -EINVAL;
+
+       cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
+
+       PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+
+       ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
+
+       if (ret != 1)
+               pr_info("\n failed to send message %x ret is %d\n",  msg, ret);
+
+       return 0;
+}
+
+static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+                                       uint16_t msg, uint32_t parameter)
+{
+       cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
+       return ci_send_msg_to_smc(hwmgr, msg);
+}
+
+static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       struct cgs_system_info sys_info = {0};
+       uint32_t dev_id;
+
+       sys_info.size = sizeof(struct cgs_system_info);
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       dev_id = (uint32_t)sys_info.value;
+
+       switch (dev_id) {
+       case 0x67BA:
+       case 0x66B1:
+               smu_data->power_tune_defaults = &defaults_hawaii_pro;
+               break;
+       case 0x67B8:
+       case 0x66B0:
+               smu_data->power_tune_defaults = &defaults_hawaii_xt;
+               break;
+       case 0x6640:
+       case 0x6641:
+       case 0x6646:
+       case 0x6647:
+               smu_data->power_tune_defaults = &defaults_saturn_xt;
+               break;
+       case 0x6649:
+       case 0x6650:
+       case 0x6651:
+       case 0x6658:
+       case 0x665C:
+       case 0x665D:
+       case 0x67A0:
+       case 0x67A1:
+       case 0x67A2:
+       case 0x67A8:
+       case 0x67A9:
+       case 0x67AA:
+       case 0x67B9:
+       case 0x67BE:
+       default:
+               smu_data->power_tune_defaults = &defaults_bonaire_xt;
+               break;
+       }
+}
+
+static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+       struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+       uint32_t clock, uint32_t *vol)
+{
+       uint32_t i = 0;
+
+       if (allowed_clock_voltage_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+                       *vol = allowed_clock_voltage_table->entries[i].v;
+                       return 0;
+               }
+       }
+
+       *vol = allowed_clock_voltage_table->entries[i - 1].v;
+       return 0;
+}
+
+/*
+ * ci_calculate_sclk_params - build the SPLL register image for one SCLK level.
+ *
+ * Queries the VBIOS (atomctrl) for engine PLL dividers at @clock, derives the
+ * feedback divider, and fills @sclk with the CG_SPLL_FUNC_CNTL and
+ * spread-spectrum register values (starting from the saved boot-time values in
+ * data->clock_registers).  Returns 0 on success or the atomctrl error code.
+ */
+static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
+{
+	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct pp_atomctrl_clock_dividers_vi dividers;
+	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+	uint32_t ref_clock;
+	uint32_t ref_divider;
+	uint32_t fbdiv;
+	int result;
+
+	/* get the engine clock dividers for this clock value */
+	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
+
+	PP_ASSERT_WITH_CODE(result == 0,
+			"Error retrieving Engine Clock dividers from VBIOS.",
+			return result);
+
+	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+	ref_clock = atomctrl_get_reference_clock(hwmgr);
+	ref_divider = 1 + dividers.uc_pll_ref_div;
+
+	/* low 14 bits is fraction and high 12 bits is divider */
+	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+	/* SPLL_FUNC_CNTL setup */
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+			SPLL_REF_DIV, dividers.uc_pll_ref_div);
+	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+			SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+	/* SPLL_FUNC_CNTL_3 setup*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+			SPLL_FB_DIV, fbdiv);
+
+	/* set to use fractional accumulation*/
+	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+			SPLL_DITHEN, 1);
+
+	/* Optional engine spread spectrum: derive CLKS/CLKV from the VBIOS SS
+	 * info for the VCO frequency; leave the boot-time values otherwise. */
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+		struct pp_atomctrl_internal_ss_info ss_info;
+		uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+
+		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+				vco_freq, &ss_info)) {
+			uint32_t clk_s = ref_clock * 5 /
+					(ref_divider * ss_info.speed_spectrum_rate);
+			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
+					fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+		}
+	}
+
+	sclk->SclkFrequency        = clock;
+	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+	return 0;
+}
+
+/*
+ * ci_populate_phase_value_based_on_sclk - pick a phase-shedding index for @sclk.
+ *
+ * Defaults *p_shed to 1 (minimum shedding), then selects the index of the
+ * first limits-table entry whose Sclk is above @sclk.
+ * NOTE(review): if the very first entry already exceeds @sclk, *p_shed is set
+ * to 0, below the stated minimum of 1 — confirm this is intended.
+ */
+static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+				const struct phm_phase_shedding_limits_table *pl,
+					uint32_t sclk, uint32_t *p_shed)
+{
+	unsigned int i;
+
+	/* use the minimum phase shedding */
+	*p_shed = 1;
+
+	for (i = 0; i < pl->count; i++) {
+		if (sclk < pl->entries[i].Sclk) {
+			*p_shed = i;
+			break;
+		}
+	}
+}
+
+/*
+ * ci_get_sleep_divider_id_from_clock - deepest sleep divider usable at @clock.
+ *
+ * Returns the largest divider id i (counting down from
+ * CISLAND_MAX_DEEPSLEEP_DIVIDER_ID) such that clock >> i still meets the
+ * floor, where the floor is the smaller of @clock_insr and
+ * CISLAND_MINIMUM_ENGINE_CLOCK.  Returns 0 (no deep sleep) if @clock itself
+ * is already below the floor.
+ */
+static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
+			uint32_t clock_insr)
+{
+	uint8_t i;
+	uint32_t temp;
+	uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
+
+	if (clock < min) {
+		pr_info("Engine clock can't satisfy stutter requirement!\n");
+		return 0;
+	}
+	/* i == 0 terminates the loop even if no divider satisfies the floor */
+	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
+		temp = clock >> i;
+
+		if (temp >= min || i == 0)
+			break;
+	}
+	return i;
+}
+
+/*
+ * ci_populate_single_graphic_level - fill one SMU7 graphics DPM level.
+ *
+ * Builds the SPLL register image for @clock, looks up the minimum VDDC from
+ * the SCLK/VDDC dependency table, and fills the remaining level fields
+ * (activity threshold, throttling, deep-sleep divider, watermark).  All
+ * multi-byte fields are byte-swapped to SMC endianness before returning.
+ * Returns 0 on success or a negative error code.
+ */
+static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+		uint32_t clock, uint16_t sclk_al_threshold,
+		struct SMU7_Discrete_GraphicsLevel *level)
+{
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+
+	result = ci_calculate_sclk_params(hwmgr, clock, level);
+	/* Bug fix: this result was previously overwritten without being
+	 * checked, so a VBIOS divider-query failure went unnoticed. */
+	if (result)
+		return result;
+
+	/* populate graphics levels */
+	result = ci_get_dependency_volt_by_clk(hwmgr,
+			hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
+			(uint32_t *)(&level->MinVddc));
+	if (result) {
+		pr_err("vdd_dep_on_sclk table is NULL\n");
+		return result;
+	}
+
+	level->SclkFrequency = clock;
+	level->MinVddcPhases = 1;
+
+	if (data->vddc_phase_shed_control)
+		ci_populate_phase_value_based_on_sclk(hwmgr,
+				hwmgr->dyn_state.vddc_phase_shed_limits_table,
+				clock,
+				&level->MinVddcPhases);
+
+	level->ActivityLevel = sclk_al_threshold;
+	level->CcPwrDynRm = 0;
+	level->CcPwrDynRm1 = 0;
+	level->EnabledForActivity = 0;
+	/* this level can be used for throttling.*/
+	level->EnabledForThrottle = 1;
+	level->UpH = 0;
+	level->DownH = 0;
+	level->VoltageDownH = 0;
+	level->PowerThrottle = 0;
+
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_SclkDeepSleep))
+		level->DeepSleepDivId =
+				ci_get_sleep_divider_id_from_clock(clock,
+						CISLAND_MINIMUM_ENGINE_CLOCK);
+
+	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	/* convert host-endian fields to SMC byte order */
+	level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+	return 0;
+}
+
+/*
+ * ci_populate_all_graphic_levels - fill every graphics DPM level and upload
+ * the GraphicsLevel array to SMC RAM.
+ *
+ * Iterates the SCLK DPM table, populating one SMU7_Discrete_GraphicsLevel per
+ * entry, then writes the whole (fixed-size) array into the SMC DPM table.
+ * Returns 0 on success or the first error encountered.
+ */
+static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	int result = 0;
+	uint32_t array = smu_data->dpm_table_start +
+			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+			SMU7_MAX_LEVELS_GRAPHICS;
+	struct SMU7_Discrete_GraphicsLevel *levels =
+			smu_data->smc_state_table.GraphicsLevel;
+	uint32_t i;
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		result = ci_populate_single_graphic_level(hwmgr,
+				dpm_table->sclk_table.dpm_levels[i].value,
+				(uint16_t)smu_data->activity_target[i],
+				&levels[i]);
+		if (result)
+			return result;
+		/* NOTE(review): deep sleep is disabled from level 2 up (i > 1);
+		 * confirm levels 0 and 1 are the intended deep-sleep levels. */
+		if (i > 1)
+			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+		/* highest level gets the fast display watermark */
+		if (i == (dpm_table->sclk_table.count - 1))
+			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+				PPSMC_DISPLAY_WATERMARK_HIGH;
+	}
+
+	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+	/* upload the populated array into SMC RAM */
+	result = ci_copy_bytes_to_smc(hwmgr, array,
+				   (u8 *)levels, array_size,
+				   SMC_RAM_END);
+
+	return result;
+
+}
+
+/*
+ * ci_populate_svi_load_line - fill the SVI load-line fields of the power-tune
+ * table from the chip-specific defaults selected earlier.  Always returns 0.
+ */
+static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+/*
+ * ci_populate_tdc_limit - fill the TDC (thermal design current) limit fields
+ * of the power-tune table.
+ *
+ * usTDC is scaled by 256 (8.8 fixed point) and byte-swapped for the SMC; the
+ * throttle-release percentage and MAWt come from the chip defaults.
+ */
+static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+	uint16_t tdc_limit;
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+			defaults->tdc_vddc_throttle_release_limit_perc;
+	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+	return 0;
+}
+
+/*
+ * ci_populate_dw8 - fill TdcWaterfallCtl (DW8) in the power-tune table.
+ *
+ * Reads the current TdcWaterfallCtl dword from SMC RAM to verify the fuse
+ * table is accessible, then sets the chip-default value.
+ * NOTE(review): the value read into @temp is discarded and the default is
+ * written unconditionally — confirm the read is only an accessibility probe.
+ */
+static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+	uint32_t temp;
+
+	if (ci_read_smc_sram_dword(hwmgr,
+			fuse_table_offset +
+			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
+			(uint32_t *)&temp, SMC_RAM_END))
+		PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+				return -EINVAL);
+	else
+		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+	return 0;
+}
+
+/*
+ * ci_populate_fuzzy_fan - fill FuzzyFan_PwmSetDelta in the power-tune table.
+ *
+ * Uses the platform fan output sensitivity if it is explicitly set (bit 15
+ * acts as a "use as-is" flag) or zero; otherwise falls back to the default
+ * sensitivity.  The value is byte-swapped for the SMC.
+ */
+static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+	uint16_t tmp;
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
+	else
+		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
+
+	return 0;
+}
+
+/*
+ * ci_populate_bapm_vddc_vid_sidd - fill the BapmVddCVid Hi/Lo/Hi2 SIDD arrays
+ * from the CAC leakage table.
+ *
+ * With EVV enabled the table provides Vddc1/Vddc2/Vddc3 triplets; otherwise
+ * Vddc and Leakage are used for lo/hi.  Requires the leakage table to exist,
+ * to have at most 8 entries, and to match the SCLK/VDDC dependency table in
+ * size.  Returns 0 on success, -EINVAL on any precondition failure.
+ */
+static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
+
+	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+			    "The CAC Leakage table does not exist!", return -EINVAL);
+	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
+		} else {
+			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
+			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ci_populate_vddc_vid - fill the VddCVid array of the power-tune table from
+ * the VDDC voltage table (max 8 entries).  Returns 0 on success, -EINVAL if
+ * the voltage table is too large.
+ */
+static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+	int i;
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	uint8_t *vid = smu_data->power_tune_table.VddCVid;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+		"There should never be more than 8 entries for VddcVid!!!",
+		return -EINVAL);
+
+	for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
+		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+
+	return 0;
+}
+
+/*
+ * ci_min_max_v_gnbl_pm_lid_from_bapm_vddc - derive GnbLPML min/max VIDs.
+ *
+ * Scans the 8-entry BapmVddCVid hi/lo SIDD arrays, ignoring zero slots, and
+ * stores the overall min and max into GnbLPMLMinVid/GnbLPMLMaxVid.
+ * Returns 0 on success, -EINVAL if either bound ends up zero.
+ * NOTE(review): min/max start from hi_vid[0] even when it is 0; a zero first
+ * slot with nonzero later slots leaves min stuck at 0 and fails — confirm
+ * slot 0 is always populated.
+ */
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+	u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+	int i, min, max;
+
+	min = max = hi_vid[0];
+	for (i = 0; i < 8; i++) {
+		if (0 != hi_vid[i]) {
+			if (min > hi_vid[i])
+				min = hi_vid[i];
+			if (max < hi_vid[i])
+				max = hi_vid[i];
+		}
+
+		if (0 != lo_vid[i]) {
+			if (min > lo_vid[i])
+				min = lo_vid[i];
+			if (max < lo_vid[i])
+				max = lo_vid[i];
+		}
+	}
+
+	if ((min == 0) || (max == 0))
+		return -EINVAL;
+	smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
+	smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
+
+	return 0;
+}
+
+/*
+ * ci_populate_bapm_vddc_base_leakage_sidd - fill the base-leakage Hi/Lo SIDD
+ * fields of the power-tune table from the CAC DTP table.
+ *
+ * The high/low CAC leakage percentages are converted to 8.8 fixed point
+ * (value / 100 * 256) and byte-swapped for the SMC.  Always returns 0.
+ */
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+	uint16_t HiSidd;
+	uint16_t LoSidd;
+
+	/* Cleanup: HiSidd/LoSidd were previously initialized from the very
+	 * fields they overwrite below — the dead stores are removed. */
+	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+	return 0;
+}
+
+/*
+ * ci_populate_pm_fuses - build and upload the SMC PmFuses table.
+ *
+ * Only active when PHM_PlatformCaps_PowerContainment is enabled.  Reads the
+ * fuse-table offset from the SMC firmware header, fills each DW group via the
+ * helpers above (errors are OR-combined), then copies the whole
+ * SMU7_Discrete_PmFuses structure into SMC RAM.  Returns 0 on success or a
+ * negative error code.
+ */
+static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	uint32_t pm_fuse_table_offset;
+	int ret = 0;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PowerContainment)) {
+		if (ci_read_smc_sram_dword(hwmgr,
+				SMU7_FIRMWARE_HEADER_LOCATION +
+				offsetof(SMU7_Firmware_Header, PmFuseTable),
+				&pm_fuse_table_offset, SMC_RAM_END)) {
+			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
+			return -EINVAL;
+		}
+
+		/* DW0 - DW3 */
+		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
+		/* DW4 - DW5 */
+		ret |= ci_populate_vddc_vid(hwmgr);
+		/* DW6 */
+		ret |= ci_populate_svi_load_line(hwmgr);
+		/* DW7 */
+		ret |= ci_populate_tdc_limit(hwmgr);
+		/* DW8 */
+		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
+
+		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
+
+		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
+
+		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
+		if (ret)
+			return ret;
+
+		/* all groups populated: push the table to the SMC */
+		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+				(uint8_t *)&smu_data->power_tune_table,
+				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
+	}
+	return ret;
+}
+
+/*
+ * ci_populate_bapm_parameters_in_dpm_table - fill BAPM/DTE thermal parameters
+ * of the SMC DPM table.
+ *
+ * TDP values are converted to 8.8 fixed point (* 256); PPM package power and
+ * temperature limits come from the ppm table when present, otherwise zero.
+ * The BAPMTI_R/RC matrices are filled row-major from the flat default arrays.
+ * Always returns 0.
+ */
+static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+	SMU7_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+	const uint16_t *def1, *def2;
+	int i, j, k;
+
+	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+	dpm_table->DTETjOffset = 0;
+	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+	if (ppm) {
+		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+	} else {
+		dpm_table->PPM_PkgPwrLimit = 0;
+		dpm_table->PPM_TemperatureLimit = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+	def1 = defaults->bapmti_r;
+	def2 = defaults->bapmti_rc;
+
+	/* walk the flat default arrays in iteration/source/sink order */
+	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU7_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+				def1++;
+				def2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ci_get_std_voltage_value_sidd - derive standardized hi/lo SIDD voltages
+ * for a voltage-table entry.
+ *
+ * Defaults *hi and *lo to the scaled nominal voltage.  It then tries to find
+ * the entry's voltage in the SCLK/VDDC dependency table (first an exact
+ * match, then the first entry at or above it) and, when found, takes the
+ * corresponding Vddc/Leakage pair from the CAC leakage table, clamping to the
+ * last CAC entry if the index exceeds the CAC table.  Missing CAC table or no
+ * match degrade gracefully to the nominal voltage with a warning.
+ * Returns 0, or -EINVAL if the dependency table itself is missing.
+ */
+static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+		uint16_t *lo)
+{
+	uint16_t v_index;
+	bool vol_found = false;
+	*hi = tab->value * VOLTAGE_SCALE;
+	*lo = tab->value * VOLTAGE_SCALE;
+
+	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+			"The SCLK/VDDC Dependency Table does not exist.\n",
+			return -EINVAL);
+
+	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
+		return 0;
+	}
+
+	/* first pass: exact voltage match in the dependency table */
+	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+			vol_found = true;
+			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+			} else {
+				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+			}
+			break;
+		}
+	}
+
+	/* second pass: fall back to the first entry at or above the voltage */
+	if (!vol_found) {
+		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+				vol_found = true;
+				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+				} else {
+					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+				}
+				break;
+			}
+		}
+
+		if (!vol_found)
+			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+	}
+
+	return 0;
+}
+
+/*
+ * ci_populate_smc_voltage_table - convert one voltage-table entry into an
+ * SMC voltage level.
+ *
+ * Fills StdVoltageHiSidd/LoSidd via ci_get_std_voltage_value_sidd (falling
+ * back to the scaled nominal voltage on failure) and the nominal Voltage
+ * field; all three are byte-swapped for the SMC.  Always returns 0.
+ */
+static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+		pp_atomctrl_voltage_table_entry *tab,
+		SMU7_Discrete_VoltageLevel *smc_voltage_tab)
+{
+	int result;
+
+	result = ci_get_std_voltage_value_sidd(hwmgr, tab,
+			&smc_voltage_tab->StdVoltageHiSidd,
+			&smc_voltage_tab->StdVoltageLoSidd);
+	if (result) {
+		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+	}
+
+	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+	return 0;
+}
+
+/*
+ * ci_populate_smc_vddc_table - fill the VDDC voltage levels of the SMC DPM
+ * table from the driver's VDDC voltage table.
+ *
+ * When VDDC is GPIO-controlled the per-level SMIO pattern is OR-ed in;
+ * otherwise it is cleared.  Returns 0 on success, -EINVAL on a failed level.
+ */
+static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+			SMU7_Discrete_DpmTable *table)
+{
+	unsigned int count;
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	table->VddcLevelCount = data->vddc_voltage_table.count;
+	for (count = 0; count < table->VddcLevelCount; count++) {
+		result = ci_populate_smc_voltage_table(hwmgr,
+				&(data->vddc_voltage_table.entries[count]),
+				&(table->VddcLevel[count]));
+		PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
+
+		/* GPIO voltage control */
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+			table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+		else
+			table->VddcLevel[count].Smio = 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+	return 0;
+}
+
+/*
+ * ci_populate_smc_vdd_ci_table - fill the VDDCI voltage levels of the SMC DPM
+ * table from the driver's VDDCI voltage table.
+ *
+ * NOTE(review): the non-GPIO branch does `Smio |= 0` (a no-op) where the VDDC
+ * variant assigns `Smio = 0`; this relies on Smio already being zeroed —
+ * confirm the smc_state_table is zero-initialized before this runs.
+ */
+static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+			SMU7_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t count;
+	int result;
+
+	table->VddciLevelCount = data->vddci_voltage_table.count;
+
+	for (count = 0; count < table->VddciLevelCount; count++) {
+		result = ci_populate_smc_voltage_table(hwmgr,
+				&(data->vddci_voltage_table.entries[count]),
+				&(table->VddciLevel[count]));
+		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+			table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+		else
+			table->VddciLevel[count].Smio |= 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+	return 0;
+}
+
+/*
+ * ci_populate_smc_mvdd_table - fill the MVDD voltage levels of the SMC DPM
+ * table from the driver's MVDD voltage table.
+ *
+ * NOTE(review): like the VDDCI variant, the non-GPIO branch's `Smio |= 0` is
+ * a no-op and relies on the field being pre-zeroed — confirm.
+ */
+static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+			SMU7_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t count;
+	int result;
+
+	table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+	for (count = 0; count < table->MvddLevelCount; count++) {
+		result = ci_populate_smc_voltage_table(hwmgr,
+				&(data->mvdd_voltage_table.entries[count]),
+				&table->MvddLevel[count]);
+		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
+		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+			table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+		else
+			table->MvddLevel[count].Smio |= 0;
+	}
+
+	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+	return 0;
+}
+
+
+/*
+ * ci_populate_smc_voltage_tables - populate all three SMC voltage tables
+ * (VDDC, VDDCI, MVDD) in order, stopping with -EINVAL on the first failure.
+ */
+static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+	SMU7_Discrete_DpmTable *table)
+{
+	int result;
+
+	result = ci_populate_smc_vddc_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate VDDC voltage table to SMC", return -EINVAL);
+
+	result = ci_populate_smc_vdd_ci_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+	result = ci_populate_smc_mvdd_table(hwmgr, table);
+	PP_ASSERT_WITH_CODE(0 == result,
+			"can not populate MVDD voltage table to SMC", return -EINVAL);
+
+	return 0;
+}
+
+/*
+ * ci_populate_ulv_level - fill the SMC ULV (ultra-low-voltage) state.
+ *
+ * Reads the ULV voltage from the powerplay tables; a zero value disables ULV
+ * support.  Depending on the voltage-control mode the offset from the lowest
+ * SCLK-dependency voltage is stored either as a raw offset (SMIO/backup path)
+ * or as a VID-scaled offset (SVI2 path); offsets are clamped to 0 when the
+ * ULV voltage exceeds the minimum table voltage.  Fields are byte-swapped for
+ * the SMC.  Returns 0 on success or the table-read error code.
+ */
+static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
+		struct SMU7_Discrete_Ulv *state)
+{
+	uint32_t voltage_response_time, ulv_voltage;
+	int result;
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+	if (ulv_voltage == 0) {
+		data->ulv_supported = false;
+		return 0;
+	}
+
+	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+			state->VddcOffset = 0;
+		else
+			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+	} else {
+		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+			state->VddcOffsetVid = 0;
+		else  /* used in SVI2 Mode */
+			state->VddcOffsetVid = (uint8_t)(
+					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+						* VOLTAGE_VID_OFFSET_SCALE2
+						/ VOLTAGE_VID_OFFSET_SCALE1);
+	}
+	state->VddcPhase = 1;
+
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+	return 0;
+}
+
+/* Thin wrapper around ci_populate_ulv_level, kept for naming symmetry with
+ * the other ci_populate_*_state helpers. */
+static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
+                SMU7_Discrete_Ulv *ulv_level)
+{
+	int result = ci_populate_ulv_level(hwmgr, ulv_level);
+
+	return result;
+}
+
+/*
+ * ci_populate_smc_link_level - fill the PCIe link levels of the SMC DPM table
+ * from the PCIe speed DPM table, with fixed Up/Down hysteresis timers.
+ * NOTE(review): the loop runs to i <= count, writing count+1 entries; per the
+ * comment below index `count` is reserved for the PCIE boot level — confirm
+ * dpm_levels[] has room for that extra slot.
+ */
+static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	struct smu7_dpm_table *dpm_table = &data->dpm_table;
+	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+	uint32_t i;
+
+/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
+	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed  =
+			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount =
+			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity = 1;
+		table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
+		table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
+	}
+
+	smu_data->smc_state_table.LinkLevelCount =
+		(uint8_t)dpm_table->pcie_speed_table.count;
+	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+	return 0;
+}
+
/* Compute the MPLL/DLL register values for one memory clock level.
 *
 * Starts from the clock register values cached in the backend, asks the
 * VBIOS (atomctrl) for the MPLL dividers for @memory_clock, folds in
 * spread-spectrum settings when the platform supports them, and writes
 * the resulting register images plus the clock itself into @mclk.
 *
 * @strobe_mode: whether the level runs in memory strobe mode (affects the
 *               divider query).
 * @dllStateOn:  value programmed into the MRDCK0/1_PDNB power-down fields.
 *
 * Returns 0 on success, or the atomctrl error if the divider lookup fails.
 */
static int ci_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	/* Start from the register values captured at init time. */
	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	/* Feedback divider: integer part, fractional part and VCO mode. */
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	/* DQ post divider / YCLK select only apply to GDDR5. */
	if (data->is_memory_gddr5) {
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			/* Convert the VBIOS spread-spectrum rate/percentage
			 * into the CLKS / CLKV register encodings. */
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);


	/* Hand the computed register images back in the SMC level entry. */
	mclk->MclkFrequency   = memory_clock;
	mclk->MpllFuncCntl    = mpll_func_cntl;
	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
	mclk->DllCntl         = dll_cntl;
	mclk->MpllSs1         = mpll_ss1;
	mclk->MpllSs2         = mpll_ss2;

	return 0;
}
+
/* Map a memory clock (in 10 kHz units) to the 4-bit MC frequency-ratio
 * index used by the MC sequencer. Strobe mode uses a lower clock range
 * than normal mode; out-of-range clocks clamp to 0x00 / 0x0f.
 */
static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	if (strobe_mode) {
		if (memory_clock < 12500)
			return 0x00;
		if (memory_clock > 47500)
			return 0x0f;
		return (uint8_t)((memory_clock - 10000) / 2500);
	}

	if (memory_clock < 65000)
		return 0x00;
	if (memory_clock > 135000)
		return 0x0f;
	return (uint8_t)((memory_clock - 60000) / 5000);
}
+
/* DDR3 variant of the MC frequency-ratio lookup: clocks below 10000
 * (10 kHz units) map to index 0, clocks at or above 80000 clamp to 0x0f,
 * and the range in between is divided into 5000-wide bins starting at 1.
 */
static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
+
+static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+                                       uint32_t memory_clock, uint32_t *p_shed)
+{
+       unsigned int i;
+
+       *p_shed = 1;
+
+       for (i = 0; i < pl->count; i++) {
+               if (memory_clock < pl->entries[i].Mclk) {
+                       *p_shed = i;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static int ci_populate_single_memory_level(
+               struct pp_hwmgr *hwmgr,
+               uint32_t memory_clock,
+               SMU7_Discrete_MemoryLevel *memory_level
+               )
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int result = 0;
+       bool dll_state_on;
+       struct cgs_display_info info = {0};
+       uint32_t mclk_edc_wr_enable_threshold = 40000;
+       uint32_t mclk_edc_enable_threshold = 40000;
+       uint32_t mclk_strobe_mode_threshold = 40000;
+
+       if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+               result = ci_get_dependency_volt_by_clk(hwmgr,
+                       hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+       }
+
+       if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+               result = ci_get_dependency_volt_by_clk(hwmgr,
+                               hwmgr->dyn_state.vddci_dependency_on_mclk,
+                               memory_clock,
+                               &memory_level->MinVddci);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+       }
+
+       if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
+               result = ci_get_dependency_volt_by_clk(hwmgr,
+                               hwmgr->dyn_state.mvdd_dependency_on_mclk,
+                               memory_clock,
+                               &memory_level->MinMvdd);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result);
+       }
+
+       memory_level->MinVddcPhases = 1;
+
+       if (data->vddc_phase_shed_control) {
+               ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+                               memory_clock, &memory_level->MinVddcPhases);
+       }
+
+       memory_level->EnabledForThrottle = 1;
+       memory_level->EnabledForActivity = 1;
+       memory_level->UpH = 0;
+       memory_level->DownH = 100;
+       memory_level->VoltageDownH = 0;
+
+       /* Indicates maximum activity level for this performance level.*/
+       memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+       memory_level->StutterEnable = 0;
+       memory_level->StrobeEnable = 0;
+       memory_level->EdcReadEnable = 0;
+       memory_level->EdcWriteEnable = 0;
+       memory_level->RttEnable = 0;
+
+       /* default set to low watermark. Highest level will be set to high later.*/
+       memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+       data->display_timing.num_existing_displays = info.display_count;
+
+       /* stutter mode not support on ci */
+
+       /* decide strobe mode*/
+       memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+               (memory_clock <= mclk_strobe_mode_threshold);
+
+       /* decide EDC mode and memory clock ratio*/
+       if (data->is_memory_gddr5) {
+               memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
+                                       memory_level->StrobeEnable);
+
+               if ((mclk_edc_enable_threshold != 0) &&
+                               (memory_clock > mclk_edc_enable_threshold)) {
+                       memory_level->EdcReadEnable = 1;
+               }
+
+               if ((mclk_edc_wr_enable_threshold != 0) &&
+                               (memory_clock > mclk_edc_wr_enable_threshold)) {
+                       memory_level->EdcWriteEnable = 1;
+               }
+
+               if (memory_level->StrobeEnable) {
+                       if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
+                                       ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+                       else
+                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+               } else
+                       dll_state_on = data->dll_default_on;
+       } else {
+               memory_level->StrobeRatio =
+                       ci_get_ddr3_mclk_frequency_ratio(memory_clock);
+               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+       }
+
+       result = ci_calculate_mclk_params(hwmgr,
+               memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+       if (0 == result) {
+               memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+               memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+               memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+               /* MCLK frequency in units of 10KHz*/
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+               /* Indicates maximum activity level for this performance level.*/
+               CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+       }
+
+       return result;
+}
+
/* Populate every memory DPM level in the SMC state table and upload the
 * whole MemoryLevel array to SMC RAM.
 *
 * Iterates the driver's mclk_table, fills each SMU7_Discrete_MemoryLevel
 * via ci_populate_single_memory_level(), applies per-device quirks, then
 * copies the level array to the SMC at the dpm-table offset.
 *
 * Returns 0 on success or the first failing sub-step's error code;
 * -EINVAL if any table entry has a zero memory clock.
 */
static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	struct cgs_system_info sys_info = {0};
	uint32_t dev_id;

	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero", return -EINVAL);
		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
			&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result)
			return result;
	}

	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	/* Query the PCI device id to apply device-specific quirks below. */
	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	dev_id = (uint32_t)sys_info.value;

	/* Quirk for device ids 0x67B0/0x67B1: level 1 reuses the level 0
	 * VDDCI/MVDD minimums. */
	if ((dpm_table->mclk_table.count >= 2)
		&& ((dev_id == 0x67B0) ||  (dev_id == 0x67B1))) {
		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
	}
	/* Level 0 gets a fixed activity target, converted to SMC byte order. */
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	/* Highest level drives displays at the high watermark. */
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	result = ci_copy_bytes_to_smc(hwmgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}
+
+static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+                                       SMU7_Discrete_VoltageLevel *voltage)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       uint32_t i = 0;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+               /* find mvdd value which clock is more than request */
+               for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+                       if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+                               /* Always round to higher voltage. */
+                               voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+                               break;
+                       }
+               }
+
+               PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+                       "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
/* Populate the SCLK and MCLK ACPI (idle/lowest) levels of the SMC table.
 *
 * The ACPI level is the state entered when DPM is effectively off: lowest
 * safe voltages, SPLL powered down/reset, memory DLL disabled and forced
 * into reset. Fields read by the SMC are converted to big-endian.
 *
 * Returns 0 on success or the error from the VBIOS divider query.
 */
static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
	SMU7_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;

	SMU7_Discrete_VoltageLevel voltage_level;
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;


	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Prefer the VBIOS ACPI VDDC; fall back to the pptable minimum. */
	if (data->acpi_vddc)
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
	/* assign zero for now*/
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset while in ACPI state. */
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);


	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	/* Without VDDCI control, VDDCI tracks VDDC. */
	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
	else {
		if (data->acpi_vddci != 0)
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
	}

	/* MVDD lookup for mclk 0; a failure simply leaves MinMvdd at 0. */
	if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
+
+static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+                                       SMU7_Discrete_DpmTable *table)
+{
+       int result = 0;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+               hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+       table->UvdLevelCount = (uint8_t)(uvd_table->count);
+
+       for (count = 0; count < table->UvdLevelCount; count++) {
+               table->UvdLevel[count].VclkFrequency =
+                                       uvd_table->entries[count].vclk;
+               table->UvdLevel[count].DclkFrequency =
+                                       uvd_table->entries[count].dclk;
+               table->UvdLevel[count].MinVddc =
+                                       uvd_table->entries[count].v * VOLTAGE_SCALE;
+               table->UvdLevel[count].MinVddcPhases = 1;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].VclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for Vclk clock", return result);
+
+               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].DclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for Dclk clock", return result);
+
+               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
+       }
+
+       return result;
+}
+
+static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU7_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_vce_clock_voltage_dependency_table *vce_table =
+                               hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+       table->VceLevelCount = (uint8_t)(vce_table->count);
+       table->VceBootLevel = 0;
+
+       for (count = 0; count < table->VceLevelCount; count++) {
+               table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
+               table->VceLevel[count].MinVoltage =
+                               vce_table->entries[count].v * VOLTAGE_SCALE;
+               table->VceLevel[count].MinPhases = 1;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->VceLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for VCE engine clock",
+                               return result);
+
+               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
+       }
+       return result;
+}
+
/* Fill the SMC ACP level table from the ACP clock/voltage dependency
 * table. Returns -EINVAL for an empty table (result's initial value),
 * otherwise 0 or the VBIOS divider-lookup error.
 *
 * NOTE(review): unlike the UVD/VCE/SAMU populate functions, MinVoltage
 * here is NOT multiplied by VOLTAGE_SCALE — can't tell from this file
 * whether that is intentional; worth confirming against the SMC spec.
 */
static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
					SMU7_Discrete_DpmTable *table)
{
	int result = -EINVAL;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_acp_clock_voltage_dependency_table *acp_table =
				hwmgr->dyn_state.acp_clock_voltage_dependency_table;

	table->AcpLevelCount = (uint8_t)(acp_table->count);
	table->AcpBootLevel = 0;

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
		table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		/* Ask the VBIOS for the divider realizing this clock. */
		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->AcpLevel[count].Frequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for engine clock", return result);

		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
		CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
	}
	return result;
}
+
+static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+                                       SMU7_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_samu_clock_voltage_dependency_table *samu_table =
+                               hwmgr->dyn_state.samu_clock_voltage_dependency_table;
+
+       table->SamuBootLevel = 0;
+       table->SamuLevelCount = (uint8_t)(samu_table->count);
+
+       for (count = 0; count < table->SamuLevelCount; count++) {
+               table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
+               table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
+               table->SamuLevel[count].MinPhases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->SamuLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for samu clock", return result);
+
+               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
+       }
+       return result;
+}
+
/* Build one MC arbiter DRAM timing entry for an (engine, memory) clock
 * pair.
 *
 * Asks the VBIOS to program DRAM timings for the pair, then reads the
 * resulting MC_ARB_DRAM_TIMING/TIMING2 registers and the STATE0 burst
 * time back from hardware and stores them (byte-swapped for the SMC)
 * in @arb_regs.
 *
 * Returns 0 on success or the atomctrl error.
 */
static int ci_populate_memory_timing_parameters(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock,
		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
		)
{
	uint32_t dramTiming;
	uint32_t dramTiming2;
	uint32_t burstTime;
	int result;

	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
				engine_clock, memory_clock);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error calling VBIOS to set DRAM_TIMING.", return result);

	/* Read back what the VBIOS programmed. */
	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);

	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
	arb_regs->McArbBurstTime = (uint8_t)burstTime;

	return 0;
}
+
+/* Build the MC ARB DRAM timing table for every SCLK/MCLK pair and, if all
+ * entries were generated, upload it to SMC RAM at arb_table_start.
+ */
+static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       SMU7_Discrete_MCArbDramTimingTable  arb_regs;
+       uint32_t sclk_idx, mclk_idx;
+       int result = 0;
+
+       memset(&arb_regs, 0x00, sizeof(arb_regs));
+
+       /* one table entry per (engine clock, memory clock) combination */
+       for (sclk_idx = 0; sclk_idx < data->dpm_table.sclk_table.count; sclk_idx++) {
+               for (mclk_idx = 0; mclk_idx < data->dpm_table.mclk_table.count; mclk_idx++) {
+                       result = ci_populate_memory_timing_parameters(hwmgr,
+                                       data->dpm_table.sclk_table.dpm_levels[sclk_idx].value,
+                                       data->dpm_table.mclk_table.dpm_levels[mclk_idx].value,
+                                       &arb_regs.entries[sclk_idx][mclk_idx]);
+                       if (result != 0)
+                               break;
+               }
+       }
+
+       /* upload only when every entry was generated successfully */
+       if (result == 0)
+               result = ci_copy_bytes_to_smc(hwmgr,
+                               smu_data->arb_table_start,
+                               (uint8_t *)&arb_regs,
+                               sizeof(arb_regs),
+                               SMC_RAM_END);
+
+       return result;
+}
+
+/*
+ * ci_populate_smc_boot_level - select the DPM boot levels and boot voltages.
+ *
+ * Looks up the VBIOS boot SCLK/MCLK in the DPM tables to pick the boot
+ * levels; falls back to level 0 (with an error message) when the boot
+ * clock is not found.  Always returns 0.
+ *
+ * Fixes: the pr_err strings used a '\' line continuation inside the
+ * literal, which embedded the next line's indentation into the logged
+ * message; the memory branch also logged the engine-clock message
+ * (copy-paste error).
+ */
+static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+                       SMU7_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table*/
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+                       data->vbios_boot_state.sclk_bootup_value,
+                       (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+       if (0 != result) {
+               smu_data->smc_state_table.GraphicsBootLevel = 0;
+               pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!");
+               result = 0;
+       }
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+               data->vbios_boot_state.mclk_bootup_value,
+               (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+       if (0 != result) {
+               smu_data->smc_state_table.MemoryBootLevel = 0;
+               pr_err("VBIOS did not find boot memory clock value in dependency table. Using Memory DPM level 0!");
+               result = 0;
+       }
+
+       table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+       table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+       return result;
+}
+
+/* Copy the valid MC register address pairs (s0/s1) into the SMC-format
+ * table, compacting out entries whose bit is clear in validflag.
+ */
+static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+                                SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+       const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
+
+       uint32_t dst = 0;
+       uint32_t src;
+
+       for (src = 0; src < smu_data->mc_reg_table.last; src++) {
+               if (!(smu_data->mc_reg_table.validflag & (1 << src)))
+                       continue;
+               PP_ASSERT_WITH_CODE(dst < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+                       "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+               mc_reg_table->address[dst].s0 =
+                       PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[src].s0);
+               mc_reg_table->address[dst].s1 =
+                       PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[src].s1);
+               dst++;
+       }
+
+       mc_reg_table->last = (uint8_t)dst;
+
+       return 0;
+}
+
+/* Pack the valid mc_data words of one MC register entry into SMC byte
+ * order, skipping words whose bit is clear in valid_flag.
+ */
+static void ci_convert_mc_registers(
+       const struct ci_mc_reg_entry *entry,
+       SMU7_Discrete_MCRegisterSet *data,
+       uint32_t num_entries, uint32_t valid_flag)
+{
+       uint32_t src, dst = 0;
+
+       for (src = 0; src < num_entries; src++) {
+               if (!(valid_flag & (1 << src)))
+                       continue;
+               data->value[dst++] = PP_HOST_TO_SMC_UL(entry->mc_data[src]);
+       }
+}
+
+/* Select the MC register entry whose mclk_max covers memory_clock (or the
+ * highest entry when the clock exceeds every mclk_max) and convert it to
+ * SMC format.  Always returns 0.
+ */
+static int ci_convert_mc_reg_table_entry_to_smc(
+               struct pp_hwmgr *hwmgr,
+               const uint32_t memory_clock,
+               SMU7_Discrete_MCRegisterSet *mc_reg_table_data
+               )
+{
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       uint32_t entry;
+
+       for (entry = 0; entry < smu_data->mc_reg_table.num_entries; entry++)
+               if (memory_clock <= smu_data->mc_reg_table.mc_reg_table_entry[entry].mclk_max)
+                       break;
+
+       /* clock above every entry: clamp to the last one */
+       if (entry == smu_data->mc_reg_table.num_entries && entry > 0)
+               entry--;
+
+       ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[entry],
+                               mc_reg_table_data, smu_data->mc_reg_table.last,
+                               smu_data->mc_reg_table.validflag);
+
+       return 0;
+}
+
+/* Convert the MC register settings for every MCLK DPM level.  All levels
+ * are attempted even after a failure; the last per-level error (or 0) is
+ * returned.
+ */
+static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+               SMU7_Discrete_MCRegisters *mc_regs)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int last_error = 0;
+       uint32_t level;
+
+       for (level = 0; level < data->dpm_table.mclk_table.count; level++) {
+               int ret = ci_convert_mc_reg_table_entry_to_smc(hwmgr,
+                               data->dpm_table.mclk_table.dpm_levels[level].value,
+                               &mc_regs->data[level]);
+
+               if (ret != 0)
+                       last_error = ret;
+       }
+
+       return last_error;
+}
+
+/* Re-generate and upload the per-MCLK MC register sets when an overdrive
+ * MCLK change is pending; no-op (returns 0) otherwise.
+ */
+static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t address;
+       int32_t result;
+
+       if (!(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+               return 0;
+
+       memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+       result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+       if (result != 0)
+               return result;
+
+       /* only the data[] payload is rewritten; addresses were set at init */
+       address = smu_data->mc_reg_table_start +
+                       (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
+
+       return ci_copy_bytes_to_smc(hwmgr, address,
+                       (uint8_t *)&smu_data->mc_regs.data[0],
+                       sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+                       SMC_RAM_END);
+}
+
+/* Build the full MC register table (address list + per-level data) and
+ * upload it to SMC RAM at mc_reg_table_start.
+ */
+static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       int result;
+
+       memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
+
+       result = ci_populate_mc_reg_address(hwmgr, &smu_data->mc_regs);
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+       result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Failed to initialize MCRegTable for driver state!", return result;);
+
+       return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
+                       (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+/* Pick the lowest vddc-dependency level whose clock is at least the VBIOS
+ * boot SCLK/MCLK and record it as the SMC boot level.  Always returns 0.
+ */
+static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       uint8_t n, idx;
+
+       n = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+       for (idx = 0; idx < n; idx++) {
+               if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[idx].clk >=
+                               data->vbios_boot_state.sclk_bootup_value) {
+                       smu_data->smc_state_table.GraphicsBootLevel = idx;
+                       break;
+               }
+       }
+
+       n = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+       for (idx = 0; idx < n; idx++) {
+               if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[idx].clk >=
+                               data->vbios_boot_state.mclk_bootup_value) {
+                       smu_data->smc_state_table.MemoryBootLevel = idx;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/* Report in the DPM table whether VDDC is controlled over SVI2. */
+static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+                                           SMU7_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       table->SVI2Enable =
+               (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? 1 : 0;
+
+       return 0;
+}
+
+/*
+ * ci_start_smc - release the SMC from reset and wait for it to come up.
+ *
+ * Sequence: program the jump target, ungate the SMC clock, then deassert
+ * reset; finally poll FIRMWARE_FLAGS until the firmware reports its
+ * interrupts enabled.  Always returns 0.
+ */
+static int ci_start_smc(struct pp_hwmgr *hwmgr)
+{
+       /* set smc instruct start point at 0x0 */
+       ci_program_jump_on_start(hwmgr);
+
+       /* enable smc clock */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+       /* take the SMC out of reset */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+       /* wait until the firmware signals it is ready */
+       PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
+                                INTERRUPTS_ENABLED, 1);
+
+       return 0;
+}
+
+/*
+ * ci_init_smc_table - build the complete SMU7 discrete DPM table and
+ * upload it to SMC RAM.
+ *
+ * Sequence: power-tune defaults, voltage tables, system flags, ULV,
+ * graphics/memory/link/ACPI/VCE/ACP/SAMU levels, ARB timings for the
+ * initial state, UVD levels, boot levels/state, BAPM parameters, scalar
+ * intervals and thermal limits, GPIO setup, endian conversion of the
+ * SMC-visible fields, DPM table upload, MC register table, PM fuses,
+ * and finally SMC start.  Returns 0 on success or the first failing
+ * step's error code.
+ */
+static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       SMU7_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+       struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+       u32 i;
+
+       ci_initialize_power_tune_defaults(hwmgr);
+       memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+               ci_populate_smc_voltage_tables(hwmgr, table);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StepVddc))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (data->is_memory_gddr5)
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+       if (data->ulv_supported) {
+               result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
+               PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize ULV state!", return result);
+
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_ULV_PARAMETER, 0x40035);
+       }
+
+       result = ci_populate_all_graphic_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Graphics Level!", return result);
+
+       result = ci_populate_all_memory_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Memory Level!", return result);
+
+       result = ci_populate_smc_link_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Link Level!", return result);
+
+       result = ci_populate_smc_acpi_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize ACPI Level!", return result);
+
+       result = ci_populate_smc_vce_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize VCE Level!", return result);
+
+       result = ci_populate_smc_acp_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize ACP Level!", return result);
+
+       result = ci_populate_smc_samu_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize SAMU Level!", return result);
+
+       /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
+       /* need to populate the  ARB settings for the initial state. */
+       result = ci_program_memory_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to Write ARB settings for the initial state.", return result);
+
+       result = ci_populate_smc_uvd_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize UVD Level!", return result);
+
+       table->UvdBootLevel  = 0;
+       table->VceBootLevel  = 0;
+       table->AcpBootLevel  = 0;
+       table->SamuBootLevel  = 0;
+
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       result = ci_populate_smc_boot_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Boot Level!", return result);
+
+       result = ci_populate_smc_initial_state(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+       result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+       /* per-block activity sampling intervals and enables */
+       table->UVDInterval = 1;
+       table->VCEInterval = 1;
+       table->ACPInterval = 1;
+       table->SAMUInterval = 1;
+       table->GraphicsVoltageChangeEnable  = 1;
+       table->GraphicsThermThrottleEnable  = 1;
+       table->GraphicsInterval = 1;
+       table->VoltageInterval  = 1;
+       table->ThermalInterval  = 1;
+
+       /* thermal limits in SMU7 Q8.8 fixed-point format */
+       table->TemperatureLimitHigh =
+               (data->thermal_temp_setting.temperature_high *
+                SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       table->TemperatureLimitLow =
+               (data->thermal_temp_setting.temperature_low *
+               SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+       table->MemoryVoltageChangeEnable  = 1;
+       table->MemoryInterval  = 1;
+       table->VoltageResponseTime  = 0;
+       table->VddcVddciDelta = 4000;
+       table->PhaseResponseTime  = 0;
+       table->MemoryThermThrottleEnable  = 1;
+
+       PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+                       "There must be 1 or more PCIE levels defined in PPTable.",
+                       return -EINVAL);
+
+       table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
+       table->PCIeGenInterval = 1;
+
+       ci_populate_smc_svi2_config(hwmgr, table);
+
+       for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
+               CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
+
+       table->ThermGpio  = 17;
+       table->SclkStepSize = 0x4000;
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+               table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_RegulatorHot);
+       } else {
+               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_RegulatorHot);
+       }
+
+       table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+
+       /* convert every SMC-visible multi-byte field to SMC byte order */
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+       table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
+       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+       table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+       table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+       table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+       result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
+                                       offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+                                       (uint8_t *)&(table->SystemFlags),
+                                       sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
+                                       SMC_RAM_END);
+
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to upload dpm data to SMC memory!", return result;);
+
+       result = ci_populate_initial_mc_reg_table(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == result),
+               "Failed to populate initialize MC Reg table!", return result);
+
+       result = ci_populate_pm_fuses(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to  populate PM fuses to SMC memory!", return result);
+
+       /* NOTE(review): ci_start_smc() as written always returns 0, so the
+        * ignored return value is harmless today — revisit if it grows
+        * failure paths.
+        */
+       ci_start_smc(hwmgr);
+
+       return 0;
+}
+
+/*
+ * ci_thermal_setup_fan_table - build and upload the SMC fan-control table.
+ *
+ * Derives the PWM/temperature response curve from the platform fan
+ * parameters and writes it to SMC RAM at fan_table_start.  Quietly
+ * disables microcode fan control (and returns 0) when there is no fan,
+ * no fan table address, or FDO duty resolution reads back as zero.
+ *
+ * Fixes: the status of ci_copy_bytes_to_smc() was computed into 'res'
+ * but then discarded ('return 0'); it is now propagated to the caller.
+ */
+static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+       struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       uint32_t duty100;
+       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       uint16_t fdo_min, slope1, slope2;
+       uint32_t reference_clock;
+       int res;
+       uint64_t tmp64;
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+               return 0;
+
+       if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       if (0 == ci_data->fan_table_start) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+       if (0 == duty100) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       /* minimum FDO duty, scaled from 0.01% units into duty100 units */
+       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (uint16_t)tmp64;
+
+       /* NOTE(review): assumes usTMed > usTMin and usTHigh > usTMed; a
+        * degenerate thermal table would divide by zero below — confirm
+        * the table is validated upstream.
+        */
+       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = smu7_get_xclk(hwmgr);
+
+       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+       res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+       /* propagate the SMC upload status instead of discarding it */
+       return res;
+}
+
+/*
+ * ci_program_mem_timing_parameters - refresh ARB memory timings after an
+ * overdrive SCLK or MCLK change; no-op otherwise.
+ *
+ * Uses bitwise OR to combine the flag mask: these are bit flags, and the
+ * previous '+' only worked because the two bits are distinct.
+ */
+static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->need_update_smu7_dpm_table &
+                       (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+               return ci_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+/*
+ * ci_update_sclk_threshold - sync runtime-tunable SMC state.
+ *
+ * If the low-SCLK notification threshold changed, writes the new
+ * (byte-swapped) value into the DPM table in SMC RAM; then re-uploads
+ * the MC register table (pending OD MCLK changes) and reprograms ARB
+ * memory timings.
+ *
+ * NOTE(review): 'result' from the threshold upload is overwritten by the
+ * later steps, so only the MC-reg status (via the early-return assert)
+ * and the memory-timing status are ever returned — confirm intended.
+ */
+static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+       int result = 0;
+       uint32_t low_sclk_interrupt_threshold = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               low_sclk_interrupt_threshold =
+                               data->low_sclk_interrupt_threshold;
+
+               /* SMC expects big-endian */
+               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+               result = ci_copy_bytes_to_smc(
+                               hwmgr,
+                               smu_data->dpm_table_start +
+                               offsetof(SMU7_Discrete_DpmTable,
+                                       LowSclkInterruptT),
+                               (uint8_t *)&low_sclk_interrupt_threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+
+       result = ci_update_and_upload_mc_reg_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+       result = ci_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters!",
+                       );
+
+       return result;
+}
+
+/*
+ * ci_get_offsetof - translate a generic (type, member) pair into the byte
+ * offset of the matching field inside the SMU7 firmware structures.
+ *
+ * Returns 0 (with a debug message) for unknown combinations.  The inner
+ * switches now end with an explicit break: previously an unmatched
+ * SMU_SoftRegisters member fell through into the SMU_Discrete_DpmTable
+ * case and could return the wrong table's offset.
+ */
+static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU7_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
+               case PreVBlankGap:
+                       return offsetof(SMU7_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU7_SoftRegisters, VBlankTimeout);
+               case DRAM_LOG_ADDR_H:
+                       return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
+               case DRAM_LOG_ADDR_L:
+                       return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
+               case DRAM_LOG_PHY_ADDR_H:
+                       return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+               case DRAM_LOG_PHY_ADDR_L:
+                       return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+               case DRAM_LOG_BUFF_SIZE:
+                       return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+               }
+               break;
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
+               }
+               break;
+       }
+       pr_debug("can't get the offset of type %x member %x\n", type, member);
+       return 0;
+}
+
+/* Translate a generic SMU_* capacity identifier into the SMU7 limit;
+ * returns 0 (with a debug message) for unknown identifiers.
+ */
+static uint32_t ci_get_mac_definition(uint32_t value)
+{
+       static const struct {
+               uint32_t id;
+               uint32_t limit;
+       } limits[] = {
+               { SMU_MAX_LEVELS_GRAPHICS, SMU7_MAX_LEVELS_GRAPHICS },
+               { SMU_MAX_LEVELS_MEMORY,   SMU7_MAX_LEVELS_MEMORY },
+               { SMU_MAX_LEVELS_LINK,     SMU7_MAX_LEVELS_LINK },
+               { SMU_MAX_ENTRIES_SMIO,    SMU7_MAX_ENTRIES_SMIO },
+               { SMU_MAX_LEVELS_VDDC,     SMU7_MAX_LEVELS_VDDC },
+               { SMU_MAX_LEVELS_VDDCI,    SMU7_MAX_LEVELS_VDDCI },
+               { SMU_MAX_LEVELS_MVDD,     SMU7_MAX_LEVELS_MVDD },
+       };
+       uint32_t i;
+
+       for (i = 0; i < ARRAY_SIZE(limits); i++)
+               if (limits[i].id == value)
+                       return limits[i].limit;
+
+       pr_debug("can't get the mac of %x\n", value);
+       return 0;
+}
+
+/*
+ * ci_load_smc_ucode - copy the SMC firmware image into SMC RAM.
+ *
+ * Streams the big-endian image through the auto-incrementing indirect
+ * data port.  Returns -EINVAL when no image is available, when the image
+ * would overrun SMC RAM, or when its size is not a multiple of 4.
+ *
+ * Fixes: guard against a NULL firmware pointer, and reject a misaligned
+ * image size *before* streaming (previously a truncated image was
+ * uploaded and only then rejected).
+ */
+static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
+{
+       uint32_t byte_count, start_addr;
+       uint8_t *src;
+       uint32_t data;
+
+       struct cgs_firmware_info info = {0};
+
+       cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
+       hwmgr->is_kicker = info.is_kicker;
+       byte_count = info.image_size;
+       src = (uint8_t *)info.kptr;
+       start_addr = info.ucode_start_address;
+
+       if (!src || !byte_count) {
+               pr_err("SMC firmware image is not available.\n");
+               return -EINVAL;
+       }
+
+       if  (byte_count > SMC_RAM_END) {
+               pr_err("SMC address is beyond the SMC RAM area.\n");
+               return -EINVAL;
+       }
+
+       if (byte_count % 4) {
+               pr_err("SMC size must be divisible by 4\n");
+               return -EINVAL;
+       }
+
+       cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+       PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
+       /* image words are stored big-endian */
+       for (; byte_count >= 4; byte_count -= 4) {
+               data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+               cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+               src += 4;
+       }
+       PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+       return 0;
+}
+
+/*
+ * ci_upload_firmware - load the SMC firmware unless it is already running.
+ *
+ * Waits for boot-sequence completion, enables the prefetcher, then gates
+ * the SMC clock and holds it in reset before streaming the image via
+ * ci_load_smc_ucode().  Returns 0 when the SMC was already running,
+ * otherwise the upload result.
+ */
+static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
+{
+       if (ci_is_smc_ram_running(hwmgr)) {
+               pr_info("smc is running, no need to load smc firmware\n");
+               return 0;
+       }
+       PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
+                       boot_seq_done, 1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
+                       pre_fetcher_en, 1);
+
+       /* gate the clock and hold reset while the image is written */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+       return ci_load_smc_ucode(hwmgr);
+}
+
+/*
+ * ci_process_firmware_header - cache the SMC firmware table locations.
+ *
+ * Uploads the firmware if the SMC is not yet running, then reads the
+ * firmware header out of SMC RAM and records the start offsets of the
+ * DPM table, soft registers, MC register table, fan table and ARB DRAM
+ * timing table, plus the SMC microcode version.  Returns -EINVAL when
+ * the upload fails, 1 when any tracked header read failed, 0 otherwise.
+ */
+static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+       uint32_t tmp = 0;
+       int result;
+       bool error = false;
+
+       if (ci_upload_firmware(hwmgr))
+               return -EINVAL;
+
+       result = ci_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU7_Firmware_Header, DpmTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result)
+               ci_data->dpm_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = ci_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU7_Firmware_Header, SoftRegisters),
+                               &tmp, SMC_RAM_END);
+
+       /* the soft-register base is cached in both backends */
+       if (0 == result) {
+               data->soft_regs_start = tmp;
+               ci_data->soft_regs_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = ci_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU7_Firmware_Header, mcRegisterTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result)
+               ci_data->mc_reg_table_start = tmp;
+
+       /* NOTE(review): unlike the other reads, the mcRegisterTable status
+        * is not folded into 'error' — presumably that table is optional,
+        * but confirm against the other smu7 smumgrs.
+        */
+       result = ci_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU7_Firmware_Header, FanTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result)
+               ci_data->fan_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = ci_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result)
+               ci_data->arb_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = ci_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU7_Firmware_Header, Version),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result)
+               hwmgr->microcode_version_info.SMC = tmp;
+
+       error |= (0 != result);
+
+       return error ? 1 : 0;
+}
+
+/* Read the memory module index from BIOS scratch register 4, bits 23:16.
+ * (The "modile" typo is kept — the name is part of the interface.)
+ */
+static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+       uint32_t scratch = cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4);
+
+       return (uint8_t)((scratch >> 16) & 0xFF);
+}
+
+/* Map an MC sequencer register onto its shadow (_LP) counterpart used for
+ * the S0 column of the MC register table.  Returns false when the
+ * register has no shadow; *out_reg is written only on success.
+ */
+static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+       static const struct {
+               uint16_t reg;
+               uint16_t shadow;
+       } shadow_map[] = {
+               { mmMC_SEQ_RAS_TIMING,   mmMC_SEQ_RAS_TIMING_LP },
+               { mmMC_SEQ_DLL_STBY,     mmMC_SEQ_DLL_STBY_LP },
+               { mmMC_SEQ_G5PDX_CMD0,   mmMC_SEQ_G5PDX_CMD0_LP },
+               { mmMC_SEQ_G5PDX_CMD1,   mmMC_SEQ_G5PDX_CMD1_LP },
+               { mmMC_SEQ_G5PDX_CTRL,   mmMC_SEQ_G5PDX_CTRL_LP },
+               { mmMC_SEQ_CAS_TIMING,   mmMC_SEQ_CAS_TIMING_LP },
+               { mmMC_SEQ_MISC_TIMING,  mmMC_SEQ_MISC_TIMING_LP },
+               { mmMC_SEQ_MISC_TIMING2, mmMC_SEQ_MISC_TIMING2_LP },
+               { mmMC_SEQ_PMG_DVS_CMD,  mmMC_SEQ_PMG_DVS_CMD_LP },
+               { mmMC_SEQ_PMG_DVS_CTL,  mmMC_SEQ_PMG_DVS_CTL_LP },
+               { mmMC_SEQ_RD_CTL_D0,    mmMC_SEQ_RD_CTL_D0_LP },
+               { mmMC_SEQ_RD_CTL_D1,    mmMC_SEQ_RD_CTL_D1_LP },
+               { mmMC_SEQ_WR_CTL_D0,    mmMC_SEQ_WR_CTL_D0_LP },
+               { mmMC_SEQ_WR_CTL_D1,    mmMC_SEQ_WR_CTL_D1_LP },
+               { mmMC_PMG_CMD_EMRS,     mmMC_SEQ_PMG_CMD_EMRS_LP },
+               { mmMC_PMG_CMD_MRS,      mmMC_SEQ_PMG_CMD_MRS_LP },
+               { mmMC_PMG_CMD_MRS1,     mmMC_SEQ_PMG_CMD_MRS1_LP },
+               { mmMC_SEQ_PMG_TIMING,   mmMC_SEQ_PMG_TIMING_LP },
+               { mmMC_PMG_CMD_MRS2,     mmMC_SEQ_PMG_CMD_MRS2_LP },
+               { mmMC_SEQ_WR_CTL_2,     mmMC_SEQ_WR_CTL_2_LP },
+       };
+       uint32_t i;
+
+       for (i = 0; i < ARRAY_SIZE(shadow_map); i++) {
+               if (shadow_map[i].reg == in_reg) {
+                       *out_reg = shadow_map[i].shadow;
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+/* Fill the s0 (shadow) address for every MC register in the table,
+ * falling back to the s1 address when no shadow register exists.
+ */
+static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
+{
+       uint32_t idx;
+
+       for (idx = 0; idx < table->last; idx++) {
+               uint16_t shadow;
+
+               if (ci_check_s0_mc_reg_index(table->mc_reg_address[idx].s1, &shadow))
+                       table->mc_reg_address[idx].s0 = shadow;
+               else
+                       table->mc_reg_address[idx].s0 = table->mc_reg_address[idx].s1;
+       }
+       return 0;
+}
+
+static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+                                       struct ci_mc_reg_table *ni_table)
+{
+       uint8_t i, j;
+
+       PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+               "Invalid VramInfo table.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+               "Invalid VramInfo table.", return -EINVAL);
+
+       for (i = 0; i < table->last; i++)
+               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+       ni_table->last = table->last;
+
+       for (i = 0; i < table->num_entries; i++) {
+               ni_table->mc_reg_table_entry[i].mclk_max =
+                       table->mc_reg_table_entry[i].mclk_max;
+               for (j = 0; j < table->last; j++) {
+                       ni_table->mc_reg_table_entry[i].mc_data[j] =
+                               table->mc_reg_table_entry[i].mc_data[j];
+               }
+       }
+
+       ni_table->num_entries = table->num_entries;
+
+       return 0;
+}
+
+static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+                                       struct ci_mc_reg_table *table)
+{
+       uint8_t i, j, k;
+       uint32_t temp_reg;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       for (i = 0, j = table->last; i < table->last; i++) {
+               PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                       "Invalid VramInfo table.", return -EINVAL);
+
+               switch (table->mc_reg_address[i].s1) {
+
+               case mmMC_SEQ_MISC1:
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       ((temp_reg & 0xffff0000)) |
+                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+                               if (!data->is_memory_gddr5)
+                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+                               for (k = 0; k < table->num_entries; k++) {
+                                       table->mc_reg_table_entry[k].mc_data[j] =
+                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+                               }
+                               j++;
+                               PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                                       "Invalid VramInfo table.", return -EINVAL);
+                       }
+
+                       break;
+
+               case mmMC_SEQ_RESERVE_M:
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+                       break;
+
+               default:
+                       break;
+               }
+
+       }
+
+       table->last = j;
+
+       return 0;
+}
+
+static int ci_set_valid_flag(struct ci_mc_reg_table *table)
+{
+       uint8_t i, j;
+
+       for (i = 0; i < table->last; i++) {
+               for (j = 1; j < table->num_entries; j++) {
+                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+                               table->mc_reg_table_entry[j].mc_data[i]) {
+                               table->validflag |= (1 << i);
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+       pp_atomctrl_mc_reg_table *table;
+       struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+       uint8_t module_index = ci_get_memory_modile_index(hwmgr);
+
+       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+       if (NULL == table)
+               return -ENOMEM;
+
+       /* Program additional LP registers that are no longer programmed by VBIOS */
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+       memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+       if (0 == result)
+               result = ci_copy_vbios_smc_reg_table(table, ni_table);
+
+       if (0 == result) {
+               ci_set_s0_mc_reg_index(ni_table);
+               result = ci_set_mc_special_registers(hwmgr, ni_table);
+       }
+
+       if (0 == result)
+               ci_set_valid_flag(ni_table);
+
+       kfree(table);
+
+       return result;
+}
+
+static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return ci_is_smc_ram_running(hwmgr);
+}
+
+static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+                                               struct amd_pp_profile *request)
+{
+       struct ci_smumgr *smu_data = (struct ci_smumgr *)
+                       (hwmgr->smu_backend);
+       struct SMU7_Discrete_GraphicsLevel *levels =
+                       smu_data->smc_state_table.GraphicsLevel;
+       uint32_t array = smu_data->dpm_table_start +
+                       offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+       uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+                       SMU7_MAX_LEVELS_GRAPHICS;
+       uint32_t i;
+
+       for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+               levels[i].ActivityLevel =
+                               cpu_to_be16(request->activity_threshold);
+               levels[i].EnabledForActivity = 1;
+               levels[i].UpH = request->up_hyst;
+               levels[i].DownH = request->down_hyst;
+       }
+
+       return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+                               array_size, SMC_RAM_END);
+}
+
+
+static int ci_smu_init(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct ci_smumgr *ci_priv = NULL;
+
+       ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
+
+       if (ci_priv == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+               ci_priv->activity_target[i] = 30;
+
+       hwmgr->smu_backend = ci_priv;
+
+       return 0;
+}
+
+static int ci_smu_fini(struct pp_hwmgr *hwmgr)
+{
+       kfree(hwmgr->smu_backend);
+       hwmgr->smu_backend = NULL;
+       cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+       return 0;
+}
+
+static int ci_start_smu(struct pp_hwmgr *hwmgr)
+{
+       return 0;
+}
+
+const struct pp_smumgr_func ci_smu_funcs = {
+       .smu_init = ci_smu_init,
+       .smu_fini = ci_smu_fini,
+       .start_smu = ci_start_smu,
+       .check_fw_load_finish = NULL,
+       .request_smu_load_fw = NULL,
+       .request_smu_load_specific_fw = NULL,
+       .send_msg_to_smc = ci_send_msg_to_smc,
+       .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
+       .download_pptable_settings = NULL,
+       .upload_pptable_settings = NULL,
+       .get_offsetof = ci_get_offsetof,
+       .process_firmware_header = ci_process_firmware_header,
+       .init_smc_table = ci_init_smc_table,
+       .update_sclk_threshold = ci_update_sclk_threshold,
+       .thermal_setup_fan_table = ci_thermal_setup_fan_table,
+       .populate_all_graphic_levels = ci_populate_all_graphic_levels,
+       .populate_all_memory_levels = ci_populate_all_memory_levels,
+       .get_mac_definition = ci_get_mac_definition,
+       .initialize_mc_reg_table = ci_initialize_mc_reg_table,
+       .is_dpm_running = ci_is_dpm_running,
+       .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
deleted file mode 100644 (file)
index b1a66b5..0000000
+++ /dev/null
@@ -1,2486 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "fiji_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "fiji_smumgr.h"
-#include "pppcielanes.h"
-#include "smu7_ppsmc.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "smu7_smumgr.h"
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX    1
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-#define VDDC_VDDCI_DELTA            300
-#define MC_CG_ARB_FREQ_F1           0x0b
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
- * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
-                               {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
-       { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-       { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
-                               {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-               /*sviLoadLIneEn,  SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
-               {1,               0xF,             0xFD,
-               /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
-               0x19,        5,               45}
-};
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- *  This is the unit expected by SMC firmware
- */
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-               struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
-               uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
-{
-       uint32_t i;
-       uint16_t vddci;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       *voltage = *mvdd = 0;
-
-
-       /* clock - voltage dependency table is empty table */
-       if (dep_table->count == 0)
-               return -EINVAL;
-
-       for (i = 0; i < dep_table->count; i++) {
-               /* find first sclk bigger than request */
-               if (dep_table->entries[i].clk >= clock) {
-                       *voltage |= (dep_table->entries[i].vddc *
-                                       VOLTAGE_SCALE) << VDDC_SHIFT;
-                       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
-                               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       else if (dep_table->entries[i].vddci)
-                               *voltage |= (dep_table->entries[i].vddci *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       else {
-                               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               (dep_table->entries[i].vddc -
-                                                               VDDC_VDDCI_DELTA));
-                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       }
-
-                       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-                               *mvdd = data->vbios_boot_state.mvdd_bootup_value *
-                                       VOLTAGE_SCALE;
-                       else if (dep_table->entries[i].mvdd)
-                               *mvdd = (uint32_t) dep_table->entries[i].mvdd *
-                                       VOLTAGE_SCALE;
-
-                       *voltage |= 1 << PHASES_SHIFT;
-                       return 0;
-               }
-       }
-
-       /* sclk is bigger than max sclk in the dependence table */
-       *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
-               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
-                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-       else if (dep_table->entries[i-1].vddci) {
-               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-                               (dep_table->entries[i].vddc -
-                                               VDDC_VDDCI_DELTA));
-               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-       }
-
-       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-               *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-       else if (dep_table->entries[i].mvdd)
-               *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
-       return 0;
-}
-
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
-       uint32_t tmp;
-       tmp = raw_setting * 4096 / 100;
-       return (uint16_t)tmp;
-}
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
-{
-       switch (line) {
-       case SMU7_I2CLineID_DDC1:
-               *scl = SMU7_I2C_DDC1CLK;
-               *sda = SMU7_I2C_DDC1DATA;
-               break;
-       case SMU7_I2CLineID_DDC2:
-               *scl = SMU7_I2C_DDC2CLK;
-               *sda = SMU7_I2C_DDC2DATA;
-               break;
-       case SMU7_I2CLineID_DDC3:
-               *scl = SMU7_I2C_DDC3CLK;
-               *sda = SMU7_I2C_DDC3DATA;
-               break;
-       case SMU7_I2CLineID_DDC4:
-               *scl = SMU7_I2C_DDC4CLK;
-               *sda = SMU7_I2C_DDC4DATA;
-               break;
-       case SMU7_I2CLineID_DDC5:
-               *scl = SMU7_I2C_DDC5CLK;
-               *sda = SMU7_I2C_DDC5DATA;
-               break;
-       case SMU7_I2CLineID_DDC6:
-               *scl = SMU7_I2C_DDC6CLK;
-               *sda = SMU7_I2C_DDC6DATA;
-               break;
-       case SMU7_I2CLineID_SCLSDA:
-               *scl = SMU7_I2C_SCL;
-               *sda = SMU7_I2C_SDA;
-               break;
-       case SMU7_I2CLineID_DDCVGA:
-               *scl = SMU7_I2C_DDCVGACLK;
-               *sda = SMU7_I2C_DDCVGADATA;
-               break;
-       default:
-               *scl = 0;
-               *sda = 0;
-               break;
-       }
-}
-
-static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (table_info &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID)
-               smu_data->power_tune_defaults =
-                               &fiji_power_tune_data_set_array
-                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
-       else
-               smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
-}
-
-static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       SMU73_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
-
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
-       struct pp_advance_fan_control_parameters *fan_table =
-                       &hwmgr->thermal_controller.advanceFanControlParameters;
-       uint8_t uc_scl, uc_sda;
-
-       /* TDP number of fraction bits are changed from 8 to 7 for Fiji
-        * as requested by SMC team
-        */
-       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
-                       (uint16_t)(cac_dtp_table->usTDP * 128));
-       dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
-                       (uint16_t)(cac_dtp_table->usTDP * 128));
-
-       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
-                       "Target Operating Temp is out of Range!",
-                       );
-
-       dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
-       dpm_table->GpuTjHyst = 8;
-
-       dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
-       /* The following are for new Fiji Multi-input fan/thermal control */
-       dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTargetOperatingTemp * 256);
-       dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitHotspot * 256);
-       dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitLiquid1 * 256);
-       dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitLiquid2 * 256);
-       dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitVrVddc * 256);
-       dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitVrMvdd * 256);
-       dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitPlx * 256);
-
-       dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainEdge));
-       dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainHotspot));
-       dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainLiquid));
-       dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainVrVddc));
-       dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
-       dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainPlx));
-       dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainHbm));
-
-       dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
-       dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
-       dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
-       dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
-       get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
-       dpm_table->Liquid_I2C_LineSCL = uc_scl;
-       dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
-       get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
-       dpm_table->Vr_I2C_LineSCL = uc_scl;
-       dpm_table->Vr_I2C_LineSDA = uc_sda;
-
-       get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
-       dpm_table->Plx_I2C_LineSCL = uc_scl;
-       dpm_table->Plx_I2C_LineSDA = uc_sda;
-
-       return 0;
-}
-
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
-       smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
-       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
-       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-       return 0;
-}
-
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-       uint16_t tdc_limit;
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       /* TDC number of fraction bits are changed from 8 to 7
-        * for Fiji as requested by SMC team
-        */
-       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
-       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
-                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-                       defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
-       smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
-       return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-       uint32_t temp;
-
-       if (smu7_read_smc_sram_dword(hwmgr,
-                       fuse_table_offset +
-                       offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
-                       (uint32_t *)&temp, SMC_RAM_END))
-               PP_ASSERT_WITH_CODE(false,
-                               "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
-                               return -EINVAL);
-       else {
-               smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
-               smu_data->power_tune_table.LPMLTemperatureMin =
-                               (uint8_t)((temp >> 16) & 0xff);
-               smu_data->power_tune_table.LPMLTemperatureMax =
-                               (uint8_t)((temp >> 8) & 0xff);
-               smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
-       }
-       return 0;
-}
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
-       return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-
-       if ((hwmgr->thermal_controller.advanceFanControlParameters.
-                       usFanOutputSensitivity & (1 << 15)) ||
-                       0 == hwmgr->thermal_controller.advanceFanControlParameters.
-                       usFanOutputSensitivity)
-               hwmgr->thermal_controller.advanceFanControlParameters.
-               usFanOutputSensitivity = hwmgr->thermal_controller.
-                       advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
-       smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
-                       PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
-                                       advanceFanControlParameters.usFanOutputSensitivity);
-       return 0;
-}
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               smu_data->power_tune_table.GnbLPML[i] = 0;
-
-       return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-       uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
-       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
-       HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-       LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
-       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
-       return 0;
-}
-
-static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-       uint32_t pm_fuse_table_offset;
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               if (smu7_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU73_Firmware_Header, PmFuseTable),
-                               &pm_fuse_table_offset, SMC_RAM_END))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to get pm_fuse_table_offset Failed!",
-                                       return -EINVAL);
-
-               /* DW6 */
-               if (fiji_populate_svi_load_line(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate SviLoadLine Failed!",
-                                       return -EINVAL);
-               /* DW7 */
-               if (fiji_populate_tdc_limit(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
-               /* DW8 */
-               if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TdcWaterfallCtl, "
-                                       "LPMLTemperature Min and Max Failed!",
-                                       return -EINVAL);
-
-               /* DW9-DW12 */
-               if (0 != fiji_populate_temperature_scaler(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate LPMLTemperatureScaler Failed!",
-                                       return -EINVAL);
-
-               /* DW13-DW14 */
-               if (fiji_populate_fuzzy_fan(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate Fuzzy Fan Control parameters Failed!",
-                                       return -EINVAL);
-
-               /* DW15-DW18 */
-               if (fiji_populate_gnb_lpml(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate GnbLPML Failed!",
-                                       return -EINVAL);
-
-               /* DW20 */
-               if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
-                                       "Sidd Failed!", return -EINVAL);
-
-               if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
-                               (uint8_t *)&smu_data->power_tune_table,
-                               sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to download PmFuseTable Failed!",
-                                       return -EINVAL);
-       }
-       return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    table  the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       uint32_t count;
-       uint8_t index;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
-                       table_info->vddc_lookup_table;
-       /* tables is already swapped, so in order to use the value from it,
-        * we need to swap it back.
-        * We are populating vddc CAC data to BapmVddc table
-        * in split and merged mode
-        */
-
-       for (count = 0; count < lookup_table->count; count++) {
-               index = phm_get_voltage_index(lookup_table,
-                               data->vddc_voltage_table.entries[count].value);
-               table->BapmVddcVidLoSidd[count] =
-                       convert_to_vid(lookup_table->entries[index].us_cac_low);
-               table->BapmVddcVidHiSidd[count] =
-                       convert_to_vid(lookup_table->entries[index].us_cac_high);
-       }
-
-       return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always  0
-*/
-
-static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       int result;
-
-       result = fiji_populate_cac_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "can not populate CAC voltage tables to SMC",
-                       return -EINVAL);
-
-       return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_Ulv *state)
-{
-       int result = 0;
-
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       state->CcPwrDynRm = 0;
-       state->CcPwrDynRm1 = 0;
-
-       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
-       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
-                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
-       state->VddcPhase = 1;
-
-       if (!result) {
-               CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-               CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-               CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-       }
-       return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       int i;
-
-       /* Index (dpm_table->pcie_speed_table.count)
-        * is reserved for PCIE boot level. */
-       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-               table->LinkLevel[i].PcieGenSpeed  =
-                               (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-               table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
-                               dpm_table->pcie_speed_table.dpm_levels[i].param1);
-               table->LinkLevel[i].EnabledForActivity = 1;
-               table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
-               table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
-               table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
-       }
-
-       smu_data->smc_state_table.LinkLevelCount =
-                       (uint8_t)dpm_table->pcie_speed_table.count;
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-       return 0;
-}
-
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    clock  the engine clock to use to populate the structure
-* @param    sclk   the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-               uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       uint32_t ref_clock;
-       uint32_t ref_divider;
-       uint32_t fbdiv;
-       int result;
-
-       /* get the engine clock dividers for this clock value */
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error retrieving Engine Clock dividers from VBIOS.",
-                       return result);
-
-       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
-       ref_clock = atomctrl_get_reference_clock(hwmgr);
-       ref_divider = 1 + dividers.uc_pll_ref_div;
-
-       /* low 14 bits is fraction and high 12 bits is divider */
-       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
-       /* SPLL_FUNC_CNTL setup */
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_REF_DIV, dividers.uc_pll_ref_div);
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_PDIV_A,  dividers.uc_pll_post_div);
-
-       /* SPLL_FUNC_CNTL_3 setup*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
-                       SPLL_FB_DIV, fbdiv);
-
-       /* set to use fractional accumulation*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
-                       SPLL_DITHEN, 1);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
-               struct pp_atomctrl_internal_ss_info ssInfo;
-
-               uint32_t vco_freq = clock * dividers.uc_pll_post_div;
-               if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
-                               vco_freq, &ssInfo)) {
-                       /*
-                        * ss_info.speed_spectrum_percentage -- in unit of 0.01%
-                        * ss_info.speed_spectrum_rate -- in unit of khz
-                        *
-                        * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
-                        */
-                       uint32_t clk_s = ref_clock * 5 /
-                                       (ref_divider * ssInfo.speed_spectrum_rate);
-                       /* clkv = 2 * D * fbdiv / NS */
-                       uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
-                                       fbdiv / (clk_s * 10000);
-
-                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
-                                       CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
-                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
-                                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
-                       cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
-                                       CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
-               }
-       }
-
-       sclk->SclkFrequency        = clock;
-       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
-       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
-       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
-       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
-       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
-
-       return 0;
-}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    clock the engine clock to use to populate the structure
-* @param    sclk        the SMC SCLK structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, uint16_t sclk_al_threshold,
-               struct SMU73_Discrete_GraphicsLevel *level)
-{
-       int result;
-       /* PP_Clocks minClocks; */
-       uint32_t threshold, mvdd;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       result = fiji_calculate_sclk_params(hwmgr, clock, level);
-
-       /* populate graphics levels */
-       result = fiji_get_dependency_volt_by_clk(hwmgr,
-                       table_info->vdd_dep_on_sclk, clock,
-                       (uint32_t *)(&level->MinVoltage), &mvdd);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find VDDC voltage value for "
-                       "VDDC engine clock dependency table",
-                       return result);
-
-       level->SclkFrequency = clock;
-       level->ActivityLevel = sclk_al_threshold;
-       level->CcPwrDynRm = 0;
-       level->CcPwrDynRm1 = 0;
-       level->EnabledForActivity = 0;
-       level->EnabledForThrottle = 1;
-       level->UpHyst = 10;
-       level->DownHyst = 0;
-       level->VoltageDownHyst = 0;
-       level->PowerThrottle = 0;
-
-       threshold = clock * data->fast_watermark_threshold / 100;
-
-       data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
-               level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
-                                                               hwmgr->display_config.min_core_set_clock_in_sr);
-
-
-       /* Default to slow, highest DPM level will be
-        * set to PPSMC_DISPLAY_WATERMARK_LOW later.
-        */
-       level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
-       return 0;
-}
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-       uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
-       int result = 0;
-       uint32_t array = smu_data->smu7_data.dpm_table_start +
-                       offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
-       uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
-                       SMU73_MAX_LEVELS_GRAPHICS;
-       struct SMU73_Discrete_GraphicsLevel *levels =
-                       smu_data->smc_state_table.GraphicsLevel;
-       uint32_t i, max_entry;
-       uint8_t hightest_pcie_level_enabled = 0,
-                       lowest_pcie_level_enabled = 0,
-                       mid_pcie_level_enabled = 0,
-                       count = 0;
-
-       for (i = 0; i < dpm_table->sclk_table.count; i++) {
-               result = fiji_populate_single_graphic_level(hwmgr,
-                               dpm_table->sclk_table.dpm_levels[i].value,
-                               (uint16_t)smu_data->activity_target[i],
-                               &levels[i]);
-               if (result)
-                       return result;
-
-               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
-               if (i > 1)
-                       levels[i].DeepSleepDivId = 0;
-       }
-
-       /* Only enable level 0 for now.*/
-       levels[0].EnabledForActivity = 1;
-
-       /* set highest level watermark to high */
-       levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
-                       PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       smu_data->smc_state_table.GraphicsDpmLevelCount =
-                       (uint8_t)dpm_table->sclk_table.count;
-       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-       if (pcie_table != NULL) {
-               PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
-                               "There must be 1 or more PCIE levels defined in PPTable.",
-                               return -EINVAL);
-               max_entry = pcie_entry_cnt - 1;
-               for (i = 0; i < dpm_table->sclk_table.count; i++)
-                       levels[i].pcieDpmLevel =
-                                       (uint8_t) ((i < max_entry) ? i : max_entry);
-       } else {
-               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << (hightest_pcie_level_enabled + 1))) != 0))
-                       hightest_pcie_level_enabled++;
-
-               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << lowest_pcie_level_enabled)) == 0))
-                       lowest_pcie_level_enabled++;
-
-               while ((count < hightest_pcie_level_enabled) &&
-                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
-                                               (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
-                       count++;
-
-               mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
-                               hightest_pcie_level_enabled ?
-                                               (lowest_pcie_level_enabled + 1 + count) :
-                                               hightest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to hightest_pcie_level_enabled */
-               for (i = 2; i < dpm_table->sclk_table.count; i++)
-                       levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to lowest_pcie_level_enabled */
-               levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
-               /* set pcieDpmLevel to mid_pcie_level_enabled */
-               levels[1].pcieDpmLevel = mid_pcie_level_enabled;
-       }
-       /* level count will send to smc once at init smc table and never change */
-       result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
-                       (uint32_t)array_size, SMC_RAM_END);
-
-       return result;
-}
-
-
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP  Bit[31:24] - 0x0
- * Bit[27:24] \96 DDR3 Frequency ratio
- * 0x0 <= 100MHz,       450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz,       500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz,       550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz,       600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz,       650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz,       700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz,       750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz,       800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
-       if (mem_clock <= 10000)
-               return 0x0;
-       if (mem_clock <= 15000)
-               return 0x1;
-       if (mem_clock <= 20000)
-               return 0x2;
-       if (mem_clock <= 25000)
-               return 0x3;
-       if (mem_clock <= 30000)
-               return 0x4;
-       if (mem_clock <= 35000)
-               return 0x5;
-       if (mem_clock <= 40000)
-               return 0x6;
-       if (mem_clock <= 45000)
-               return 0x7;
-       if (mem_clock <= 50000)
-               return 0x8;
-       if (mem_clock <= 55000)
-               return 0x9;
-       if (mem_clock <= 60000)
-               return 0xa;
-       if (mem_clock <= 65000)
-               return 0xb;
-       if (mem_clock <= 70000)
-               return 0xc;
-       if (mem_clock <= 75000)
-               return 0xd;
-       if (mem_clock <= 80000)
-               return 0xe;
-       /* mem_clock > 800MHz */
-       return 0xf;
-}
-
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    clock   the memory clock to use to populate the structure
-* @param    sclk    the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
-    uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
-       struct pp_atomctrl_memory_clock_param mem_param;
-       int result;
-
-       result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "Failed to get Memory PLL Dividers.",
-                       );
-
-       /* Save the result data to outpupt memory level structure */
-       mclk->MclkFrequency   = clock;
-       mclk->MclkDivider     = (uint8_t)mem_param.mpll_post_divider;
-       mclk->FreqRange       = fiji_get_mclk_frequency_ratio(clock);
-
-       return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int result = 0;
-       uint32_t mclk_stutter_mode_threshold = 60000;
-
-       if (table_info->vdd_dep_on_mclk) {
-               result = fiji_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_mclk, clock,
-                               (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find MinVddc voltage value from memory "
-                               "VDDC voltage dependency table", return result);
-       }
-
-       mem_level->EnabledForThrottle = 1;
-       mem_level->EnabledForActivity = 0;
-       mem_level->UpHyst = 0;
-       mem_level->DownHyst = 100;
-       mem_level->VoltageDownHyst = 0;
-       mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-       mem_level->StutterEnable = false;
-
-       mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       /* enable stutter mode if all the follow condition applied
-        * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
-        * &(data->DisplayTiming.numExistingDisplays));
-        */
-       data->display_timing.num_existing_displays = 1;
-
-       if (mclk_stutter_mode_threshold &&
-               (clock <= mclk_stutter_mode_threshold) &&
-               (!data->is_uvd_enabled) &&
-               (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
-                               STUTTER_ENABLE) & 0x1))
-               mem_level->StutterEnable = true;
-
-       result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
-       if (!result) {
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
-       }
-       return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       int result;
-       /* populate MCLK dpm table to SMU7 */
-       uint32_t array = smu_data->smu7_data.dpm_table_start +
-                       offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
-       uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
-                       SMU73_MAX_LEVELS_MEMORY;
-       struct SMU73_Discrete_MemoryLevel *levels =
-                       smu_data->smc_state_table.MemoryLevel;
-       uint32_t i;
-
-       for (i = 0; i < dpm_table->mclk_table.count; i++) {
-               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-                               "can not populate memory level as memory clock is zero",
-                               return -EINVAL);
-               result = fiji_populate_single_memory_level(hwmgr,
-                               dpm_table->mclk_table.dpm_levels[i].value,
-                               &levels[i]);
-               if (result)
-                       return result;
-       }
-
-       /* Only enable level 0 for now. */
-       levels[0].EnabledForActivity = 1;
-
-       /* in order to prevent MC activity from stutter mode to push DPM up.
-        * the UVD change complements this by putting the MCLK in
-        * a higher state by default such that we are not effected by
-        * up threshold or and MCLK DPM latency.
-        */
-       levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
-       CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
-       smu_data->smc_state_table.MemoryDpmLevelCount =
-                       (uint8_t)dpm_table->mclk_table.count;
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-       /* set highest level watermark to high */
-       levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
-                       PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       /* level count will send to smc once at init smc table and never change */
-       result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
-                       (uint32_t)array_size, SMC_RAM_END);
-
-       return result;
-}
-
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
-* @param    voltage     the SMC VOLTAGE structure to be populated
-*/
-static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
-               uint32_t mclk, SMIO_Pattern *smio_pat)
-{
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i = 0;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-               /* find mvdd value which clock is more than request */
-               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
-                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
-                               smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
-                               break;
-                       }
-               }
-               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
-                               "MVDD Voltage is outside the supported range.",
-                               return -EINVAL);
-       } else
-               return -EINVAL;
-
-       return 0;
-}
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = 0;
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       SMIO_Pattern vol_level;
-       uint32_t mvdd;
-       uint16_t us_mvdd;
-       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
-       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-       if (!data->sclk_dpm_key_disabled) {
-               /* Get MinVoltage and Frequency from DPM0,
-                * already converted to SMC_UL */
-               table->ACPILevel.SclkFrequency =
-                               data->dpm_table.sclk_table.dpm_levels[0].value;
-               result = fiji_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_sclk,
-                               table->ACPILevel.SclkFrequency,
-                               (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Cannot find ACPI VDDC voltage value " \
-                               "in Clock Dependency Table",
-                               );
-       } else {
-               table->ACPILevel.SclkFrequency =
-                               data->vbios_boot_state.sclk_bootup_value;
-               table->ACPILevel.MinVoltage =
-                               data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
-       }
-
-       /* get the engine clock dividers for this clock value */
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
-                       table->ACPILevel.SclkFrequency,  &dividers);
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error retrieving Engine Clock dividers from VBIOS.",
-                       return result);
-
-       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
-       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-       table->ACPILevel.DeepSleepDivId = 0;
-
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_PWRON, 0);
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
-                       SPLL_RESET, 1);
-       spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
-                       SCLK_MUX_SEL, 4);
-
-       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
-       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
-       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       table->ACPILevel.CcPwrDynRm = 0;
-       table->ACPILevel.CcPwrDynRm1 = 0;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-       if (!data->mclk_dpm_key_disabled) {
-               /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-               table->MemoryACPILevel.MclkFrequency =
-                               data->dpm_table.mclk_table.dpm_levels[0].value;
-               result = fiji_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_mclk,
-                               table->MemoryACPILevel.MclkFrequency,
-                       (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
-                               );
-       } else {
-               table->MemoryACPILevel.MclkFrequency =
-                               data->vbios_boot_state.mclk_bootup_value;
-               table->MemoryACPILevel.MinVoltage =
-                               data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
-       }
-
-       us_mvdd = 0;
-       if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
-                       (data->mclk_dpm_key_disabled))
-               us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-       else {
-               if (!fiji_populate_mvdd_value(hwmgr,
-                               data->dpm_table.mclk_table.dpm_levels[0].value,
-                               &vol_level))
-                       us_mvdd = vol_level.Voltage;
-       }
-
-       table->MemoryACPILevel.MinMvdd =
-                       PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
-       table->MemoryACPILevel.EnabledForThrottle = 0;
-       table->MemoryACPILevel.EnabledForActivity = 0;
-       table->MemoryACPILevel.UpHyst = 0;
-       table->MemoryACPILevel.DownHyst = 100;
-       table->MemoryACPILevel.VoltageDownHyst = 0;
-       table->MemoryACPILevel.ActivityLevel =
-                       PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-       table->MemoryACPILevel.StutterEnable = false;
-       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
-       return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-
-       table->VceLevelCount = (uint8_t)(mm_table->count);
-       table->VceBootLevel = 0;
-
-       for (count = 0; count < table->VceLevelCount; count++) {
-               table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
-               table->VceLevel[count].MinVoltage = 0;
-               table->VceLevel[count].MinVoltage |=
-                               (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->VceLevel[count].MinVoltage |=
-                               ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /*retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->VceLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for VCE engine clock",
-                               return result);
-
-               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-
-       table->AcpLevelCount = (uint8_t)(mm_table->count);
-       table->AcpBootLevel = 0;
-
-       for (count = 0; count < table->AcpLevelCount; count++) {
-               table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
-               table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->AcpLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for engine clock", return result);
-
-               table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
-               int32_t eng_clock, int32_t mem_clock,
-               struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
-       uint32_t dram_timing;
-       uint32_t dram_timing2;
-       uint32_t burstTime;
-       ULONG state, trrds, trrdl;
-       int result;
-
-       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-                       eng_clock, mem_clock);
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error calling VBIOS to set DRAM_TIMING.", return result);
-
-       dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-       dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-       burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
-       state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
-       trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
-       trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
-       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
-       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
-       arb_regs->McArbBurstTime   = (uint8_t)burstTime;
-       arb_regs->TRRDS            = (uint8_t)trrds;
-       arb_regs->TRRDL            = (uint8_t)trrdl;
-
-       return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
-       uint32_t i, j;
-       int result = 0;
-
-       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-                       result = fiji_populate_memory_timing_parameters(hwmgr,
-                                       data->dpm_table.sclk_table.dpm_levels[i].value,
-                                       data->dpm_table.mclk_table.dpm_levels[j].value,
-                                       &arb_regs.entries[i][j]);
-                       if (result)
-                               break;
-               }
-       }
-
-       if (!result)
-               result = smu7_copy_bytes_to_smc(
-                               hwmgr,
-                               smu_data->smu7_data.arb_table_start,
-                               (uint8_t *)&arb_regs,
-                               sizeof(SMU73_Discrete_MCArbDramTimingTable),
-                               SMC_RAM_END);
-       return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-
-       table->UvdLevelCount = (uint8_t)(mm_table->count);
-       table->UvdBootLevel = 0;
-
-       for (count = 0; count < table->UvdLevelCount; count++) {
-               table->UvdLevel[count].MinVoltage = 0;
-               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-               table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->UvdLevel[count].VclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for Vclk clock", return result);
-
-               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->UvdLevel[count].DclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for Dclk clock", return result);
-
-               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
-       }
-       return result;
-}
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       int result = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       table->GraphicsBootLevel = 0;
-       table->MemoryBootLevel = 0;
-
-       /* find boot level from dpm table */
-       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
-                       data->vbios_boot_state.sclk_bootup_value,
-                       (uint32_t *)&(table->GraphicsBootLevel));
-
-       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
-                       data->vbios_boot_state.mclk_bootup_value,
-                       (uint32_t *)&(table->MemoryBootLevel));
-
-       table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
-                       VOLTAGE_SCALE;
-       table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
-                       VOLTAGE_SCALE;
-       table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
-                       VOLTAGE_SCALE;
-
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-       return 0;
-}
-
-static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint8_t count, level;
-
-       count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-       for (level = 0; level < count; level++) {
-               if (table_info->vdd_dep_on_sclk->entries[level].clk >=
-                               data->vbios_boot_state.sclk_bootup_value) {
-                       smu_data->smc_state_table.GraphicsBootLevel = level;
-                       break;
-               }
-       }
-
-       count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
-       for (level = 0; level < count; level++) {
-               if (table_info->vdd_dep_on_mclk->entries[level].clk >=
-                               data->vbios_boot_state.mclk_bootup_value) {
-                       smu_data->smc_state_table.MemoryBootLevel = level;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-       uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
-                       volt_with_cks, value;
-       uint16_t clock_freq_u16;
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
-                       volt_offset = 0;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-
-       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-       /* Read SMU_Eefuse to read and calculate RO and determine
-        * if the part is SS or FF. if RO >= 1660MHz, part is FF.
-        */
-       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (146 * 4));
-       efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (148 * 4));
-       efuse &= 0xFF000000;
-       efuse = efuse >> 24;
-       efuse2 &= 0xF;
-
-       if (efuse2 == 1)
-               ro = (2300 - 1350) * efuse / 255 + 1350;
-       else
-               ro = (2500 - 1000) * efuse / 255 + 1000;
-
-       if (ro >= 1660)
-               type = 0;
-       else
-               type = 1;
-
-       /* Populate Stretch amount */
-       smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
-       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
-       for (i = 0; i < sclk_table->count; i++) {
-               smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
-                               sclk_table->entries[i].cks_enable << i;
-               volt_without_cks = (uint32_t)((14041 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
-                       (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
-               volt_with_cks = (uint32_t)((13946 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
-                       (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
-               if (volt_without_cks >= volt_with_cks)
-                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-                                       sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
-               smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
-       }
-
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       STRETCH_ENABLE, 0x0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       staticEnable, 0x1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x0);
-
-       /* Populate CKS Lookup Table */
-       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-               stretch_amount2 = 0;
-       else if (stretch_amount == 3 || stretch_amount == 4)
-               stretch_amount2 = 1;
-       else {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ClockStretcher);
-               PP_ASSERT_WITH_CODE(false,
-                               "Stretch Amount in PPTable not supported\n",
-                               return -EINVAL);
-       }
-
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL);
-       value &= 0xFFC2FF87;
-       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
-                       fiji_clock_stretcher_lookup_table[stretch_amount2][0];
-       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
-                       fiji_clock_stretcher_lookup_table[stretch_amount2][1];
-       clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
-                       GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
-                       SclkFrequency) / 100);
-       if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
-                       clock_freq_u16 &&
-           fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
-                       clock_freq_u16) {
-               /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
-               value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
-               /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
-               value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
-               /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
-               value |= (fiji_clock_stretch_amount_conversion
-                               [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
-                                [stretch_amount]) << 3;
-       }
-       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
-                       CKS_LOOKUPTableEntry[0].minFreq);
-       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
-                       CKS_LOOKUPTableEntry[0].maxFreq);
-       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
-                       fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
-       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
-                       (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL, value);
-
-       /* Populate DDT Lookup Table */
-       for (i = 0; i < 4; i++) {
-               /* Assign the minimum and maximum VID stored
-                * in the last row of Clock Stretcher Voltage Table.
-                */
-               smu_data->smc_state_table.ClockStretcherDataTable.
-               ClockStretcherDataTableEntry[i].minVID =
-                               (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
-               smu_data->smc_state_table.ClockStretcherDataTable.
-               ClockStretcherDataTableEntry[i].maxVID =
-                               (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
-               /* Loop through each SCLK and check the frequency
-                * to see if it lies within the frequency for clock stretcher.
-                */
-               for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
-                       cks_setting = 0;
-                       clock_freq = PP_SMC_TO_HOST_UL(
-                                       smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
-                       /* Check the allowed frequency against the sclk level[j].
-                        *  Sclk's endianness has already been converted,
-                        *  and it's in 10Khz unit,
-                        *  as opposed to Data table, which is in Mhz unit.
-                        */
-                       if (clock_freq >=
-                                       (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
-                               cks_setting |= 0x2;
-                               if (clock_freq <
-                                               (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
-                                       cks_setting |= 0x1;
-                       }
-                       smu_data->smc_state_table.ClockStretcherDataTable.
-                       ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
-               }
-               CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
-                               ClockStretcherDataTable.
-                               ClockStretcherDataTableEntry[i].setting);
-       }
-
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
-       value &= 0xFFFFFFFE;
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
-       return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
-               struct SMU73_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint16_t config;
-
-       config = VR_MERGED_WITH_VDDC;
-       table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
-       /* Set Vddc Voltage Controller */
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               config = VR_SVI2_PLANE_1;
-               table->VRConfig |= config;
-       } else {
-               PP_ASSERT_WITH_CODE(false,
-                               "VDDC should be on SVI2 control in merged mode!",
-                               );
-       }
-       /* Set Vddci Voltage Controller */
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-               config = VR_SVI2_PLANE_2;  /* only in merged mode */
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               config = VR_SMIO_PATTERN_1;
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       } else {
-               config = VR_STATIC_VOLTAGE;
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       }
-       /* Set Mvdd Voltage Controller */
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-               config = VR_SVI2_PLANE_2;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               config = VR_SMIO_PATTERN_2;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       } else {
-               config = VR_STATIC_VOLTAGE;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       }
-
-       return 0;
-}
-
-static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       uint32_t tmp;
-       int result;
-
-       /* This is a read-modify-write on the first byte of the ARB table.
-        * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
-        * is the field 'current'.
-        * This solution is ugly, but we never write the whole table only
-        * individual fields in it.
-        * In reality this field should not be in that structure
-        * but in a soft register.
-        */
-       result = smu7_read_smc_sram_dword(hwmgr,
-                       smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
-       if (result)
-               return result;
-
-       tmp &= 0x00FFFFFF;
-       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-       return smu7_write_smc_sram_dword(hwmgr,
-                       smu_data->smu7_data.arb_table_start,  tmp, SMC_RAM_END);
-}
-
-static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       struct SMU73_Discrete_GraphicsLevel *levels =
-                               data->smc_state_table.GraphicsLevel;
-       unsigned min_level = 1;
-
-       hwmgr->default_gfx_power_profile.activity_threshold =
-                       be16_to_cpu(levels[0].ActivityLevel);
-       hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
-       hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
-       hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
-       hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
-       hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
-       /* Workaround compute SDMA instability: disable lowest SCLK
-        * DPM level. Optimize compute power profile: Use only highest
-        * 2 power levels (if more than 2 are available), Hysteresis:
-        * 0ms up, 5ms down
-        */
-       if (data->smc_state_table.GraphicsDpmLevelCount > 2)
-               min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
-       else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
-               min_level = 1;
-       else
-               min_level = 0;
-       hwmgr->default_compute_power_profile.min_sclk =
-                       be32_to_cpu(levels[min_level].SclkFrequency);
-       hwmgr->default_compute_power_profile.up_hyst = 0;
-       hwmgr->default_compute_power_profile.down_hyst = 5;
-
-       hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
-       hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-
-       return 0;
-}
-
-static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
-{
-       pp_atomctrl_voltage_table param_led_dpm;
-       int result = 0;
-       u32 mask = 0;
-
-       result = atomctrl_get_voltage_table_v3(hwmgr,
-                                              VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
-                                              &param_led_dpm);
-       if (result == 0) {
-               int i, j;
-               u32 tmp = param_led_dpm.mask_low;
-
-               for (i = 0, j = 0; i < 32; i++) {
-                       if (tmp & 1) {
-                               mask |= (i << (8 * j));
-                               if (++j >= 3)
-                                       break;
-                       }
-                       tmp >>= 1;
-               }
-       }
-       if (mask)
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                                                   PPSMC_MSG_LedConfig,
-                                                   mask);
-       return 0;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput  the pointer to input data (PowerState)
-* @return   always 0
-*/
-int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
-       uint8_t i;
-       struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
-       fiji_initialize_power_tune_defaults(hwmgr);
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
-               fiji_populate_smc_voltage_tables(hwmgr, table);
-
-       table->SystemFlags = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StepVddc))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-       if (data->is_memory_gddr5)
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-       if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
-               result = fiji_populate_ulv_state(hwmgr, table);
-               PP_ASSERT_WITH_CODE(0 == result,
-                               "Failed to initialize ULV state!", return result);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_ULV_PARAMETER, 0x40035);
-       }
-
-       result = fiji_populate_smc_link_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Link Level!", return result);
-
-       result = fiji_populate_all_graphic_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Graphics Level!", return result);
-
-       result = fiji_populate_all_memory_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Memory Level!", return result);
-
-       result = fiji_populate_smc_acpi_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize ACPI Level!", return result);
-
-       result = fiji_populate_smc_vce_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize VCE Level!", return result);
-
-       result = fiji_populate_smc_acp_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize ACP Level!", return result);
-
-       result = fiji_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize SAMU Level!", return result);
-
-       /* Since only the initial state is completely set up at this point
-        * (the other states are just copies of the boot state) we only
-        * need to populate the  ARB settings for the initial state.
-        */
-       result = fiji_program_memory_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to Write ARB settings for the initial state.", return result);
-
-       result = fiji_populate_smc_uvd_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize UVD Level!", return result);
-
-       result = fiji_populate_smc_boot_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Boot Level!", return result);
-
-       result = fiji_populate_smc_initailial_state(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Boot State!", return result);
-
-       result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate BAPM Parameters!", return result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ClockStretcher)) {
-               result = fiji_populate_clock_stretcher_data_table(hwmgr);
-               PP_ASSERT_WITH_CODE(0 == result,
-                               "Failed to populate Clock Stretcher Data Table!",
-                               return result);
-       }
-
-       table->GraphicsVoltageChangeEnable  = 1;
-       table->GraphicsThermThrottleEnable  = 1;
-       table->GraphicsInterval = 1;
-       table->VoltageInterval  = 1;
-       table->ThermalInterval  = 1;
-       table->TemperatureLimitHigh =
-                       table_info->cac_dtp_table->usTargetOperatingTemp *
-                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
-       table->TemperatureLimitLow  =
-                       (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
-       table->MemoryVoltageChangeEnable = 1;
-       table->MemoryInterval = 1;
-       table->VoltageResponseTime = 0;
-       table->PhaseResponseTime = 0;
-       table->MemoryThermThrottleEnable = 1;
-       table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
-       table->PCIeGenInterval = 1;
-       table->VRConfig = 0;
-
-       result = fiji_populate_vr_config(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate VRConfig setting!", return result);
-
-       table->ThermGpio = 17;
-       table->SclkStepSize = 0x4000;
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
-               table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_RegulatorHot);
-       } else {
-               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_RegulatorHot);
-       }
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-                       &gpio_pin)) {
-               table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_AutomaticDCTransition);
-       } else {
-               table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_AutomaticDCTransition);
-       }
-
-       /* Thermal Output GPIO */
-       if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
-                       &gpio_pin)) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ThermalOutGPIO);
-
-               table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-               /* For polarity read GPIOPAD_A with assigned Gpio pin
-                * since VBIOS will program this register to set 'inactive state',
-                * driver can then determine 'active state' from this and
-                * program SMU with correct polarity
-                */
-               table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
-                               (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-               /* if required, combine VRHot/PCC with thermal out GPIO */
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_RegulatorHot) &&
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_CombinePCCWithThermalSignal))
-                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-       } else {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ThermalOutGPIO);
-               table->ThermOutGpio = 17;
-               table->ThermOutPolarity = 1;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-       }
-
-       for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
-               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
-       result = smu7_copy_bytes_to_smc(hwmgr,
-                       smu_data->smu7_data.dpm_table_start +
-                       offsetof(SMU73_Discrete_DpmTable, SystemFlags),
-                       (uint8_t *)&(table->SystemFlags),
-                       sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
-                       SMC_RAM_END);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to upload dpm data to SMC memory!", return result);
-
-       result = fiji_init_arb_table_index(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to upload arb data to SMC memory!", return result);
-
-       result = fiji_populate_pm_fuses(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to  populate PM fuses to SMC memory!", return result);
-
-       result = fiji_setup_dpm_led_config(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                           "Failed to setup dpm led config", return result);
-
-       fiji_save_default_power_profile(hwmgr);
-
-       return 0;
-}
-
/**
 * fiji_thermal_setup_fan_table - build and upload the SMC fan control table.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Converts the thermal controller's advanced fan control parameters into an
 * SMU73_Discrete_FanTable and copies it into SMC RAM at fan_table_start so
 * the SMC microcode can drive the fan autonomously.  If any precondition
 * fails (no fan fitted, no fan table address, FMAX_DUTY100 reads back zero)
 * or the SMC upload/messages fail, the PHM_PlatformCaps_MicrocodeFanControl
 * cap is unset so the driver falls back to non-microcode fan control.
 *
 * Return: always 0; failures are signalled by unsetting the cap, not by the
 * return value.
 */
int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);

	/* First member (FdoMode) set to hardware control; rest zeroed. */
	SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	/* Board has no fan: nothing to program. */
	if (hwmgr->thermal_controller.fanInfo.bNoFan) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Firmware header did not provide a fan table location. */
	if (smu_data->smu7_data.fan_table_start == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Maximum fan duty cycle in hardware units (FDO counts). */
	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_FDO_CTRL1, FMAX_DUTY100);

	if (duty100 == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Scale usPWMMin (units of 0.01%) to FDO duty units.
	 * do_div() divides tmp64 in place (64-bit safe on 32-bit hosts). */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
			usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	/* Temperature spans of the two fan-curve segments (min->med, med->high).
	 * NOTE(review): assumes usTMed > usTMin and usTHigh > usTMed; a zero
	 * span would divide by zero below -- confirm the pptable guarantees it. */
	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	/* Matching PWM spans of the two segments. */
	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* Segment slopes; "+50 ... /100" rounds to nearest. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* Convert temperatures from units of 0.01 degC to degC, rounded,
	 * byte-swapped to the SMC's big-endian layout. */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->
			thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = smu7_get_xclk(hwmgr);

	/* Fan update period derived from the reference clock; the /1600
	 * scaling matches the SMC firmware's expectations -- TODO confirm
	 * exact units against the SMU73 firmware spec. */
	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
			thermal_controller.advanceFanControlParameters.ulCycleDelay *
			reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	/* Temperature source follows the hardware's TEMP_SEL setting. */
	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
			hwmgr->device, CGS_IND_REG__SMC,
			CG_MULT_THERMAL_CTRL, TEMP_SEL);

	res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
			SMC_RAM_END);

	/* Optional per-board limits, only sent if the upload succeeded. */
	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ucMinimumPWMLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetFanMinPwm,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ucMinimumPWMLimit);

	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetFanSclkTarget,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);

	/* Best-effort: on failure fall back to driver fan control, but still
	 * report success to the caller. */
	if (res)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);

	return 0;
}
-
-
-int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
-{
-       int ret;
-       struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
-       if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
-               return 0;
-
-       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
-
-       if (!ret)
-               /* If this param is not changed, this function could fire unnecessarily */
-               smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
-       return ret;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
-               return fiji_program_memory_timing_parameters(hwmgr);
-
-       return 0;
-}
-
-int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-
-       int result = 0;
-       uint32_t low_sclk_interrupt_threshold = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkThrottleLowNotification)
-               && (hwmgr->gfx_arbiter.sclk_threshold !=
-                               data->low_sclk_interrupt_threshold)) {
-               data->low_sclk_interrupt_threshold =
-                               hwmgr->gfx_arbiter.sclk_threshold;
-               low_sclk_interrupt_threshold =
-                               data->low_sclk_interrupt_threshold;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-               result = smu7_copy_bytes_to_smc(
-                               hwmgr,
-                               smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable,
-                                       LowSclkInterruptThreshold),
-                               (uint8_t *)&low_sclk_interrupt_threshold,
-                               sizeof(uint32_t),
-                               SMC_RAM_END);
-       }
-       result = fiji_program_mem_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((result == 0),
-                       "Failed to program memory timing parameters!",
-                       );
-       return result;
-}
-
-uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
-{
-       switch (type) {
-       case SMU_SoftRegisters:
-               switch (member) {
-               case HandshakeDisables:
-                       return offsetof(SMU73_SoftRegisters, HandshakeDisables);
-               case VoltageChangeTimeout:
-                       return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
-               case AverageGraphicsActivity:
-                       return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
-               case PreVBlankGap:
-                       return offsetof(SMU73_SoftRegisters, PreVBlankGap);
-               case VBlankTimeout:
-                       return offsetof(SMU73_SoftRegisters, VBlankTimeout);
-               case UcodeLoadStatus:
-                       return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
-               }
-       case SMU_Discrete_DpmTable:
-               switch (member) {
-               case UvdBootLevel:
-                       return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
-               case VceBootLevel:
-                       return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-               case LowSclkInterruptThreshold:
-                       return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
-               }
-       }
-       pr_warn("can't get the offset of type %x member %x\n", type, member);
-       return 0;
-}
-
-uint32_t fiji_get_mac_definition(uint32_t value)
-{
-       switch (value) {
-       case SMU_MAX_LEVELS_GRAPHICS:
-               return SMU73_MAX_LEVELS_GRAPHICS;
-       case SMU_MAX_LEVELS_MEMORY:
-               return SMU73_MAX_LEVELS_MEMORY;
-       case SMU_MAX_LEVELS_LINK:
-               return SMU73_MAX_LEVELS_LINK;
-       case SMU_MAX_ENTRIES_SMIO:
-               return SMU73_MAX_ENTRIES_SMIO;
-       case SMU_MAX_LEVELS_VDDC:
-               return SMU73_MAX_LEVELS_VDDC;
-       case SMU_MAX_LEVELS_VDDGFX:
-               return SMU73_MAX_LEVELS_VDDGFX;
-       case SMU_MAX_LEVELS_VDDCI:
-               return SMU73_MAX_LEVELS_VDDCI;
-       case SMU_MAX_LEVELS_MVDD:
-               return SMU73_MAX_LEVELS_MVDD;
-       }
-
-       pr_warn("can't get the mac of %x\n", value);
-       return 0;
-}
-
-
/**
 * fiji_update_uvd_smc_table - write the UVD boot DPM level into SMC RAM.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Picks the highest UVD MM dependency level as the boot level, patches the
 * UvdBootLevel byte inside the SMC's DPM table via an indirect-register
 * read-modify-write, and (when UVD DPM is disabled or a stable p-state is
 * requested) restricts the SMC to that single level via the enabled mask.
 *
 * Return: always 0.
 */
static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
{
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* Boot at the top MM dependency level, or 0 if the table is empty. */
	smu_data->smc_state_table.UvdBootLevel = 0;
	if (table_info->mm_dep_table->count > 0)
		smu_data->smc_state_table.UvdBootLevel =
				(uint8_t) (table_info->mm_dep_table->count - 1);
	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
						UvdBootLevel);
	/* Round the byte offset down to the containing dword: the indirect
	 * register interface is dword-granular. */
	mm_boot_level_offset /= 4;
	mm_boot_level_offset *= 4;
	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, mm_boot_level_offset);
	/* UvdBootLevel occupies bits 31:24 of that dword; preserve the rest. */
	mm_boot_level_value &= 0x00FFFFFF;
	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
	cgs_write_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

	/* With UVD DPM off or stable p-state on, pin the SMC to just the
	 * boot level via the enabled-level mask. */
	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM) ||
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState))
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_UVDDPM_SetEnabledMask,
				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
	return 0;
}
-
/**
 * fiji_update_vce_smc_table - write the VCE boot DPM level into SMC RAM.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * For a stable p-state the boot level is the highest MM dependency level,
 * otherwise 0.  The VceBootLevel byte is patched into the SMC's DPM table
 * with a dword read-modify-write, and under stable p-state the SMC is
 * pinned to that single level via the enabled mask.
 *
 * Return: always 0.
 */
static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
{
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
	uint32_t mm_boot_level_offset, mm_boot_level_value;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_StablePState))
		smu_data->smc_state_table.VceBootLevel =
			(uint8_t) (table_info->mm_dep_table->count - 1);
	else
		smu_data->smc_state_table.VceBootLevel = 0;

	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
					offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
	/* Dword-align the byte offset for the indirect register interface. */
	mm_boot_level_offset /= 4;
	mm_boot_level_offset *= 4;
	mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, mm_boot_level_offset);
	/* VceBootLevel occupies bits 23:16 of that dword; preserve the rest. */
	mm_boot_level_value &= 0xFF00FFFF;
	mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
	cgs_write_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_VCEDPM_SetEnabledMask,
				(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
	return 0;
}
-
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
-int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
-       switch (type) {
-       case SMU_UVD_TABLE:
-               fiji_update_uvd_smc_table(hwmgr);
-               break;
-       case SMU_VCE_TABLE:
-               fiji_update_vce_smc_table(hwmgr);
-               break;
-       case SMU_SAMU_TABLE:
-               fiji_update_samu_smc_table(hwmgr);
-               break;
-       default:
-               break;
-       }
-       return 0;
-}
-
-
/**
 * fiji_process_firmware_header - read table locations from the SMC firmware
 * header.
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Reads the offsets of the DPM table, soft registers, MC register table,
 * fan table and ARB DRAM timing table, plus the firmware version, from
 * SMU7_FIRMWARE_HEADER_LOCATION in SMC RAM and caches them in the backend
 * structures.
 *
 * Return: 0 if every mandatory read succeeded, -1 otherwise.
 */
int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
	uint32_t tmp;
	int result;
	/* Accumulates read failures; each mandatory read ORs its status in. */
	bool error = false;

	result = smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU73_Firmware_Header, DpmTable),
			&tmp, SMC_RAM_END);

	if (0 == result)
		smu_data->smu7_data.dpm_table_start = tmp;

	error |= (0 != result);

	result = smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU73_Firmware_Header, SoftRegisters),
			&tmp, SMC_RAM_END);

	if (!result) {
		/* Soft registers start is cached in both backends. */
		data->soft_regs_start = tmp;
		smu_data->smu7_data.soft_regs_start = tmp;
	}

	error |= (0 != result);

	result = smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU73_Firmware_Header, mcRegisterTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.mc_reg_table_start = tmp;

	/* NOTE(review): unlike the other reads, the MC register table read
	 * does not feed the error accumulator -- presumably optional, but
	 * worth confirming against the SMU73 firmware layout. */

	result = smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU73_Firmware_Header, FanTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.fan_table_start = tmp;

	error |= (0 != result);

	result = smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
			&tmp, SMC_RAM_END);

	if (!result)
		smu_data->smu7_data.arb_table_start = tmp;

	error |= (0 != result);

	result = smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU73_Firmware_Header, Version),
			&tmp, SMC_RAM_END);

	if (!result)
		hwmgr->microcode_version_info.SMC = tmp;

	error |= (0 != result);

	return error ? -1 : 0;
}
-
-int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-
-       /* Program additional LP registers
-        * that are no longer programmed by VBIOS
-        */
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
-       return 0;
-}
-
-bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
-                       ? true : false;
-}
-
/**
 * fiji_populate_requested_graphic_levels - apply a power profile to the
 * graphics DPM levels and push them to SMC RAM.
 * @hwmgr: the address of the powerplay hardware manager.
 * @request: profile carrying activity threshold and up/down hysteresis.
 *
 * Stamps every populated graphics level with the profile's activity
 * threshold (byte-swapped for the SMC) and hysteresis values, marks them
 * enabled for activity, then uploads the whole GraphicsLevel array.
 *
 * Return: result of smu7_copy_bytes_to_smc().
 */
int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
		struct amd_pp_profile *request)
{
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
			(hwmgr->smu_backend);
	struct SMU73_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	/* Destination of the GraphicsLevel array inside the SMC's DPM table. */
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
	/* The full array is uploaded, not just the populated levels. */
	uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
			SMU73_MAX_LEVELS_GRAPHICS;
	uint32_t i;

	for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
		levels[i].ActivityLevel =
				cpu_to_be16(request->activity_threshold);
		levels[i].EnabledForActivity = 1;
		levels[i].UpHyst = request->up_hyst;
		levels[i].DownHyst = request->down_hyst;
	}

	return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
				array_size, SMC_RAM_END);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
deleted file mode 100644 (file)
index d9c72d9..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
#ifndef FIJI_SMC_H
#define FIJI_SMC_H

#include "smumgr.h"
#include "smu73.h"

/* Per-board power-tune defaults for Fiji.  Field names mirror the SMU73
 * PM-fuse parameters -- NOTE(review): exact semantics of each field are
 * defined by the SMU73 firmware interface; confirm against smu73.h. */
struct fiji_pt_defaults {
	uint8_t   SviLoadLineEn;
	uint8_t   SviLoadLineVddC;
	uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
	uint8_t   TDC_MAWt;
	uint8_t   TdcWaterfallCtl;
	uint8_t   DTEAmbientTempBase;
};

/* DPM table population and upload. */
int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
/* Thermal / fan control. */
int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
/* Runtime table updates and queries. */
int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
uint32_t fiji_get_mac_definition(uint32_t value);
int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
		struct amd_pp_profile *request);
int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
#endif
-
index 592a89aff12bd9538596cc1906368a1504aec7b5..f572beff197f0630b413bd948676c4519d6c1da5 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "pp_debug.h"
 #include "smumgr.h"
+#include "smu7_dyn_defaults.h"
 #include "smu73.h"
 #include "smu_ucode_xfer_vi.h"
 #include "fiji_smumgr.h"
 #include "gca/gfx_8_0_d.h"
 #include "bif/bif_5_0_d.h"
 #include "bif/bif_5_0_sh_mask.h"
-#include "fiji_pwrvirus.h"
-#include "fiji_smc.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "hardwaremanager.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "hwmgr.h"
+#include "smu7_hwmgr.h"
+
 
 #define AVFS_EN_MSB                                        1568
 #define AVFS_EN_LSB                                        1568
 
 #define FIJI_SMC_SIZE 0x20000
 
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define VDDC_VDDCI_DELTA            300
+#define MC_CG_ARB_FREQ_F1           0x0b
+
+/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
+ * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
+ */
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
+                               {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* [FF, SS] type, [] 4 voltage ranges, and
+ * [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
+       { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+       { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
+ * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
+ */
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
+                               {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+               /*sviLoadLIneEn,  SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
+               {1,               0xF,             0xFD,
+               /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
+               0x19,        5,               45}
+};
+
 static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
                /*  Min        Sclk       pcie     DeepSleep Activity  CgSpll      CgSpll    spllSpread  SpllSpread   CcPwr  CcPwr  Sclk   Display     Enabled     Enabled                       Voltage    Power */
                /* Voltage,  Frequency,  DpmLevel,  DivId,    Level,  FuncCntl3,  FuncCntl4,  Spectrum,   Spectrum2,  DynRm, DynRm1  Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
@@ -159,46 +200,6 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
        return result;
 }
 
-static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
-{
-       int i;
-       uint32_t reg, data;
-
-       for (i = 0; i < size; i++) {
-               reg  = pvirus->reg;
-               data = pvirus->data;
-               if (reg != 0xffffffff)
-                       cgs_write_register(hwmgr->device, reg, data);
-               else
-                       break;
-               pvirus++;
-       }
-}
-
-static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
-{
-       int i;
-       cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
-       cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
-       cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
-       for (i = 0; i < section->dfy_size; i++)
-               cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
-}
-
-static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr)
-{
-       execute_pwr_table(hwmgr, PwrVirusTable_pre, ARRAY_SIZE(PwrVirusTable_pre));
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
-       execute_pwr_table(hwmgr, PwrVirusTable_post, ARRAY_SIZE(PwrVirusTable_post));
-
-       return 0;
-}
-
 static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
 {
        int result = 0;
@@ -277,7 +278,7 @@ static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
                                " table over to SMU",
                                return -EINVAL;);
                smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
-               PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(hwmgr),
+               PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
                                "[AVFS][fiji_avfs_event_mgr] Could not setup "
                                "Pwr Virus for AVFS ",
                                return -EINVAL;);
@@ -365,13 +366,6 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
        return false;
 }
 
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param    smumgr  the address of the powerplay hardware manager.
-* @param    smc_addr the address in the SMC RAM to access.
-* @param    value to write to the SMC SRAM.
-*/
 static int fiji_smu_init(struct pp_hwmgr *hwmgr)
 {
        int i;
@@ -393,6 +387,2334 @@ static int fiji_smu_init(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+/* Look up VDDC/VDDCI/MVDD for a requested clock in a clock-voltage
+ * dependency table.
+ *
+ * Uses the first entry whose clk >= clock; if the requested clock
+ * exceeds the table maximum, falls back to the last (highest) entry.
+ * *voltage is packed as (vddc << VDDC_SHIFT) | (vddci << VDDCI_SHIFT) |
+ * (1 << PHASES_SHIFT) with voltages scaled by VOLTAGE_SCALE; *mvdd is
+ * scaled likewise.  Returns -EINVAL for an empty table, 0 otherwise.
+ *
+ * Fix: in the above-max fallback i == dep_table->count, so the VDDCI
+ * delta lookup and the MVDD test must use entries[i - 1], not the
+ * out-of-bounds entries[i].
+ */
+static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+               struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+               uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
+{
+       uint32_t i;
+       uint16_t vddci;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       *voltage = *mvdd = 0;
+
+
+       /* clock - voltage dependency table is empty table */
+       if (dep_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < dep_table->count; i++) {
+               /* find first sclk bigger than request */
+               if (dep_table->entries[i].clk >= clock) {
+                       *voltage |= (dep_table->entries[i].vddc *
+                                       VOLTAGE_SCALE) << VDDC_SHIFT;
+                       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+                               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       else if (dep_table->entries[i].vddci)
+                               *voltage |= (dep_table->entries[i].vddci *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       else {
+                               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               (dep_table->entries[i].vddc -
+                                                               VDDC_VDDCI_DELTA));
+                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       }
+
+                       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+                               *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+                                       VOLTAGE_SCALE;
+                       else if (dep_table->entries[i].mvdd)
+                               *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+                                       VOLTAGE_SCALE;
+
+                       *voltage |= 1 << PHASES_SHIFT;
+                       return 0;
+               }
+       }
+
+       /* sclk is bigger than max sclk in the dependence table; use the
+        * last entry.  i == dep_table->count here, so index with i - 1.
+        */
+       *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+       else if (dep_table->entries[i-1].vddci) {
+               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                               (dep_table->entries[i - 1].vddc -
+                                               VDDC_VDDCI_DELTA));
+               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+       }
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+               *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+       else if (dep_table->entries[i - 1].mvdd)
+               *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+       return 0;
+}
+
+
+/* Scale a fan-gain setting by 4096/100 -- presumably converting a
+ * percent-style value into 1/4096 units for the SMC FanGain* fields;
+ * confirm against the SMU73 spec.
+ */
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+       uint32_t tmp;
+       tmp = raw_setting * 4096 / 100;
+       return (uint16_t)tmp;
+}
+
+/* Map an SMU7 I2C line ID to its SCL/SDA pin identifiers.
+ * Unrecognized line IDs yield 0/0.
+ */
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+       switch (line) {
+       case SMU7_I2CLineID_DDC1:
+               *scl = SMU7_I2C_DDC1CLK;
+               *sda = SMU7_I2C_DDC1DATA;
+               break;
+       case SMU7_I2CLineID_DDC2:
+               *scl = SMU7_I2C_DDC2CLK;
+               *sda = SMU7_I2C_DDC2DATA;
+               break;
+       case SMU7_I2CLineID_DDC3:
+               *scl = SMU7_I2C_DDC3CLK;
+               *sda = SMU7_I2C_DDC3DATA;
+               break;
+       case SMU7_I2CLineID_DDC4:
+               *scl = SMU7_I2C_DDC4CLK;
+               *sda = SMU7_I2C_DDC4DATA;
+               break;
+       case SMU7_I2CLineID_DDC5:
+               *scl = SMU7_I2C_DDC5CLK;
+               *sda = SMU7_I2C_DDC5DATA;
+               break;
+       case SMU7_I2CLineID_DDC6:
+               *scl = SMU7_I2C_DDC6CLK;
+               *sda = SMU7_I2C_DDC6DATA;
+               break;
+       case SMU7_I2CLineID_SCLSDA:
+               *scl = SMU7_I2C_SCL;
+               *sda = SMU7_I2C_SDA;
+               break;
+       case SMU7_I2CLineID_DDCVGA:
+               *scl = SMU7_I2C_DDCVGACLK;
+               *sda = SMU7_I2C_DDCVGADATA;
+               break;
+       default:
+               *scl = 0;
+               *sda = 0;
+               break;
+       }
+}
+
+/* Select this board's power-tune defaults: use the pptable's 1-based
+ * usPowerTuneDataSetID when present and in range, otherwise fall back
+ * to the first entry of fiji_power_tune_data_set_array.
+ */
+static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (table_info &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID)
+               smu_data->power_tune_defaults =
+                               &fiji_power_tune_data_set_array
+                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+       else
+               smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
+
+}
+
+/* Fill the BAPM/power-containment fields of the discrete DPM table from
+ * the CAC/TDP table and fan-control parameters: TDP targets, GPU Tj
+ * limits, the multi-input temperature limits, scaled fan gains, and the
+ * I2C addresses/lines of the external liquid/VR/PLX controllers.
+ * Temperature limits are stored in 8.8 fixed point (hence * 256).
+ * Always returns 0.
+ */
+static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       SMU73_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+       struct pp_advance_fan_control_parameters *fan_table =
+                       &hwmgr->thermal_controller.advanceFanControlParameters;
+       uint8_t uc_scl, uc_sda;
+
+       /* TDP number of fraction bits are changed from 8 to 7 for Fiji
+        * as requested by SMC team
+        */
+       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+                       (uint16_t)(cac_dtp_table->usTDP * 128));
+       dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+                       (uint16_t)(cac_dtp_table->usTDP * 128));
+
+       /* GpuTjMax is a single byte; warn (but continue) if out of range. */
+       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+                       "Target Operating Temp is out of Range!",
+                       );
+
+       dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+       dpm_table->GpuTjHyst = 8;
+
+       dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
+
+       /* The following are for new Fiji Multi-input fan/thermal control */
+       dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTargetOperatingTemp * 256);
+       dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitHotspot * 256);
+       dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitLiquid1 * 256);
+       dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitLiquid2 * 256);
+       dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitVrVddc * 256);
+       dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitVrMvdd * 256);
+       dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
+                       cac_dtp_table->usTemperatureLimitPlx * 256);
+
+       dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainEdge));
+       dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainHotspot));
+       dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainLiquid));
+       dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainVrVddc));
+       dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
+       dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainPlx));
+       dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
+                       scale_fan_gain_settings(fan_table->usFanGainHbm));
+
+       dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
+       dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
+       dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
+       dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
+
+       get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
+       dpm_table->Liquid_I2C_LineSCL = uc_scl;
+       dpm_table->Liquid_I2C_LineSDA = uc_sda;
+
+       get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
+       dpm_table->Vr_I2C_LineSCL = uc_scl;
+       dpm_table->Vr_I2C_LineSDA = uc_sda;
+
+       get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
+       dpm_table->Plx_I2C_LineSCL = uc_scl;
+       dpm_table->Plx_I2C_LineSDA = uc_sda;
+
+       return 0;
+}
+
+
+/* Populate the SVI load-line fields (DW6) of the PM fuse table from the
+ * power-tune defaults; trim and offset are fixed at 3 and 0.
+ */
+static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+       smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+       return 0;
+}
+
+
+/* Populate the VDDC package TDC limit fields (DW7) of the PM fuse
+ * table; the limit uses 7 fraction bits (hence * 128) per SMC request.
+ */
+static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+       uint16_t tdc_limit;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       /* TDC number of fraction bits are changed from 8 to 7
+        * for Fiji as requested by SMC team
+        */
+       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+                       defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+       smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+       return 0;
+}
+
+/* Populate DW8 of the PM fuse table: read the dword holding
+ * TdcWaterfallCtl from SMC SRAM, keep the LPML temperature min/max and
+ * reserved bytes it contains, and substitute the default
+ * TdcWaterfallCtl.  Returns -EINVAL if the SRAM read fails.
+ */
+static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+       uint32_t temp;
+
+       if (smu7_read_smc_sram_dword(hwmgr,
+                       fuse_table_offset +
+                       offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
+                       (uint32_t *)&temp, SMC_RAM_END))
+               /* NOTE(review): the message mentions DW6/SviLoadLineEn but
+                * the read above is TdcWaterfallCtl (DW8) -- looks
+                * copy-pasted; verify before relying on this log text.
+                */
+               PP_ASSERT_WITH_CODE(false,
+                               "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+                               return -EINVAL);
+       else {
+               smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+               smu_data->power_tune_table.LPMLTemperatureMin =
+                               (uint8_t)((temp >> 16) & 0xff);
+               smu_data->power_tune_table.LPMLTemperatureMax =
+                               (uint8_t)((temp >> 8) & 0xff);
+               smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+       }
+       return 0;
+}
+
+/* DW9-DW12: the LPML temperature scaler is unused on Fiji; zero all
+ * 16 entries.
+ */
+static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+       return 0;
+}
+
+/* DW13-DW14: program the fuzzy-fan PWM set delta.  If the configured
+ * fan output sensitivity is zero or has bit 15 set, fall back to the
+ * default sensitivity first.
+ */
+static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+       if ((hwmgr->thermal_controller.advanceFanControlParameters.
+                       usFanOutputSensitivity & (1 << 15)) ||
+                       0 == hwmgr->thermal_controller.advanceFanControlParameters.
+                       usFanOutputSensitivity)
+               hwmgr->thermal_controller.advanceFanControlParameters.
+               usFanOutputSensitivity = hwmgr->thermal_controller.
+                       advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+       smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+                       PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+                                       advanceFanControlParameters.usFanOutputSensitivity);
+       return 0;
+}
+
+/* DW15-DW18: the GNB LPML table is unused on Fiji; zero all 16
+ * entries.
+ */
+static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.GnbLPML[i] = 0;
+
+       return 0;
+}
+
+/* DW20: convert the high/low CAC leakage from the powerplay table into
+ * the Hi/Lo SIDD base-leakage fields, 8.8 fixed point (value/100*256),
+ * byte-swapped for the SMC.
+ * NOTE(review): the initializers of HiSidd/LoSidd are dead stores --
+ * both are unconditionally overwritten below.
+ */
+static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+       uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+       HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+       LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+       return 0;
+}
+
+/* Build the complete PM fuse table (DW6..DW20) in host memory via the
+ * fiji_populate_* helpers above and download it to SMC SRAM at the
+ * offset read from the firmware header.  No-op unless the
+ * PowerContainment platform cap is enabled.  Returns -EINVAL on any
+ * populate/read/copy failure.
+ */
+static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+       uint32_t pm_fuse_table_offset;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment)) {
+               if (smu7_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU73_Firmware_Header, PmFuseTable),
+                               &pm_fuse_table_offset, SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to get pm_fuse_table_offset Failed!",
+                                       return -EINVAL);
+
+               /* DW6 */
+               if (fiji_populate_svi_load_line(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate SviLoadLine Failed!",
+                                       return -EINVAL);
+               /* DW7 */
+               if (fiji_populate_tdc_limit(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
+               /* DW8 */
+               if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TdcWaterfallCtl, "
+                                       "LPMLTemperature Min and Max Failed!",
+                                       return -EINVAL);
+
+               /* DW9-DW12 */
+               if (0 != fiji_populate_temperature_scaler(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate LPMLTemperatureScaler Failed!",
+                                       return -EINVAL);
+
+               /* DW13-DW14 */
+               if (fiji_populate_fuzzy_fan(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate Fuzzy Fan Control parameters Failed!",
+                                       return -EINVAL);
+
+               /* DW15-DW18 */
+               if (fiji_populate_gnb_lpml(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate GnbLPML Failed!",
+                                       return -EINVAL);
+
+               /* DW20 */
+               if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+                                       "Sidd Failed!", return -EINVAL);
+
+               /* Download the assembled table to SMC SRAM. */
+               if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+                               (uint8_t *)&smu_data->power_tune_table,
+                               sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to download PmFuseTable Failed!",
+                                       return -EINVAL);
+       }
+       return 0;
+}
+
+/* Fill the BapmVddc VID Hi/Lo SIDD arrays of the DPM table from the
+ * VDDC lookup table's CAC low/high values, converted to VID codes.
+ * Always returns 0.
+ */
+static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       uint32_t count;
+       uint8_t index;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+                       table_info->vddc_lookup_table;
+       /* tables is already swapped, so in order to use the value from it,
+        * we need to swap it back.
+        * We are populating vddc CAC data to BapmVddc table
+        * in split and merged mode
+        */
+
+       for (count = 0; count < lookup_table->count; count++) {
+               index = phm_get_voltage_index(lookup_table,
+                               data->vddc_voltage_table.entries[count].value);
+               table->BapmVddcVidLoSidd[count] =
+                       convert_to_vid(lookup_table->entries[index].us_cac_low);
+               table->BapmVddcVidHiSidd[count] =
+                       convert_to_vid(lookup_table->entries[index].us_cac_high);
+       }
+
+       return 0;
+}
+
+/* Populate the SMC voltage tables; currently only the CAC table is
+ * required for Fiji.  Returns -EINVAL if that fails.
+ */
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       int result;
+
+       result = fiji_populate_cac_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "can not populate CAC voltage tables to SMC",
+                       return -EINVAL);
+
+       return 0;
+}
+
+/* Fill one ULV (ultra-low-voltage) state from the pptable's ULV voltage
+ * offset and byte-swap the multi-byte fields for the SMC.
+ * NOTE(review): result is never set non-zero, so the swap block always
+ * runs and the function always returns 0.
+ */
+static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_Ulv *state)
+{
+       int result = 0;
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       state->CcPwrDynRm = 0;
+       state->CcPwrDynRm1 = 0;
+
+       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+       /* Convert the offset to a VID step count (scale 100/625). */
+       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+       state->VddcPhase = 1;
+
+       if (!result) {
+               CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+               CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+               CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+       }
+       return result;
+}
+
+/* Populate the DPM table's single ULV state. */
+static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       return fiji_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+/* Populate the PCIE link levels of the DPM table from the pcie speed
+ * DPM table and record the link-level count and enable mask.  The loop
+ * deliberately runs to count inclusive: the extra entry at index
+ * `count` is the reserved PCIE boot level.  Always returns 0.
+ */
+static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       int i;
+
+       /* Index (dpm_table->pcie_speed_table.count)
+        * is reserved for PCIE boot level. */
+       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+               table->LinkLevel[i].PcieGenSpeed  =
+                               (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+               table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+                               dpm_table->pcie_speed_table.dpm_levels[i].param1);
+               table->LinkLevel[i].EnabledForActivity = 1;
+               table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+               table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+               table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+       }
+
+       smu_data->smc_state_table.LinkLevelCount =
+                       (uint8_t)dpm_table->pcie_speed_table.count;
+       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+       return 0;
+}
+
+/* Compute the SPLL register values (function control, feedback divider,
+ * optional spread-spectrum CLKS/CLKV) for an engine clock, using the
+ * VBIOS-provided dividers, and store them in the graphics level entry.
+ * Returns the atomctrl error if divider retrieval fails, else 0.
+ */
+static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+               uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       uint32_t ref_clock;
+       uint32_t ref_divider;
+       uint32_t fbdiv;
+       int result;
+
+       /* get the engine clock dividers for this clock value */
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+                       "Error retrieving Engine Clock dividers from VBIOS.",
+                       return result);
+
+       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+       ref_clock = atomctrl_get_reference_clock(hwmgr);
+       ref_divider = 1 + dividers.uc_pll_ref_div;
+
+       /* low 14 bits is fraction and high 12 bits is divider */
+       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+       /* SPLL_FUNC_CNTL setup */
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+                       SPLL_REF_DIV, dividers.uc_pll_ref_div);
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+                       SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+       /* SPLL_FUNC_CNTL_3 setup*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+                       SPLL_FB_DIV, fbdiv);
+
+       /* set to use fractional accumulation*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+                       SPLL_DITHEN, 1);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+               struct pp_atomctrl_internal_ss_info ssInfo;
+
+               uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+               if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+                               vco_freq, &ssInfo)) {
+                       /*
+                        * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+                        * ss_info.speed_spectrum_rate -- in unit of khz
+                        *
+                        * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
+                        */
+                       uint32_t clk_s = ref_clock * 5 /
+                                       (ref_divider * ssInfo.speed_spectrum_rate);
+                       /* clkv = 2 * D * fbdiv / NS */
+                       uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
+                                       fbdiv / (clk_s * 10000);
+
+                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+                                       CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+                       cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+                                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+                       cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+                                       CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+               }
+       }
+
+       sclk->SclkFrequency        = clock;
+       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+       return 0;
+}
+
+/* Populate one SMU73 graphics DPM level for the given engine clock:
+ * compute SPLL/spread-spectrum parameters, look up the matching
+ * VDDC/MVDD voltage, set activity/hysteresis defaults and the
+ * deep-sleep divider, then byte-swap the multi-byte fields for the SMC.
+ *
+ * Fix: the return of fiji_calculate_sclk_params() was assigned and then
+ * immediately overwritten without being checked; SPLL setup failures
+ * are now propagated instead of silently ignored.
+ */
+static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+               uint32_t clock, uint16_t sclk_al_threshold,
+               struct SMU73_Discrete_GraphicsLevel *level)
+{
+       int result;
+       /* PP_Clocks minClocks; */
+       uint32_t threshold, mvdd;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       result = fiji_calculate_sclk_params(hwmgr, clock, level);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Error calculating sclk params for graphics level",
+                       return result);
+
+       /* populate graphics levels */
+       result = fiji_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_sclk, clock,
+                       (uint32_t *)(&level->MinVoltage), &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "can not find VDDC voltage value for "
+                       "VDDC engine clock dependency table",
+                       return result);
+
+       level->SclkFrequency = clock;
+       level->ActivityLevel = sclk_al_threshold;
+       level->CcPwrDynRm = 0;
+       level->CcPwrDynRm1 = 0;
+       level->EnabledForActivity = 0;
+       level->EnabledForThrottle = 1;
+       level->UpHyst = 10;
+       level->DownHyst = 0;
+       level->VoltageDownHyst = 0;
+       level->PowerThrottle = 0;
+
+       /* NOTE(review): threshold is computed but never used below. */
+       threshold = clock * data->fast_watermark_threshold / 100;
+
+       data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+               level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+                                                               hwmgr->display_config.min_core_set_clock_in_sr);
+
+
+       /* Default to slow, highest DPM level will be
+        * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+        */
+       level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+       return 0;
+}
+
/* Build the full graphics (SCLK) DPM level array and upload it to SMC RAM.
 *
 * One SMU73 graphics level is populated per SCLK DPM entry; each level is
 * then assigned a PCIe DPM level, either directly from the PPTable PCIe
 * table or derived from the PCIe DPM enable mask when no table exists.
 *
 * Returns 0 on success or a negative error code.
 */
static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);

	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
	int result = 0;
	/* Destination address of GraphicsLevel inside the SMC DPM table. */
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
			SMU73_MAX_LEVELS_GRAPHICS;
	struct SMU73_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i, max_entry;
	uint8_t hightest_pcie_level_enabled = 0,
			lowest_pcie_level_enabled = 0,
			mid_pcie_level_enabled = 0,
			count = 0;

	/* Fill one SMC graphics level per SCLK DPM entry. */
	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = fiji_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				(uint16_t)smu_data->activity_target[i],
				&levels[i]);
		if (result)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			levels[i].DeepSleepDivId = 0;
	}

	/* Only enable level 0 for now.*/
	levels[0].EnabledForActivity = 1;

	/* set highest level watermark to high */
	levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	smu_data->smc_state_table.GraphicsDpmLevelCount =
			(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	if (pcie_table != NULL) {
		/* PPTable provides PCIe levels: map level i to PCIe entry i,
		 * clamped to the last defined entry. */
		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
				"There must be 1 or more PCIE levels defined in PPTable.",
				return -EINVAL);
		max_entry = pcie_entry_cnt - 1;
		for (i = 0; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel =
					(uint8_t) ((i < max_entry) ? i : max_entry);
	} else {
		/* No PCIe table: derive the highest, lowest and a mid enabled
		 * PCIe DPM level by scanning the enable-mask bits. */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (hightest_pcie_level_enabled + 1))) != 0))
			hightest_pcie_level_enabled++;

		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << lowest_pcie_level_enabled)) == 0))
			lowest_pcie_level_enabled++;

		/* count = gap of disabled levels just above the lowest one. */
		while ((count < hightest_pcie_level_enabled) &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
			count++;

		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
				hightest_pcie_level_enabled ?
						(lowest_pcie_level_enabled + 1 + count) :
						hightest_pcie_level_enabled;

		/* set pcieDpmLevel to hightest_pcie_level_enabled */
		for (i = 2; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled */
		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled */
		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* level count will send to smc once at init smc table and never change */
	result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
			(uint32_t)array_size, SMC_RAM_END);

	return result;
}
+
+
+/**
+ * MCLK Frequency Ratio
+ * SEQ_CG_RESP  Bit[31:24] - 0x0
+ * Bit[27:24] - DDR3 Frequency ratio
+ * 0x0 <= 100MHz,       450 < 0x8 <= 500MHz
+ * 100 < 0x1 <= 150MHz,       500 < 0x9 <= 550MHz
+ * 150 < 0x2 <= 200MHz,       550 < 0xA <= 600MHz
+ * 200 < 0x3 <= 250MHz,       600 < 0xB <= 650MHz
+ * 250 < 0x4 <= 300MHz,       650 < 0xC <= 700MHz
+ * 300 < 0x5 <= 350MHz,       700 < 0xD <= 750MHz
+ * 350 < 0x6 <= 400MHz,       750 < 0xE <= 800MHz
+ * 400 < 0x7 <= 450MHz,       800 < 0xF
+ */
static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
{
	/* Map a memory clock (10 kHz units) onto the 4-bit SEQ_CG_RESP
	 * DDR3 frequency-ratio code: 0x0 for clocks up to 100 MHz, then
	 * one code per additional 50 MHz band, saturating at 0xF above
	 * 800 MHz (see the ratio table in the comment above).
	 */
	if (mem_clock <= 10000)
		return 0x0;
	if (mem_clock > 80000)
		return 0xf;

	/* Codes 0x1..0xe: ceiling of (mem_clock - 100 MHz) / 50 MHz. */
	return (uint8_t)((mem_clock - 10000 + 4999) / 5000);
}
+
+static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+    uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
+{
+       struct pp_atomctrl_memory_clock_param mem_param;
+       int result;
+
+       result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Failed to get Memory PLL Dividers.",
+                       );
+
+       /* Save the result data to outpupt memory level structure */
+       mclk->MclkFrequency   = clock;
+       mclk->MclkDivider     = (uint8_t)mem_param.mpll_post_divider;
+       mclk->FreqRange       = fiji_get_mclk_frequency_ratio(clock);
+
+       return result;
+}
+
/* Fill one SMC memory (MCLK) DPM level for the given memory clock.
 *
 * Looks up the minimum voltage / MVDD for the clock, applies the default
 * throttle, hysteresis and watermark settings, decides whether memory
 * stutter mode may be enabled, computes the MCLK dividers, and finally
 * byte-swaps the multi-byte fields for the SMC.
 *
 * Returns 0 on success or the error from the dividers lookup.
 */
static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	/* MCLK ceiling (10 kHz units) under which stutter mode is allowed. */
	uint32_t mclk_stutter_mode_threshold = 60000;

	if (table_info->vdd_dep_on_mclk) {
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk, clock,
				(uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find MinVddc voltage value from memory "
				"VDDC voltage dependency table", return result);
	}

	mem_level->EnabledForThrottle = 1;
	mem_level->EnabledForActivity = 0;
	mem_level->UpHyst = 0;
	mem_level->DownHyst = 100;
	mem_level->VoltageDownHyst = 0;
	mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	mem_level->StutterEnable = false;

	mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* enable stutter mode if all the follow condition applied
	 * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
	 * &(data->DisplayTiming.numExistingDisplays));
	 */
	/* NOTE(review): the display count is hard-coded to 1 rather than
	 * queried from the display configuration — confirm intent. */
	data->display_timing.num_existing_displays = 1;

	if (mclk_stutter_mode_threshold &&
		(clock <= mclk_stutter_mode_threshold) &&
		(!data->is_uvd_enabled) &&
		(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
				STUTTER_ENABLE) & 0x1))
		mem_level->StutterEnable = true;

	/* Compute MCLK dividers; only byte-swap the fields on success. */
	result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
	if (!result) {
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
	}
	return result;
}
+
+static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       int result;
+       /* populate MCLK dpm table to SMU7 */
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+       uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
+                       SMU73_MAX_LEVELS_MEMORY;
+       struct SMU73_Discrete_MemoryLevel *levels =
+                       smu_data->smc_state_table.MemoryLevel;
+       uint32_t i;
+
+       for (i = 0; i < dpm_table->mclk_table.count; i++) {
+               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+                               "can not populate memory level as memory clock is zero",
+                               return -EINVAL);
+               result = fiji_populate_single_memory_level(hwmgr,
+                               dpm_table->mclk_table.dpm_levels[i].value,
+                               &levels[i]);
+               if (result)
+                       return result;
+       }
+
+       /* Only enable level 0 for now. */
+       levels[0].EnabledForActivity = 1;
+
+       /* in order to prevent MC activity from stutter mode to push DPM up.
+        * the UVD change complements this by putting the MCLK in
+        * a higher state by default such that we are not effected by
+        * up threshold or and MCLK DPM latency.
+        */
+       levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+       CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+       smu_data->smc_state_table.MemoryDpmLevelCount =
+                       (uint8_t)dpm_table->mclk_table.count;
+       data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+       /* set highest level watermark to high */
+       levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+                       PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       /* level count will send to smc once at init smc table and never change */
+       result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+                       (uint32_t)array_size, SMC_RAM_END);
+
+       return result;
+}
+
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+               uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t i = 0;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+               /* find mvdd value which clock is more than request */
+               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+                               smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+                               break;
+                       }
+               }
+               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+                               "MVDD Voltage is outside the supported range.",
+                               return -EINVAL);
+       } else
+               return -EINVAL;
+
+       return 0;
+}
+
/* Populate the SCLK and MCLK ACPI (lowest-power) levels of the SMC table.
 *
 * The ACPI SCLK level is taken from DPM0 (or the VBIOS boot clock when
 * SCLK DPM is disabled); the SPLL is programmed powered-off/in-reset for
 * it.  The ACPI MCLK level is filled the same way, including the MVDD
 * SMIO pattern.  All multi-byte fields are converted to SMC endianness.
 *
 * Returns 0 on success or an error from the divider lookup.
 */
static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
		SMU73_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct pp_atomctrl_clock_dividers_vi dividers;
	SMIO_Pattern vol_level;
	uint32_t mvdd;
	uint16_t us_mvdd;
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (!data->sclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0,
		 * already converted to SMC_UL */
		table->ACPILevel.SclkFrequency =
				data->dpm_table.sclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_sclk,
				table->ACPILevel.SclkFrequency,
				(uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDC voltage value " \
				"in Clock Dependency Table",
				);
	} else {
		/* SCLK DPM disabled: fall back to VBIOS boot values. */
		table->ACPILevel.SclkFrequency =
				data->vbios_boot_state.sclk_bootup_value;
		table->ACPILevel.MinVoltage =
				data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
	}

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
			table->ACPILevel.SclkFrequency,  &dividers);
	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power down and hold the SPLL in reset for the ACPI state. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
			SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* Convert all multi-byte ACPI-level fields to SMC byte order. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	if (!data->mclk_dpm_key_disabled) {
		/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
		table->MemoryACPILevel.MclkFrequency =
				data->dpm_table.mclk_table.dpm_levels[0].value;
		result = fiji_get_dependency_volt_by_clk(hwmgr,
				table_info->vdd_dep_on_mclk,
				table->MemoryACPILevel.MclkFrequency,
			(uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
		PP_ASSERT_WITH_CODE((0 == result),
				"Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
				);
	} else {
		table->MemoryACPILevel.MclkFrequency =
				data->vbios_boot_state.mclk_bootup_value;
		table->MemoryACPILevel.MinVoltage =
				data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
	}

	/* Choose the MVDD source: boot value when MVDD is uncontrolled or
	 * MCLK DPM is disabled, otherwise the SMIO lookup for DPM0. */
	us_mvdd = 0;
	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
			(data->mclk_dpm_key_disabled))
		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
	else {
		if (!fiji_populate_mvdd_value(hwmgr,
				data->dpm_table.mclk_table.dpm_levels[0].value,
				&vol_level))
			us_mvdd = vol_level.Voltage;
	}

	table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

	return result;
}
+
+static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU73_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       table->VceLevelCount = (uint8_t)(mm_table->count);
+       table->VceBootLevel = 0;
+
+       for (count = 0; count < table->VceLevelCount; count++) {
+               table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+               table->VceLevel[count].MinVoltage = 0;
+               table->VceLevel[count].MinVoltage |=
+                               (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+               table->VceLevel[count].MinVoltage |=
+                               ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /*retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->VceLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for VCE engine clock",
+                               return result);
+
+               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+               SMU73_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       table->AcpLevelCount = (uint8_t)(mm_table->count);
+       table->AcpBootLevel = 0;
+
+       for (count = 0; count < table->AcpLevelCount; count++) {
+               table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
+               table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+               table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->AcpLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for engine clock", return result);
+
+               table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+               SMU73_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       table->SamuBootLevel = 0;
+       table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+       for (count = 0; count < table->SamuLevelCount; count++) {
+               /* not sure whether we need evclk or not */
+               table->SamuLevel[count].MinVoltage = 0;
+               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+               table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->SamuLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for samu clock", return result);
+
+               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+               int32_t eng_clock, int32_t mem_clock,
+               struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+       uint32_t dram_timing;
+       uint32_t dram_timing2;
+       uint32_t burstTime;
+       ULONG state, trrds, trrdl;
+       int result;
+
+       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+                       eng_clock, mem_clock);
+       PP_ASSERT_WITH_CODE(result == 0,
+                       "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+       dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+       dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+       burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+
+       state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
+       trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
+       trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
+
+       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
+       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+       arb_regs->McArbBurstTime   = (uint8_t)burstTime;
+       arb_regs->TRRDS            = (uint8_t)trrds;
+       arb_regs->TRRDL            = (uint8_t)trrdl;
+
+       return 0;
+}
+
+static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
+       uint32_t i, j;
+       int result = 0;
+
+       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+                       result = fiji_populate_memory_timing_parameters(hwmgr,
+                                       data->dpm_table.sclk_table.dpm_levels[i].value,
+                                       data->dpm_table.mclk_table.dpm_levels[j].value,
+                                       &arb_regs.entries[i][j]);
+                       if (result)
+                               break;
+               }
+       }
+
+       if (!result)
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr,
+                               smu_data->smu7_data.arb_table_start,
+                               (uint8_t *)&arb_regs,
+                               sizeof(SMU73_Discrete_MCArbDramTimingTable),
+                               SMC_RAM_END);
+       return result;
+}
+
+static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+
+       table->UvdLevelCount = (uint8_t)(mm_table->count);
+       table->UvdBootLevel = 0;
+
+       for (count = 0; count < table->UvdLevelCount; count++) {
+               table->UvdLevel[count].MinVoltage = 0;
+               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+               table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+               table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].VclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for Vclk clock", return result);
+
+               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].DclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for Dclk clock", return result);
+
+               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+
+       }
+       return result;
+}
+
+static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table */
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+                       data->vbios_boot_state.sclk_bootup_value,
+                       (uint32_t *)&(table->GraphicsBootLevel));
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+                       data->vbios_boot_state.mclk_bootup_value,
+                       (uint32_t *)&(table->MemoryBootLevel));
+
+       table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
+                       VOLTAGE_SCALE;
+       table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+                       VOLTAGE_SCALE;
+       table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
+                       VOLTAGE_SCALE;
+
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+       return 0;
+}
+
+static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint8_t count, level;
+
+       count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+       for (level = 0; level < count; level++) {
+               if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+                               data->vbios_boot_state.sclk_bootup_value) {
+                       smu_data->smc_state_table.GraphicsBootLevel = level;
+                       break;
+               }
+       }
+
+       count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+       for (level = 0; level < count; level++) {
+               if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+                               data->vbios_boot_state.mclk_bootup_value) {
+                       smu_data->smc_state_table.MemoryBootLevel = level;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
/* Populate the SMU73 clock-stretcher (CKS) tables for Fiji.
 *
 * Classifies the silicon corner from efuse data, fills the per-SCLK-level
 * CKS enables and voltage offsets, programs the CKS lookup table and the
 * per-quadrant DDT lookup table, and finally clears bit 0 of PWR_CKS_CNTL.
 *
 * Returns 0 on success, or -EINVAL if the PPTable stretch amount is not a
 * supported value (in which case the ClockStretcher cap is also cleared).
 */
static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
			volt_with_cks, value;
	uint16_t clock_freq_u16;
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
			volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (146 * 4));
	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (148 * 4));
	efuse &= 0xFF000000;
	efuse = efuse >> 24;
	efuse2 &= 0xF;

	/* Scale the 8-bit fuse into an RO figure; the scale range depends
	 * on the fuse revision selected by efuse2.
	 */
	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	/* type indexes fiji_clock_stretcher_ddt_table: 0 = fast corner
	 * (FF), 1 = slow corner (SS).
	 */
	if (ro >= 1660)
		type = 0;
	else
		type = 1;

	/* Populate Stretch amount */
	smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;

	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* Empirical voltage model in 0.1mV terms; constants are
		 * presumably characterization data -- TODO confirm source.
		 */
		volt_without_cks = (uint32_t)((14041 *
			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
		volt_with_cks = (uint32_t)((13946 *
			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
		/* NOTE(review): volt_offset keeps its previous value when
		 * volt_without_cks < volt_with_cks (sticky across levels).
		 */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* Reset the CKS block: stretch disabled, masterReset pulsed around
	 * staticEnable being set.
	 */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			STRETCH_ENABLE, 0x0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			staticEnable, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x0);

	/* Populate CKS Lookup Table */
	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
		stretch_amount2 = 0;
	else if (stretch_amount == 3 || stretch_amount == 4)
		stretch_amount2 = 1;
	else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL);
	/* Mask off the PWR_CKS_CNTL fields conditionally set below. */
	value &= 0xFFC2FF87;
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][0];
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
			fiji_clock_stretcher_lookup_table[stretch_amount2][1];
	/* Highest graphics level SCLK in MHz (table stores 10KHz units). */
	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
			GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
			SclkFrequency) / 100);
	if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
			clock_freq_u16 &&
	    fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
			clock_freq_u16) {
		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
		value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
		value |= (fiji_clock_stretch_amount_conversion
				[fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
				 [stretch_amount]) << 3;
	}
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].minFreq);
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].maxFreq);
	/* setting byte: bits 0-6 from column 2, bit 7 from column 3. */
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
			fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
			(fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL, value);

	/* Populate DDT Lookup Table */
	for (i = 0; i < 4; i++) {
		/* Assign the minimum and maximum VID stored
		 * in the last row of Clock Stretcher Voltage Table.
		 */
		smu_data->smc_state_table.ClockStretcherDataTable.
		ClockStretcherDataTableEntry[i].minVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
		smu_data->smc_state_table.ClockStretcherDataTable.
		ClockStretcherDataTableEntry[i].maxVID =
				(uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
		/* Loop through each SCLK and check the frequency
		 * to see if it lies within the frequency for clock stretcher.
		 */
		for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
			cks_setting = 0;
			clock_freq = PP_SMC_TO_HOST_UL(
					smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
			/* Check the allowed frequency against the sclk level[j].
			 *  Sclk's endianness has already been converted,
			 *  and it's in 10Khz unit,
			 *  as opposed to Data table, which is in Mhz unit.
			 */
			if (clock_freq >=
					(fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
				cks_setting |= 0x2;
				if (clock_freq <
						(fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
					cks_setting |= 0x1;
			}
			smu_data->smc_state_table.ClockStretcherDataTable.
			ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
		}
		CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
				ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].setting);
	}

	/* Clear bit 0 of PWR_CKS_CNTL (presumably the CKS disable bit --
	 * TODO confirm bitfield name against the register spec).
	 */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);

	return 0;
}
+
+static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
+               struct SMU73_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint16_t config;
+
+       config = VR_MERGED_WITH_VDDC;
+       table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+       /* Set Vddc Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+               config = VR_SVI2_PLANE_1;
+               table->VRConfig |= config;
+       } else {
+               PP_ASSERT_WITH_CODE(false,
+                               "VDDC should be on SVI2 control in merged mode!",
+                               );
+       }
+       /* Set Vddci Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+               config = VR_SVI2_PLANE_2;  /* only in merged mode */
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+               config = VR_SMIO_PATTERN_1;
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       } else {
+               config = VR_STATIC_VOLTAGE;
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       }
+       /* Set Mvdd Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+               config = VR_SVI2_PLANE_2;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               config = VR_SMIO_PATTERN_2;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+       } else {
+               config = VR_STATIC_VOLTAGE;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+       }
+
+       return 0;
+}
+
+static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       uint32_t tmp;
+       int result;
+
+       /* This is a read-modify-write on the first byte of the ARB table.
+        * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+        * is the field 'current'.
+        * This solution is ugly, but we never write the whole table only
+        * individual fields in it.
+        * In reality this field should not be in that structure
+        * but in a soft register.
+        */
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+       if (result)
+               return result;
+
+       tmp &= 0x00FFFFFF;
+       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+       return smu7_write_smc_sram_dword(hwmgr,
+                       smu_data->smu7_data.arb_table_start,  tmp, SMC_RAM_END);
+}
+
+static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       struct SMU73_Discrete_GraphicsLevel *levels =
+                               data->smc_state_table.GraphicsLevel;
+       unsigned min_level = 1;
+
+       hwmgr->default_gfx_power_profile.activity_threshold =
+                       be16_to_cpu(levels[0].ActivityLevel);
+       hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+       hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+       hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+       hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+       hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+       /* Workaround compute SDMA instability: disable lowest SCLK
+        * DPM level. Optimize compute power profile: Use only highest
+        * 2 power levels (if more than 2 are available), Hysteresis:
+        * 0ms up, 5ms down
+        */
+       if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+               min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+       else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+               min_level = 1;
+       else
+               min_level = 0;
+       hwmgr->default_compute_power_profile.min_sclk =
+                       be32_to_cpu(levels[min_level].SclkFrequency);
+       hwmgr->default_compute_power_profile.up_hyst = 0;
+       hwmgr->default_compute_power_profile.down_hyst = 5;
+
+       hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+       hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+
+       return 0;
+}
+
+static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
+{
+       pp_atomctrl_voltage_table param_led_dpm;
+       int result = 0;
+       u32 mask = 0;
+
+       result = atomctrl_get_voltage_table_v3(hwmgr,
+                                              VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
+                                              &param_led_dpm);
+       if (result == 0) {
+               int i, j;
+               u32 tmp = param_led_dpm.mask_low;
+
+               for (i = 0, j = 0; i < 32; i++) {
+                       if (tmp & 1) {
+                               mask |= (i << (8 * j));
+                               if (++j >= 3)
+                                       break;
+                       }
+                       tmp >>= 1;
+               }
+       }
+       if (mask)
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                                                   PPSMC_MSG_LedConfig,
+                                                   mask);
+       return 0;
+}
+
/* Build and upload the complete SMU73 discrete DPM table for Fiji.
 *
 * Populates, in a fixed and order-dependent sequence: voltage tables,
 * ULV state, link/graphics/memory/ACPI/VCE/ACP/SAMU/UVD levels, ARB
 * timing for the initial state, boot levels, BAPM parameters, the clock
 * stretcher table (if enabled), VRConfig, GPIO assignments, and thermal
 * settings; converts the host-endian fields to SMC (big-endian) byte
 * order; then copies the table into SMC SRAM, initializes the ARB table
 * index, uploads PM fuses, configures the DPM LEDs, and records the
 * default power profiles.
 *
 * Returns 0 on success, or the first sub-step's error code (each failing
 * step also logs via PP_ASSERT_WITH_CODE).
 */
static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	uint8_t i;
	struct pp_atomctrl_gpio_pin_assignment gpio_pin;

	fiji_initialize_power_tune_defaults(hwmgr);

	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
		fiji_populate_smc_voltage_tables(hwmgr, table);

	/* System flags: AC/DC GPIO transition, stepped VDDC, GDDR5. */
	table->SystemFlags = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StepVddc))
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (data->is_memory_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
		result = fiji_populate_ulv_state(hwmgr, table);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to initialize ULV state!", return result);
		/* 0x40035 is the ULV parameter value; meaning of the
		 * encoding is not visible here -- TODO confirm.
		 */
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_ULV_PARAMETER, 0x40035);
	}

	result = fiji_populate_smc_link_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Link Level!", return result);

	result = fiji_populate_all_graphic_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Graphics Level!", return result);

	result = fiji_populate_all_memory_levels(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Memory Level!", return result);

	result = fiji_populate_smc_acpi_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ACPI Level!", return result);

	result = fiji_populate_smc_vce_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize VCE Level!", return result);

	result = fiji_populate_smc_acp_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize ACP Level!", return result);

	result = fiji_populate_smc_samu_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize SAMU Level!", return result);

	/* Since only the initial state is completely set up at this point
	 * (the other states are just copies of the boot state) we only
	 * need to populate the  ARB settings for the initial state.
	 */
	result = fiji_program_memory_timing_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to Write ARB settings for the initial state.", return result);

	result = fiji_populate_smc_uvd_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize UVD Level!", return result);

	result = fiji_populate_smc_boot_level(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot Level!", return result);

	result = fiji_populate_smc_initailial_state(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to initialize Boot State!", return result);

	result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate BAPM Parameters!", return result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ClockStretcher)) {
		result = fiji_populate_clock_stretcher_data_table(hwmgr);
		PP_ASSERT_WITH_CODE(0 == result,
				"Failed to populate Clock Stretcher Data Table!",
				return result);
	}

	/* Global DPM behavior knobs; the *Interval fields are in SMC
	 * sampling units (exact unit not visible here).
	 */
	table->GraphicsVoltageChangeEnable  = 1;
	table->GraphicsThermThrottleEnable  = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval  = 1;
	table->ThermalInterval  = 1;
	/* Temperature limits in Q8.8 fixed-point degrees. */
	table->TemperatureLimitHigh =
			table_info->cac_dtp_table->usTargetOperatingTemp *
			SMU7_Q88_FORMAT_CONVERSION_UNIT;
	table->TemperatureLimitLow  =
			(table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
			SMU7_Q88_FORMAT_CONVERSION_UNIT;
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
	table->PCIeGenInterval = 1;
	table->VRConfig = 0;

	result = fiji_populate_vr_config(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to populate VRConfig setting!", return result);

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	/* VRHot GPIO: use the VBIOS-assigned pin when present, otherwise
	 * mark unused and drop the RegulatorHot capability.
	 */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	} else {
		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot);
	}

	/* AC/DC switch GPIO, same pattern as above. */
	if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
			&gpio_pin)) {
		table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	} else {
		table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
	}

	/* Thermal Output GPIO */
	if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
			&gpio_pin)) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ThermalOutGPIO);

		table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;

		/* For porlarity read GPIOPAD_A with assigned Gpio pin
		 * since VBIOS will program this register to set 'inactive state',
		 * driver can then determine 'active state' from this and
		 * program SMU with correct polarity
		 */
		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
				(1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;

		/* if required, combine VRHot/PCC with thermal out GPIO */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_RegulatorHot) &&
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_CombinePCCWithThermalSignal))
			table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
	} else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ThermalOutGPIO);
		table->ThermOutGpio = 17;
		table->ThermOutPolarity = 1;
		table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
	}

	/* Convert host-endian fields to SMC byte order before upload. */
	for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);

	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);

	/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
	result = smu7_copy_bytes_to_smc(hwmgr,
			smu_data->smu7_data.dpm_table_start +
			offsetof(SMU73_Discrete_DpmTable, SystemFlags),
			(uint8_t *)&(table->SystemFlags),
			sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
			SMC_RAM_END);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to upload dpm data to SMC memory!", return result);

	result = fiji_init_arb_table_index(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to upload arb data to SMC memory!", return result);

	result = fiji_populate_pm_fuses(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to  populate PM fuses to SMC memory!", return result);

	result = fiji_setup_dpm_led_config(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			    "Failed to setup dpm led config", return result);

	fiji_save_default_power_profile(hwmgr);

	return 0;
}
+
/* Build the SMU73 fan control table from the thermal-controller fan
 * parameters and upload it to SMC SRAM, then push the optional minimum
 * PWM and minimum fan-SCLK acoustic limits to the SMC.
 *
 * On any precondition failure (no fan, no fan table address, zero
 * FMAX_DUTY100) or any SMC error, the MicrocodeFanControl capability is
 * cleared instead of propagating an error.
 *
 * NOTE(review): always returns 0, even when the SMC upload fails --
 * callers cannot distinguish success from fallback; confirm intentional.
 */
static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
	struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);

	SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	uint32_t duty100;
	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	uint16_t fdo_min, slope1, slope2;
	uint32_t reference_clock;
	int res;
	uint64_t tmp64;

	/* No fan on this board: hand control back to firmware default. */
	if (hwmgr->thermal_controller.fanInfo.bNoFan) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Fan table location in SMC SRAM was never resolved. */
	if (smu_data->smu7_data.fan_table_start == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_FDO_CTRL1, FMAX_DUTY100);

	if (duty100 == 0) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);
		return 0;
	}

	/* Minimum fan duty: usPWMMin is in 0.01% units, scale to duty100.
	 * 64-bit intermediate with do_div() for 32-bit platforms.
	 */
	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
			usPWMMin * duty100;
	do_div(tmp64, 10000);
	fdo_min = (uint16_t)tmp64;

	/* Slopes of the two PWM-vs-temperature line segments
	 * (min->med, med->high).
	 */
	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usTMed;

	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
			hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;

	/* +50 is rounding before the /100 scale-down. */
	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* Temperatures come in centi-degrees; convert (rounded) to
	 * degrees. The fan table is stored big-endian for the SMC.
	 */
	fan_table.TempMin = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMin) / 100);
	fan_table.TempMed = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMed) / 100);
	fan_table.TempMax = cpu_to_be16((50 + hwmgr->
			thermal_controller.advanceFanControlParameters.usTMax) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(hwmgr->
			thermal_controller.advanceFanControlParameters.ucTHyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = smu7_get_xclk(hwmgr);

	/* Refresh period derived from the cycle delay and reference
	 * clock; the /1600 scale factor is SMC-defined -- TODO confirm.
	 */
	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
			thermal_controller.advanceFanControlParameters.ulCycleDelay *
			reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);

	/* Use whichever temperature source the hardware is configured
	 * for.
	 */
	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
			hwmgr->device, CGS_IND_REG__SMC,
			CG_MULT_THERMAL_CTRL, TEMP_SEL);

	res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
			(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
			SMC_RAM_END);

	/* Optional minimum-PWM and acoustic-limit messages, only when the
	 * upload succeeded and the parameter is non-zero.
	 */
	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ucMinimumPWMLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetFanMinPwm,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ucMinimumPWMLimit);

	if (!res && hwmgr->thermal_controller.
			advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
		res = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetFanSclkTarget,
				hwmgr->thermal_controller.
				advanceFanControlParameters.ulMinFanSCLKAcousticLimit);

	if (res)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl);

	return 0;
}
+
+
+static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+       int ret;
+       struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+       if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+               return 0;
+
+       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+
+       if (!ret)
+               /* If this param is not changed, this function could fire unnecessarily */
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+       return ret;
+}
+
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+               return fiji_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+       int result = 0;
+       uint32_t low_sclk_interrupt_threshold = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               low_sclk_interrupt_threshold =
+                               data->low_sclk_interrupt_threshold;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr,
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU73_Discrete_DpmTable,
+                                       LowSclkInterruptThreshold),
+                               (uint8_t *)&low_sclk_interrupt_threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+       result = fiji_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters!",
+                       );
+       return result;
+}
+
+/*
+ * fiji_get_offsetof - translate a generic (type, member) pair into the byte
+ * offset of that member inside the matching SMU73 firmware structure.
+ *
+ * Returns the offset on success, or 0 (after a warning) when the pair is
+ * not recognized.
+ */
+static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU73_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+               case PreVBlankGap:
+                       return offsetof(SMU73_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU73_SoftRegisters, VBlankTimeout);
+               case UcodeLoadStatus:
+                       return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
+               case DRAM_LOG_ADDR_H:
+                       return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_H);
+               case DRAM_LOG_ADDR_L:
+                       return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_L);
+               case DRAM_LOG_PHY_ADDR_H:
+                       return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+               case DRAM_LOG_PHY_ADDR_L:
+                       return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+               case DRAM_LOG_BUFF_SIZE:
+                       return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+               }
+               /* Fix: without this break an unknown SoftRegisters member fell
+                * through into the SMU_Discrete_DpmTable case and could match a
+                * DpmTable member value, silently returning a wrong offset
+                * instead of reaching the warning below.
+                */
+               break;
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case UvdBootLevel:
+                       return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+               case VceBootLevel:
+                       return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+               case SamuBootLevel:
+                       return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+               }
+               break;
+       }
+       pr_warn("can't get the offset of type %x member %x\n", type, member);
+       return 0;
+}
+
+/*
+ * fiji_get_mac_definition - map a generic SMU_MAX_* capacity constant to the
+ * SMU73-specific limit used by the Fiji firmware interface.
+ * ("mac" follows the upstream spelling; presumably short for "max".)
+ * Returns 0 (after a warning) for unrecognized values.
+ */
+static uint32_t fiji_get_mac_definition(uint32_t value)
+{
+       switch (value) {
+       case SMU_MAX_LEVELS_GRAPHICS:
+               return SMU73_MAX_LEVELS_GRAPHICS;
+       case SMU_MAX_LEVELS_MEMORY:
+               return SMU73_MAX_LEVELS_MEMORY;
+       case SMU_MAX_LEVELS_LINK:
+               return SMU73_MAX_LEVELS_LINK;
+       case SMU_MAX_ENTRIES_SMIO:
+               return SMU73_MAX_ENTRIES_SMIO;
+       case SMU_MAX_LEVELS_VDDC:
+               return SMU73_MAX_LEVELS_VDDC;
+       case SMU_MAX_LEVELS_VDDGFX:
+               return SMU73_MAX_LEVELS_VDDGFX;
+       case SMU_MAX_LEVELS_VDDCI:
+               return SMU73_MAX_LEVELS_VDDCI;
+       case SMU_MAX_LEVELS_MVDD:
+               return SMU73_MAX_LEVELS_MVDD;
+       }
+
+       pr_warn("can't get the mac of %x\n", value);
+       return 0;
+}
+
+
+/*
+ * fiji_update_uvd_smc_table - program the UVD boot level into SMC RAM.
+ *
+ * Selects the last entry of the MM dependency table as the boot level (0 if
+ * the table is empty), patches the UvdBootLevel byte of the DPM table copy
+ * in SMC RAM via an indirect-register read-modify-write, and, when UVD DPM
+ * is disabled or a stable p-state is requested, sends the enabled UVD level
+ * mask to the SMC.  Always returns 0.
+ */
+static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       smu_data->smc_state_table.UvdBootLevel = 0;
+       if (table_info->mm_dep_table->count > 0)
+               smu_data->smc_state_table.UvdBootLevel =
+                               (uint8_t) (table_info->mm_dep_table->count - 1);
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
+                                               UvdBootLevel);
+       /* Round the byte offset down to the enclosing dword boundary. */
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       /* Replace only the top byte (bits 31:24) of the dword. */
+       mm_boot_level_value &= 0x00FFFFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_UVDDPM) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+       return 0;
+}
+
+/*
+ * fiji_update_vce_smc_table - program the VCE boot level into SMC RAM.
+ *
+ * Uses the last MM dependency table entry as the boot level when a stable
+ * p-state is requested, otherwise 0; patches the VceBootLevel byte of the
+ * DPM table copy in SMC RAM via a read-modify-write, and in the stable
+ * p-state case sends the enabled VCE level mask to the SMC.
+ * Always returns 0.
+ */
+static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_StablePState))
+               smu_data->smc_state_table.VceBootLevel =
+                       (uint8_t) (table_info->mm_dep_table->count - 1);
+       else
+               smu_data->smc_state_table.VceBootLevel = 0;
+
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                                       offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+       /* Round the byte offset down to the enclosing dword boundary. */
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       /* Replace only bits 23:16 of the dword. */
+       mm_boot_level_value &= 0xFF00FFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+       return 0;
+}
+
+/*
+ * fiji_update_samu_smc_table - program the SAMU boot level into SMC RAM.
+ *
+ * The boot level is hard-wired to 0; the SamuBootLevel byte of the DPM
+ * table copy in SMC RAM is patched via a read-modify-write, and when a
+ * stable p-state is requested the enabled SAMU level mask is sent to the
+ * SMC.  Always returns 0.
+ */
+static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+       smu_data->smc_state_table.SamuBootLevel = 0;
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+
+       /* Round the byte offset down to the enclosing dword boundary. */
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       /* Replace only the low byte (bits 7:0) of the dword. */
+       mm_boot_level_value &= 0xFFFFFF00;
+       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+       return 0;
+}
+
+/*
+ * fiji_update_smc_table - dispatch an SMC table refresh by table type.
+ * Unknown types are silently ignored; always returns 0 (per-table helper
+ * return values are discarded).
+ */
+static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+       switch (type) {
+       case SMU_UVD_TABLE:
+               fiji_update_uvd_smc_table(hwmgr);
+               break;
+       case SMU_VCE_TABLE:
+               fiji_update_vce_smc_table(hwmgr);
+               break;
+       case SMU_SAMU_TABLE:
+               fiji_update_samu_smc_table(hwmgr);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+/*
+ * fiji_process_firmware_header - read table start offsets out of the SMU73
+ * firmware header in SMC RAM and cache them in the hw/smu backends.
+ *
+ * Each read that fails is accumulated into "error"; the function keeps
+ * going so that as many offsets as possible are populated.
+ * Returns 0 on full success, -1 if any tracked read failed.
+ */
+static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+       uint32_t tmp;
+       int result;
+       bool error = false;
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, DpmTable),
+                       &tmp, SMC_RAM_END);
+
+       if (0 == result)
+               smu_data->smu7_data.dpm_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, SoftRegisters),
+                       &tmp, SMC_RAM_END);
+
+       if (!result) {
+               /* Soft-register base is cached in both backends. */
+               data->soft_regs_start = tmp;
+               smu_data->smu7_data.soft_regs_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, mcRegisterTable),
+                       &tmp, SMC_RAM_END);
+
+       /* NOTE(review): unlike every other read here, a failure reading the
+        * MC register table offset does not set "error" — presumably this
+        * table is optional on Fiji; confirm before relying on it.
+        */
+       if (!result)
+               smu_data->smu7_data.mc_reg_table_start = tmp;
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, FanTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.fan_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.arb_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU73_Firmware_Header, Version),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               hwmgr->microcode_version_info.SMC = tmp;
+
+       error |= (0 != result);
+
+       return error ? -1 : 0;
+}
+
+/*
+ * fiji_initialize_mc_reg_table - mirror MC sequencer timing/control
+ * registers into their low-power (_LP) shadow copies.
+ * Always returns 0.
+ */
+static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+
+       /* Program additional LP registers
+        * that are no longer programmed by VBIOS
+        */
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+
+       return 0;
+}
+
+/*
+ * fiji_is_dpm_running - report whether DPM is active, judged by the
+ * VOLTAGE_CONTROLLER_ON bit of the SMC FEATURE_STATUS register.
+ */
+static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+                       ? true : false;
+}
+
+/*
+ * fiji_populate_requested_graphic_levels - apply a power profile's activity
+ * threshold and up/down hysteresis to every populated graphics DPM level,
+ * then copy the whole GraphicsLevel array back into SMC RAM.
+ *
+ * Returns the result of smu7_copy_bytes_to_smc() (0 on success).
+ */
+static int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+               struct amd_pp_profile *request)
+{
+       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
+                       (hwmgr->smu_backend);
+       struct SMU73_Discrete_GraphicsLevel *levels =
+                       smu_data->smc_state_table.GraphicsLevel;
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+       /* The full (maximum-size) level array is written, not just the
+        * populated entries.
+        */
+       uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+                       SMU73_MAX_LEVELS_GRAPHICS;
+       uint32_t i;
+
+       for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+               /* ActivityLevel is stored big-endian for the SMC. */
+               levels[i].ActivityLevel =
+                               cpu_to_be16(request->activity_threshold);
+               levels[i].EnabledForActivity = 1;
+               levels[i].UpHyst = request->up_hyst;
+               levels[i].DownHyst = request->down_hyst;
+       }
+
+       return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+                               array_size, SMC_RAM_END);
+}
 
 const struct pp_smumgr_func fiji_smu_funcs = {
        .smu_init = &fiji_smu_init,
index 175bf9f8ef9cd9ba25e9ed86db56dc71e490b2f4..279647772578b7e246f4602b6de77c3bd461c58e 100644 (file)
 #include "smu7_smumgr.h"
 
 
+/* Power-tune default parameters for Fiji.  Field names mirror the SMU73
+ * power-tune table entries they seed — presumably SVI load-line and
+ * TDC/DTE tuning values; confirm against the SMU73 firmware headers.
+ */
+struct fiji_pt_defaults {
+       uint8_t   SviLoadLineEn;
+       uint8_t   SviLoadLineVddC;
+       uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+       uint8_t   TDC_MAWt;
+       uint8_t   TdcWaterfallCtl;
+       uint8_t   DTEAmbientTempBase;
+};
+
 struct fiji_smumgr {
        struct smu7_smumgr                   smu7_data;
        struct SMU73_Discrete_DpmTable       smc_state_table;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
deleted file mode 100644 (file)
index efb0fc0..0000000
+++ /dev/null
@@ -1,2568 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- *
- */
-
-#include "pp_debug.h"
-#include "iceland_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "pppcielanes.h"
-#include "pp_endian.h"
-#include "smu7_ppsmc.h"
-
-#include "smu71_discrete.h"
-
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "processpptables.h"
-
-#include "iceland_smumgr.h"
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX    1
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define VDDC_VDDCI_DELTA            200
-
-#define DEVICE_ID_VI_ICELAND_M_6900    0x6900
-#define DEVICE_ID_VI_ICELAND_M_6901    0x6901
-#define DEVICE_ID_VI_ICELAND_M_6902    0x6902
-#define DEVICE_ID_VI_ICELAND_M_6903    0x6903
-
-static const struct iceland_pt_defaults defaults_iceland = {
-       /*
-        * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
-        * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
-        */
-       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
-       { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
-       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
-};
-
-/* 35W - XT, XTL */
-static const struct iceland_pt_defaults defaults_icelandxt = {
-       /*
-        * sviLoadLIneEn, SviLoadLineVddC,
-        * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
-        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
-        * BAPM_TEMP_GRADIENT
-        */
-       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
-       { 0xA7,  0x0, 0x0, 0xB5,  0x0, 0x0, 0x9F,  0x0, 0x0, 0xD6,  0x0, 0x0, 0xD7,  0x0, 0x0},
-       { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
-};
-
-/* 25W - PRO, LE */
-static const struct iceland_pt_defaults defaults_icelandpro = {
-       /*
-        * sviLoadLIneEn, SviLoadLineVddC,
-        * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
-        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
-        * BAPM_TEMP_GRADIENT
-        */
-       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
-       { 0xB7,  0x0, 0x0, 0xC3,  0x0, 0x0, 0xB5,  0x0, 0x0, 0xEA,  0x0, 0x0, 0xE6,  0x0, 0x0},
-       { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
-};
-
-static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       struct cgs_system_info sys_info = {0};
-       uint32_t dev_id;
-
-       sys_info.size = sizeof(struct cgs_system_info);
-       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
-       cgs_query_system_info(hwmgr->device, &sys_info);
-       dev_id = (uint32_t)sys_info.value;
-
-       switch (dev_id) {
-       case DEVICE_ID_VI_ICELAND_M_6900:
-       case DEVICE_ID_VI_ICELAND_M_6903:
-               smu_data->power_tune_defaults = &defaults_icelandxt;
-               break;
-
-       case DEVICE_ID_VI_ICELAND_M_6901:
-       case DEVICE_ID_VI_ICELAND_M_6902:
-               smu_data->power_tune_defaults = &defaults_icelandpro;
-               break;
-       default:
-               smu_data->power_tune_defaults = &defaults_iceland;
-               pr_warn("Unknown V.I. Device ID.\n");
-               break;
-       }
-       return;
-}
-
-static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
-       smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
-       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
-       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-       return 0;
-}
-
-static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-       uint16_t tdc_limit;
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
-       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
-                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-                       defaults->tdc_vddc_throttle_release_limit_perc;
-       smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
-
-       return 0;
-}
-
-static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
-       uint32_t temp;
-
-       if (smu7_read_smc_sram_dword(hwmgr,
-                       fuse_table_offset +
-                       offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
-                       (uint32_t *)&temp, SMC_RAM_END))
-               PP_ASSERT_WITH_CODE(false,
-                               "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
-                               return -EINVAL);
-       else
-               smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
-
-       return 0;
-}
-
-static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
-       return 0;
-}
-
-static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 8; i++)
-               smu_data->power_tune_table.GnbLPML[i] = 0;
-
-       return 0;
-}
-
-static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-       uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
-       struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
-
-       HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-       LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
-       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
-       return 0;
-}
-
-static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
-       uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
-
-       PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
-                           "The CAC Leakage table does not exist!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
-                           "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
-                           "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
-               for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
-                       lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
-                       hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
-               }
-       } else {
-               PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
-       }
-
-       return 0;
-}
-
-static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       uint8_t *vid = smu_data->power_tune_table.VddCVid;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
-               "There should never be more than 8 entries for VddcVid!!!",
-               return -EINVAL);
-
-       for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
-               vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
-       }
-
-       return 0;
-}
-
-
-
-static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       uint32_t pm_fuse_table_offset;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               if (smu7_read_smc_sram_dword(hwmgr,
-                               SMU71_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU71_Firmware_Header, PmFuseTable),
-                               &pm_fuse_table_offset, SMC_RAM_END))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to get pm_fuse_table_offset Failed!",
-                                       return -EINVAL);
-
-               /* DW0 - DW3 */
-               if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate bapm vddc vid Failed!",
-                                       return -EINVAL);
-
-               /* DW4 - DW5 */
-               if (iceland_populate_vddc_vid(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate vddc vid Failed!",
-                                       return -EINVAL);
-
-               /* DW6 */
-               if (iceland_populate_svi_load_line(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate SviLoadLine Failed!",
-                                       return -EINVAL);
-               /* DW7 */
-               if (iceland_populate_tdc_limit(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
-               /* DW8 */
-               if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TdcWaterfallCtl, "
-                                       "LPMLTemperature Min and Max Failed!",
-                                       return -EINVAL);
-
-               /* DW9-DW12 */
-               if (0 != iceland_populate_temperature_scaler(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate LPMLTemperatureScaler Failed!",
-                                       return -EINVAL);
-
-               /* DW13-DW16 */
-               if (iceland_populate_gnb_lpml(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate GnbLPML Failed!",
-                                       return -EINVAL);
-
-               /* DW18 */
-               if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
-                                       return -EINVAL);
-
-               if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
-                               (uint8_t *)&smu_data->power_tune_table,
-                               sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to download PmFuseTable Failed!",
-                                       return -EINVAL);
-       }
-       return 0;
-}
-
-static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-       struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
-       uint32_t clock, uint32_t *vol)
-{
-       uint32_t i = 0;
-
-       /* clock - voltage dependency table is empty table */
-       if (allowed_clock_voltage_table->count == 0)
-               return -EINVAL;
-
-       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
-               /* find first sclk bigger than request */
-               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
-                       *vol = allowed_clock_voltage_table->entries[i].v;
-                       return 0;
-               }
-       }
-
-       /* sclk is bigger than max sclk in the dependence table */
-       *vol = allowed_clock_voltage_table->entries[i - 1].v;
-
-       return 0;
-}
-
-static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
-               pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
-               uint16_t *lo)
-{
-       uint16_t v_index;
-       bool vol_found = false;
-       *hi = tab->value * VOLTAGE_SCALE;
-       *lo = tab->value * VOLTAGE_SCALE;
-
-       /* SCLK/VDDC Dependency Table has to exist. */
-       PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
-                       "The SCLK/VDDC Dependency Table does not exist.\n",
-                       return -EINVAL);
-
-       if (NULL == hwmgr->dyn_state.cac_leakage_table) {
-               pr_warn("CAC Leakage Table does not exist, using vddc.\n");
-               return 0;
-       }
-
-       /*
-        * Since voltage in the sclk/vddc dependency table is not
-        * necessarily in ascending order because of ELB voltage
-        * patching, loop through entire list to find exact voltage.
-        */
-       for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
-               if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
-                       vol_found = true;
-                       if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
-                               *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
-                               *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
-                       } else {
-                               pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
-                               *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
-                               *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
-                       }
-                       break;
-               }
-       }
-
-       /*
-        * If voltage is not found in the first pass, loop again to
-        * find the best match, equal or higher value.
-        */
-       if (!vol_found) {
-               for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
-                       if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
-                               vol_found = true;
-                               if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
-                                       *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
-                                       *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
-                               } else {
-                                       pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
-                                       *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
-                                       *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
-                               }
-                               break;
-                       }
-               }
-
-               if (!vol_found)
-                       pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
-       }
-
-       return 0;
-}
-
-static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
-               pp_atomctrl_voltage_table_entry *tab,
-               SMU71_Discrete_VoltageLevel *smc_voltage_tab)
-{
-       int result;
-
-       result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
-                       &smc_voltage_tab->StdVoltageHiSidd,
-                       &smc_voltage_tab->StdVoltageLoSidd);
-       if (0 != result) {
-               smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
-               smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
-       }
-
-       smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
-       CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
-       CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
-
-       return 0;
-}
-
-static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
-                       SMU71_Discrete_DpmTable *table)
-{
-       unsigned int count;
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       table->VddcLevelCount = data->vddc_voltage_table.count;
-       for (count = 0; count < table->VddcLevelCount; count++) {
-               result = iceland_populate_smc_voltage_table(hwmgr,
-                               &(data->vddc_voltage_table.entries[count]),
-                               &(table->VddcLevel[count]));
-               PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
-
-               /* GPIO voltage control */
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
-                       table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
-               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
-                       table->VddcLevel[count].Smio = 0;
-       }
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
-
-       return 0;
-}
-
-static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
-                       SMU71_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t count;
-       int result;
-
-       table->VddciLevelCount = data->vddci_voltage_table.count;
-
-       for (count = 0; count < table->VddciLevelCount; count++) {
-               result = iceland_populate_smc_voltage_table(hwmgr,
-                               &(data->vddci_voltage_table.entries[count]),
-                               &(table->VddciLevel[count]));
-               PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
-               else
-                       table->VddciLevel[count].Smio |= 0;
-       }
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
-       return 0;
-}
-
-static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
-                       SMU71_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t count;
-       int result;
-
-       table->MvddLevelCount = data->mvdd_voltage_table.count;
-
-       for (count = 0; count < table->VddciLevelCount; count++) {
-               result = iceland_populate_smc_voltage_table(hwmgr,
-                               &(data->mvdd_voltage_table.entries[count]),
-                               &table->MvddLevel[count]);
-               PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
-                       table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
-               else
-                       table->MvddLevel[count].Smio |= 0;
-       }
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
-
-       return 0;
-}
-
-
-static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-       SMU71_Discrete_DpmTable *table)
-{
-       int result;
-
-       result = iceland_populate_smc_vddc_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "can not populate VDDC voltage table to SMC", return -EINVAL);
-
-       result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "can not populate VDDCI voltage table to SMC", return -EINVAL);
-
-       result = iceland_populate_smc_mvdd_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "can not populate MVDD voltage table to SMC", return -EINVAL);
-
-       return 0;
-}
-
-static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
-               struct SMU71_Discrete_Ulv *state)
-{
-       uint32_t voltage_response_time, ulv_voltage;
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       state->CcPwrDynRm = 0;
-       state->CcPwrDynRm1 = 0;
-
-       result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
-       PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
-
-       if (ulv_voltage == 0) {
-               data->ulv_supported = false;
-               return 0;
-       }
-
-       if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
-               /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
-               if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
-                       state->VddcOffset = 0;
-               else
-                       /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
-                       state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
-       } else {
-               /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
-               if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
-                       state->VddcOffsetVid = 0;
-               else  /* used in SVI2 Mode */
-                       state->VddcOffsetVid = (uint8_t)(
-                                       (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
-                                               * VOLTAGE_VID_OFFSET_SCALE2
-                                               / VOLTAGE_VID_OFFSET_SCALE1);
-       }
-       state->VddcPhase = 1;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
-       return 0;
-}
-
-static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
-                SMU71_Discrete_Ulv *ulv_level)
-{
-       return iceland_populate_ulv_level(hwmgr, ulv_level);
-}
-
-static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       uint32_t i;
-
-       /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
-       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-               table->LinkLevel[i].PcieGenSpeed  =
-                       (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-               table->LinkLevel[i].PcieLaneCount =
-                       (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
-               table->LinkLevel[i].EnabledForActivity =
-                       1;
-               table->LinkLevel[i].SPC =
-                       (uint8_t)(data->pcie_spc_cap & 0xff);
-               table->LinkLevel[i].DownThreshold =
-                       PP_HOST_TO_SMC_UL(5);
-               table->LinkLevel[i].UpThreshold =
-                       PP_HOST_TO_SMC_UL(30);
-       }
-
-       smu_data->smc_state_table.LinkLevelCount =
-               (uint8_t)dpm_table->pcie_speed_table.count;
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-               phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-       return 0;
-}
-
/**
 * Calculates the SCLK dividers using the provided engine clock
 *
 * @param    hwmgr      the address of the hardware manager
 * @param    engine_clock the engine clock to use to populate the structure
 * @param    sclk        the SMC SCLK structure to be populated
 * @return   0 on success, otherwise the error returned by the VBIOS
 *           divider lookup.
 */
static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	pp_atomctrl_clock_dividers_vi dividers;
	/* start from the SPLL register values cached in the backend */
	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t    reference_clock;
	uint32_t reference_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
	reference_clock = atomctrl_get_reference_clock(hwmgr);

	reference_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider*/
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup*/
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
		CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation*/
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
		CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);

	/* optional engine-clock spread spectrum programming */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;

		uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
		if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
			/*
			* ss_info.speed_spectrum_percentage -- in unit of 0.01%
			* ss_info.speed_spectrum_rate -- in unit of khz
			*/
			/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
			uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);

			/* clkv = 2 * D * fbdiv / NS */
			uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);

			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
			cg_spll_spread_spectrum =
				PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 =
				PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
		}
	}

	/* copy the computed divider / spread-spectrum state to the SMC level */
	sclk->SclkFrequency        = engine_clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
	/* NOTE(review): spll_func_cntl is updated above but never written
	 * back into *sclk here -- confirm whether that is intentional. */

	return 0;
}
-
-static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
-                               const struct phm_phase_shedding_limits_table *pl,
-                                       uint32_t sclk, uint32_t *p_shed)
-{
-       unsigned int i;
-
-       /* use the minimum phase shedding */
-       *p_shed = 1;
-
-       for (i = 0; i < pl->count; i++) {
-               if (sclk < pl->entries[i].Sclk) {
-                       *p_shed = i;
-                       break;
-               }
-       }
-       return 0;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    engine_clock the engine clock to use to populate the structure
- * @param    sclk        the SMC SCLK structure to be populated
- */
-static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-                                               uint32_t engine_clock,
-                               uint16_t sclk_activity_level_threshold,
-                               SMU71_Discrete_GraphicsLevel *graphic_level)
-{
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
-       /* populate graphics levels*/
-       result = iceland_get_dependency_volt_by_clk(hwmgr,
-               hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
-               &graphic_level->MinVddc);
-       PP_ASSERT_WITH_CODE((0 == result),
-               "can not find VDDC voltage value for VDDC       \
-               engine clock dependency table", return result);
-
-       /* SCLK frequency in units of 10KHz*/
-       graphic_level->SclkFrequency = engine_clock;
-       graphic_level->MinVddcPhases = 1;
-
-       if (data->vddc_phase_shed_control)
-               iceland_populate_phase_value_based_on_sclk(hwmgr,
-                               hwmgr->dyn_state.vddc_phase_shed_limits_table,
-                               engine_clock,
-                               &graphic_level->MinVddcPhases);
-
-       /* Indicates maximum activity level for this performance level. 50% for now*/
-       graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
-       graphic_level->CcPwrDynRm = 0;
-       graphic_level->CcPwrDynRm1 = 0;
-       /* this level can be used if activity is high enough.*/
-       graphic_level->EnabledForActivity = 0;
-       /* this level can be used for throttling.*/
-       graphic_level->EnabledForThrottle = 1;
-       graphic_level->UpHyst = 0;
-       graphic_level->DownHyst = 100;
-       graphic_level->VoltageDownHyst = 0;
-       graphic_level->PowerThrottle = 0;
-
-       data->display_timing.min_clock_in_sr =
-                       hwmgr->display_config.min_core_set_clock_in_sr;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep))
-               graphic_level->DeepSleepDivId =
-                               smu7_get_sleep_divider_id_from_clock(engine_clock,
-                                               data->display_timing.min_clock_in_sr);
-
-       /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
-       graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       if (0 == result) {
-               graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
-       }
-
-       return result;
-}
-
/**
 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
 *
 * @param    hwmgr      the address of the hardware manager
 * @return   0 on success, otherwise the first error from level population
 *           or from the copy to SMC RAM.
 */
int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	/* SMC-side address of the graphics level array in the DPM table */
	uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
				offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);

	uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
						SMU71_MAX_LEVELS_GRAPHICS;

	SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;

	uint32_t i;
	uint8_t highest_pcie_level_enabled = 0;
	uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
	uint8_t count = 0;
	int result = 0;

	memset(levels, 0x00, level_array_size);

	/* fill one SMC level per trimmed SCLK DPM state */
	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = iceland_populate_single_graphic_level(hwmgr,
					dpm_table->sclk_table.dpm_levels[i].value,
					(uint16_t)smu_data->activity_target[i],
					&(smu_data->smc_state_table.GraphicsLevel[i]));
		if (result != 0)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
	}

	/* Only enable level 0 for now. */
	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	/* set highest level watermark to high */
	if (dpm_table->sclk_table.count > 1)
		smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	smu_data->smc_state_table.GraphicsDpmLevelCount =
		(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	/* NOTE(review): the scans below assume pcie_dpm_enable_mask has at
	 * least one bit set; an all-zero mask would keep incrementing
	 * lowest_pcie_level_enabled -- confirm the mask is never empty. */

	/* walk up through the run of consecutively-enabled PCIE levels */
	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
				(1 << (highest_pcie_level_enabled + 1))) != 0) {
		highest_pcie_level_enabled++;
	}

	/* find the lowest enabled PCIE level */
	while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
		(1 << lowest_pcie_level_enabled)) == 0) {
		lowest_pcie_level_enabled++;
	}

	/* count disabled levels directly above the lowest enabled one */
	while ((count < highest_pcie_level_enabled) &&
			((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
				(1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
		count++;
	}

	/* middle level clamps to the highest enabled level */
	mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
		(lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;


	/* set pcieDpmLevel to highest_pcie_level_enabled*/
	for (i = 2; i < dpm_table->sclk_table.count; i++) {
		smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
	}

	/* set pcieDpmLevel to lowest_pcie_level_enabled*/
	smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;

	/* set pcieDpmLevel to mid_pcie_level_enabled*/
	smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;

	/* level count will send to smc once at init smc table and never change*/
	result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress,
				(uint8_t *)levels, (uint32_t)level_array_size,
								SMC_RAM_END);

	return result;
}
-
/**
 * Populates the SMC MCLK structure using the provided memory clock
 *
 * @param    hwmgr      the address of the hardware manager
 * @param    memory_clock the memory clock to use to populate the structure
 * @param    mclk        the SMC memory level structure to be populated
 * @param    strobe_mode whether strobe-mode dividers are requested from the VBIOS
 * @param    dllStateOn  value programmed into the MRDCK0/1_PDNB power-down bits
 * @return   0 on success, otherwise the VBIOS divider lookup's error code
 */
static int iceland_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU71_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* start from the MPLL/MCLK register values cached in the backend */
	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	/* MPLL_FUNC_CNTL setup*/
	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	/* MPLL_FUNC_CNTL_1 setup*/
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	/* MPLL_AD_FUNC_CNTL setup*/
	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	/* the DQ PLL branch is only programmed for GDDR5 */
	if (data->is_memory_gddr5) {
		/* MPLL_DQ_FUNC_CNTL setup*/
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		/*
		 ************************************
		 Fref = Reference Frequency
		 NF = Feedback divider ratio
		 NR = Reference divider ratio
		 Fnom = Nominal VCO output frequency = Fref * NF / NR
		 Fs = Spreading Rate
		 D = Percentage down-spread / 2
		 Fint = Reference input frequency to PFD = Fref / NR
		 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
		 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
		 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
		 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
		 *************************************
		 */
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
			/* ss.Info.speed_spectrum_rate -- in unit of khz */
			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
			/*     = reference_clock * 5 / speed_spectrum_rate */
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;

			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
			/*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	/* MCLK_PWRMGT_CNTL setup */
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);


	/* Save the result data to outpupt memory level structure */
	mclk->MclkFrequency   = memory_clock;
	mclk->MpllFuncCntl    = mpll_func_cntl;
	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
	mclk->DllCntl         = dll_cntl;
	mclk->MpllSs1         = mpll_ss1;
	mclk->MpllSs2         = mpll_ss2;

	return 0;
}
-
/*
 * Map a GDDR5 memory clock to its MC_SEQ frequency-ratio index.
 * Clocks below/above the supported window clamp to 0x00/0x0f; in-window
 * clocks map linearly (step 2500 in strobe mode, 5000 otherwise).
 */
static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	if (strobe_mode) {
		if (memory_clock < 12500)
			return 0x00;
		if (memory_clock > 47500)
			return 0x0f;
		return (uint8_t)((memory_clock - 10000) / 2500);
	}

	if (memory_clock < 65000)
		return 0x00;
	if (memory_clock > 135000)
		return 0x0f;
	return (uint8_t)((memory_clock - 60000) / 5000);
}
-
/*
 * Map a DDR3 memory clock to its MC_SEQ frequency-ratio index: clamp to
 * 0 below 10000 and to 0x0f at/above 80000, linear (step 5000, offset 1)
 * in between.
 */
static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
-
-static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
-                                       uint32_t memory_clock, uint32_t *p_shed)
-{
-       unsigned int i;
-
-       *p_shed = 1;
-
-       for (i = 0; i < pl->count; i++) {
-               if (memory_clock < pl->entries[i].Mclk) {
-                       *p_shed = i;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int iceland_populate_single_memory_level(
-               struct pp_hwmgr *hwmgr,
-               uint32_t memory_clock,
-               SMU71_Discrete_MemoryLevel *memory_level
-               )
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       int result = 0;
-       bool dll_state_on;
-       struct cgs_display_info info = {0};
-       uint32_t mclk_edc_wr_enable_threshold = 40000;
-       uint32_t mclk_edc_enable_threshold = 40000;
-       uint32_t mclk_strobe_mode_threshold = 40000;
-
-       if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
-               result = iceland_get_dependency_volt_by_clk(hwmgr,
-                       hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
-       }
-
-       if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
-               memory_level->MinVddci = memory_level->MinVddc;
-       } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
-               result = iceland_get_dependency_volt_by_clk(hwmgr,
-                               hwmgr->dyn_state.vddci_dependency_on_mclk,
-                               memory_clock,
-                               &memory_level->MinVddci);
-               PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
-       }
-
-       memory_level->MinVddcPhases = 1;
-
-       if (data->vddc_phase_shed_control) {
-               iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
-                               memory_clock, &memory_level->MinVddcPhases);
-       }
-
-       memory_level->EnabledForThrottle = 1;
-       memory_level->EnabledForActivity = 0;
-       memory_level->UpHyst = 0;
-       memory_level->DownHyst = 100;
-       memory_level->VoltageDownHyst = 0;
-
-       /* Indicates maximum activity level for this performance level.*/
-       memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-       memory_level->StutterEnable = 0;
-       memory_level->StrobeEnable = 0;
-       memory_level->EdcReadEnable = 0;
-       memory_level->EdcWriteEnable = 0;
-       memory_level->RttEnable = 0;
-
-       /* default set to low watermark. Highest level will be set to high later.*/
-       memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-       data->display_timing.num_existing_displays = info.display_count;
-
-       /* stutter mode not support on iceland */
-
-       /* decide strobe mode*/
-       memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
-               (memory_clock <= mclk_strobe_mode_threshold);
-
-       /* decide EDC mode and memory clock ratio*/
-       if (data->is_memory_gddr5) {
-               memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
-                                       memory_level->StrobeEnable);
-
-               if ((mclk_edc_enable_threshold != 0) &&
-                               (memory_clock > mclk_edc_enable_threshold)) {
-                       memory_level->EdcReadEnable = 1;
-               }
-
-               if ((mclk_edc_wr_enable_threshold != 0) &&
-                               (memory_clock > mclk_edc_wr_enable_threshold)) {
-                       memory_level->EdcWriteEnable = 1;
-               }
-
-               if (memory_level->StrobeEnable) {
-                       if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
-                                       ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
-                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
-                       else
-                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
-               } else
-                       dll_state_on = data->dll_default_on;
-       } else {
-               memory_level->StrobeRatio =
-                       iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
-               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
-       }
-
-       result = iceland_calculate_mclk_params(hwmgr,
-               memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
-
-       if (0 == result) {
-               memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
-               memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
-               memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
-               /* MCLK frequency in units of 10KHz*/
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
-               /* Indicates maximum activity level for this performance level.*/
-               CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
-               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
-       }
-
-       return result;
-}
-
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param    hwmgr      the address of the hardware manager
- */
-
-int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       int result;
-
-       /* populate MCLK dpm table to SMU7 */
-       uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
-       uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
-       SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
-       uint32_t i;
-
-       memset(levels, 0x00, level_array_size);
-
-       for (i = 0; i < dpm_table->mclk_table.count; i++) {
-               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-                       "can not populate memory level as memory clock is zero", return -EINVAL);
-               result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
-                       &(smu_data->smc_state_table.MemoryLevel[i]));
-               if (0 != result) {
-                       return result;
-               }
-       }
-
-       /* Only enable level 0 for now.*/
-       smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
-       /*
-       * in order to prevent MC activity from stutter mode to push DPM up.
-       * the UVD change complements this by putting the MCLK in a higher state
-       * by default such that we are not effected by up threshold or and MCLK DPM latency.
-       */
-       smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
-       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
-       smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-       /* set highest level watermark to high*/
-       smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       /* level count will send to smc once at init smc table and never change*/
-       result = smu7_copy_bytes_to_smc(hwmgr,
-               level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
-               SMC_RAM_END);
-
-       return result;
-}
-
-static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
-                                       SMU71_Discrete_VoltageLevel *voltage)
-{
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       uint32_t i = 0;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-               /* find mvdd value which clock is more than request */
-               for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
-                       if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
-                               /* Always round to higher voltage. */
-                               voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
-                               break;
-                       }
-               }
-
-               PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
-                       "MVDD Voltage is outside the supported range.", return -EINVAL);
-
-       } else {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
-       SMU71_Discrete_DpmTable *table)
-{
-       int result = 0;
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       uint32_t vddc_phase_shed_control = 0;
-
-       SMU71_Discrete_VoltageLevel voltage_level;
-       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-       uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
-       uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
-
-       /* The ACPI state should not do DPM on DC (or ever).*/
-       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-       if (data->acpi_vddc)
-               table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
-       else
-               table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
-
-       table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
-       /* assign zero for now*/
-       table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
-       /* get the engine clock dividers for this clock value*/
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
-               table->ACPILevel.SclkFrequency,  &dividers);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-               "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-       /* divider ID for required SCLK*/
-       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
-       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-       table->ACPILevel.DeepSleepDivId = 0;
-
-       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
-                                                       CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
-       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
-                                                       CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
-       spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
-                                                       CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
-
-       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
-       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
-       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       table->ACPILevel.CcPwrDynRm = 0;
-       table->ACPILevel.CcPwrDynRm1 = 0;
-
-
-       /* For various features to be enabled/disabled while this level is active.*/
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
-       /* SCLK frequency in units of 10KHz*/
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
-       /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
-       table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
-       table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
-               table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
-       else {
-               if (data->acpi_vddci != 0)
-                       table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
-               else
-                       table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
-       }
-
-       if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
-               table->MemoryACPILevel.MinMvdd =
-                       PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
-       else
-               table->MemoryACPILevel.MinMvdd = 0;
-
-       /* Force reset on DLL*/
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
-       /* Disable DLL in ACPIState*/
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
-       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
-               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
-       /* Enable DLL bypass signal*/
-       dll_cntl            = PHM_SET_FIELD(dll_cntl,
-               DLL_CNTL, MRDCK0_BYPASS, 0);
-       dll_cntl            = PHM_SET_FIELD(dll_cntl,
-               DLL_CNTL, MRDCK1_BYPASS, 0);
-
-       table->MemoryACPILevel.DllCntl            =
-               PP_HOST_TO_SMC_UL(dll_cntl);
-       table->MemoryACPILevel.MclkPwrmgtCntl     =
-               PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
-       table->MemoryACPILevel.MpllAdFuncCntl     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
-       table->MemoryACPILevel.MpllDqFuncCntl     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
-       table->MemoryACPILevel.MpllFuncCntl       =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
-       table->MemoryACPILevel.MpllFuncCntl_1     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
-       table->MemoryACPILevel.MpllFuncCntl_2     =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
-       table->MemoryACPILevel.MpllSs1            =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
-       table->MemoryACPILevel.MpllSs2            =
-               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
-       table->MemoryACPILevel.EnabledForThrottle = 0;
-       table->MemoryACPILevel.EnabledForActivity = 0;
-       table->MemoryACPILevel.UpHyst = 0;
-       table->MemoryACPILevel.DownHyst = 100;
-       table->MemoryACPILevel.VoltageDownHyst = 0;
-       /* Indicates maximum activity level for this performance level.*/
-       table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
-       table->MemoryACPILevel.StutterEnable = 0;
-       table->MemoryACPILevel.StrobeEnable = 0;
-       table->MemoryACPILevel.EdcReadEnable = 0;
-       table->MemoryACPILevel.EdcWriteEnable = 0;
-       table->MemoryACPILevel.RttEnable = 0;
-
-       return result;
-}
-
-static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-                                       SMU71_Discrete_DpmTable *table)
-{
-       return 0;
-}
-
-static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-               SMU71_Discrete_DpmTable *table)
-{
-       return 0;
-}
-
-static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
-               SMU71_Discrete_DpmTable *table)
-{
-       return 0;
-}
-
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-       SMU71_Discrete_DpmTable *table)
-{
-       return 0;
-}
-
-static int iceland_populate_memory_timing_parameters(
-               struct pp_hwmgr *hwmgr,
-               uint32_t engine_clock,
-               uint32_t memory_clock,
-               struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
-               )
-{
-       uint32_t dramTiming;
-       uint32_t dramTiming2;
-       uint32_t burstTime;
-       int result;
-
-       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-                               engine_clock, memory_clock);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-               "Error calling VBIOS to set DRAM_TIMING.", return result);
-
-       dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-       dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-       burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
-       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
-       arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
-       return 0;
-}
-
-/**
- * Setup parameters for the MC ARB.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- * This function is to be called from the SetPowerState table.
- */
-static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       int result = 0;
-       SMU71_Discrete_MCArbDramTimingTable  arb_regs;
-       uint32_t i, j;
-
-       memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
-
-       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-                       result = iceland_populate_memory_timing_parameters
-                               (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
-                                data->dpm_table.mclk_table.dpm_levels[j].value,
-                                &arb_regs.entries[i][j]);
-
-                       if (0 != result) {
-                               break;
-                       }
-               }
-       }
-
-       if (0 == result) {
-               result = smu7_copy_bytes_to_smc(
-                               hwmgr,
-                               smu_data->smu7_data.arb_table_start,
-                               (uint8_t *)&arb_regs,
-                               sizeof(SMU71_Discrete_MCArbDramTimingTable),
-                               SMC_RAM_END
-                               );
-       }
-
-       return result;
-}
-
-static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-                       SMU71_Discrete_DpmTable *table)
-{
-       int result = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       table->GraphicsBootLevel = 0;
-       table->MemoryBootLevel = 0;
-
-       /* find boot level from dpm table*/
-       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
-                       data->vbios_boot_state.sclk_bootup_value,
-                       (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
-
-       if (0 != result) {
-               smu_data->smc_state_table.GraphicsBootLevel = 0;
-               pr_err("VBIOS did not find boot engine clock value \
-                       in dependency table. Using Graphics DPM level 0!");
-               result = 0;
-       }
-
-       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
-               data->vbios_boot_state.mclk_bootup_value,
-               (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
-
-       if (0 != result) {
-               smu_data->smc_state_table.MemoryBootLevel = 0;
-               pr_err("VBIOS did not find boot engine clock value \
-                       in dependency table. Using Memory DPM level 0!");
-               result = 0;
-       }
-
-       table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
-       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
-               table->BootVddci = table->BootVddc;
-       else
-               table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
-
-       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
-       return result;
-}
-
-static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
-                                SMU71_Discrete_MCRegisters *mc_reg_table)
-{
-       const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
-
-       uint32_t i, j;
-
-       for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
-               if (smu_data->mc_reg_table.validflag & 1<<j) {
-                       PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
-                               "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
-                       mc_reg_table->address[i].s0 =
-                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
-                       mc_reg_table->address[i].s1 =
-                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
-                       i++;
-               }
-       }
-
-       mc_reg_table->last = (uint8_t)i;
-
-       return 0;
-}
-
-/*convert register values from driver to SMC format */
-static void iceland_convert_mc_registers(
-       const struct iceland_mc_reg_entry *entry,
-       SMU71_Discrete_MCRegisterSet *data,
-       uint32_t num_entries, uint32_t valid_flag)
-{
-       uint32_t i, j;
-
-       for (i = 0, j = 0; j < num_entries; j++) {
-               if (valid_flag & 1<<j) {
-                       data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
-                       i++;
-               }
-       }
-}
-
-static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
-               const uint32_t memory_clock,
-               SMU71_Discrete_MCRegisterSet *mc_reg_table_data
-               )
-{
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       uint32_t i = 0;
-
-       for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
-               if (memory_clock <=
-                       smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
-                       break;
-               }
-       }
-
-       if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
-               --i;
-
-       iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
-                               mc_reg_table_data, smu_data->mc_reg_table.last,
-                               smu_data->mc_reg_table.validflag);
-
-       return 0;
-}
-
-static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
-               SMU71_Discrete_MCRegisters *mc_regs)
-{
-       int result = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       int res;
-       uint32_t i;
-
-       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
-               res = iceland_convert_mc_reg_table_entry_to_smc(
-                               hwmgr,
-                               data->dpm_table.mclk_table.dpm_levels[i].value,
-                               &mc_regs->data[i]
-                               );
-
-               if (0 != res)
-                       result = res;
-       }
-
-       return result;
-}
-
-static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t address;
-       int32_t result;
-
-       if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
-               return 0;
-
-
-       memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
-
-       result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
-
-       if (result != 0)
-               return result;
-
-
-       address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
-
-       return  smu7_copy_bytes_to_smc(hwmgr, address,
-                                (uint8_t *)&smu_data->mc_regs.data[0],
-                               sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
-                               SMC_RAM_END);
-}
-
-static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-
-       memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
-       result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize MCRegTable for the MC register addresses!", return result;);
-
-       result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize MCRegTable for driver state!", return result;);
-
-       return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
-                       (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
-}
-
-static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       uint8_t count, level;
-
-       count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
-
-       for (level = 0; level < count; level++) {
-               if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
-                        >= data->vbios_boot_state.sclk_bootup_value) {
-                       smu_data->smc_state_table.GraphicsBootLevel = level;
-                       break;
-               }
-       }
-
-       count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
-
-       for (level = 0; level < count; level++) {
-               if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
-                       >= data->vbios_boot_state.mclk_bootup_value) {
-                       smu_data->smc_state_table.MemoryBootLevel = level;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
-       SMU71_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
-       struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
-       struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
-       const uint16_t *def1, *def2;
-       int i, j, k;
-
-
-       /*
-        * TDP number of fraction bits are changed from 8 to 7 for Iceland
-        * as requested by SMC team
-        */
-
-       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
-       dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
-
-       dpm_table->DTETjOffset = 0;
-
-       dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
-       dpm_table->GpuTjHyst = 8;
-
-       dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
-       /* The following are for new Iceland Multi-input fan/thermal control */
-       if (NULL != ppm) {
-               dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
-               dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
-       } else {
-               dpm_table->PPM_PkgPwrLimit = 0;
-               dpm_table->PPM_TemperatureLimit = 0;
-       }
-
-       CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
-       CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
-
-       dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
-       def1 = defaults->bapmti_r;
-       def2 = defaults->bapmti_rc;
-
-       for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
-               for (j = 0; j < SMU71_DTE_SOURCES; j++) {
-                       for (k = 0; k < SMU71_DTE_SINKS; k++) {
-                               dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
-                               dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
-                               def1++;
-                               def2++;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
-                                           SMU71_Discrete_DpmTable *tab)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
-               tab->SVI2Enable |= VDDC_ON_SVI2;
-
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-               tab->SVI2Enable |= VDDCI_ON_SVI2;
-       else
-               tab->MergedVddci = 1;
-
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
-               tab->SVI2Enable |= MVDD_ON_SVI2;
-
-       PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
-               (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
-
-       return 0;
-}
-
-/**
- * Initializes the SMC table and uploads it
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    pInput  the pointer to input data (PowerState)
- * @return   always 0
- */
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       SMU71_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
-
-
-       iceland_initialize_power_tune_defaults(hwmgr);
-       memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
-               iceland_populate_smc_voltage_tables(hwmgr, table);
-       }
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StepVddc))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-       if (data->is_memory_gddr5)
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-
-       if (data->ulv_supported) {
-               result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
-               PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize ULV state!", return result;);
-
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_ULV_PARAMETER, 0x40035);
-       }
-
-       result = iceland_populate_smc_link_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize Link Level!", return result;);
-
-       result = iceland_populate_all_graphic_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize Graphics Level!", return result;);
-
-       result = iceland_populate_all_memory_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize Memory Level!", return result;);
-
-       result = iceland_populate_smc_acpi_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize ACPI Level!", return result;);
-
-       result = iceland_populate_smc_vce_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize VCE Level!", return result;);
-
-       result = iceland_populate_smc_acp_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize ACP Level!", return result;);
-
-       result = iceland_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize SAMU Level!", return result;);
-
-       /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
-       /* need to populate the  ARB settings for the initial state. */
-       result = iceland_program_memory_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to Write ARB settings for the initial state.", return result;);
-
-       result = iceland_populate_smc_uvd_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize UVD Level!", return result;);
-
-       table->GraphicsBootLevel = 0;
-       table->MemoryBootLevel = 0;
-
-       result = iceland_populate_smc_boot_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize Boot Level!", return result;);
-
-       result = iceland_populate_smc_initial_state(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
-
-       result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
-
-       table->GraphicsVoltageChangeEnable  = 1;
-       table->GraphicsThermThrottleEnable  = 1;
-       table->GraphicsInterval = 1;
-       table->VoltageInterval  = 1;
-       table->ThermalInterval  = 1;
-
-       table->TemperatureLimitHigh =
-               (data->thermal_temp_setting.temperature_high *
-                SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       table->TemperatureLimitLow =
-               (data->thermal_temp_setting.temperature_low *
-               SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
-       table->MemoryVoltageChangeEnable  = 1;
-       table->MemoryInterval  = 1;
-       table->VoltageResponseTime  = 0;
-       table->PhaseResponseTime  = 0;
-       table->MemoryThermThrottleEnable  = 1;
-       table->PCIeBootLinkLevel = 0;
-       table->PCIeGenInterval = 1;
-
-       result = iceland_populate_smc_svi2_config(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to populate SVI2 setting!", return result);
-
-       table->ThermGpio  = 17;
-       table->SclkStepSize = 0x4000;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-       table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
-       table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
-       table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
-
-       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
-       result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
-                                                                               offsetof(SMU71_Discrete_DpmTable, SystemFlags),
-                                                                               (uint8_t *)&(table->SystemFlags),
-                                                                               sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
-                                                                               SMC_RAM_END);
-
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to upload dpm data to SMC memory!", return result;);
-
-       /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
-       result = smu7_copy_bytes_to_smc(hwmgr,
-                       smu_data->smu7_data.ulv_setting_starts,
-                       (uint8_t *)&(smu_data->ulv_setting),
-                       sizeof(SMU71_Discrete_Ulv),
-                       SMC_RAM_END);
-
-
-       result = iceland_populate_initial_mc_reg_table(hwmgr);
-       PP_ASSERT_WITH_CODE((0 == result),
-               "Failed to populate initialize MC Reg table!", return result);
-
-       result = iceland_populate_pm_fuses(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to  populate PM fuses to SMC memory!", return result);
-
-       return 0;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-       SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-       uint32_t duty100;
-       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-       uint16_t fdo_min, slope1, slope2;
-       uint32_t reference_clock;
-       int res;
-       uint64_t tmp64;
-
-       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
-               return 0;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       if (0 == smu7_data->fan_table_start) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (0 == duty100) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
-       do_div(tmp64, 10000);
-       fdo_min = (uint16_t)tmp64;
-
-       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
-       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
-       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-       fan_table.Slope1 = cpu_to_be16(slope1);
-       fan_table.Slope2 = cpu_to_be16(slope2);
-
-       fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
-       fan_table.HystUp = cpu_to_be16(1);
-
-       fan_table.HystSlope = cpu_to_be16(1);
-
-       fan_table.TempRespLim = cpu_to_be16(5);
-
-       reference_clock = smu7_get_xclk(hwmgr);
-
-       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
-       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-       /* fan_table.FanControl_GL_Flag = 1; */
-
-       res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
-
-       return 0;
-}
-
-
-static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
-               return iceland_program_memory_timing_parameters(hwmgr);
-
-       return 0;
-}
-
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-
-       int result = 0;
-       uint32_t low_sclk_interrupt_threshold = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkThrottleLowNotification)
-               && (hwmgr->gfx_arbiter.sclk_threshold !=
-                               data->low_sclk_interrupt_threshold)) {
-               data->low_sclk_interrupt_threshold =
-                               hwmgr->gfx_arbiter.sclk_threshold;
-               low_sclk_interrupt_threshold =
-                               data->low_sclk_interrupt_threshold;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-               result = smu7_copy_bytes_to_smc(
-                               hwmgr,
-                               smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU71_Discrete_DpmTable,
-                                       LowSclkInterruptThreshold),
-                               (uint8_t *)&low_sclk_interrupt_threshold,
-                               sizeof(uint32_t),
-                               SMC_RAM_END);
-       }
-
-       result = iceland_update_and_upload_mc_reg_table(hwmgr);
-
-       PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
-
-       result = iceland_program_mem_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((result == 0),
-                       "Failed to program memory timing parameters!",
-                       );
-
-       return result;
-}
-
-uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
-{
-       switch (type) {
-       case SMU_SoftRegisters:
-               switch (member) {
-               case HandshakeDisables:
-                       return offsetof(SMU71_SoftRegisters, HandshakeDisables);
-               case VoltageChangeTimeout:
-                       return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
-               case AverageGraphicsActivity:
-                       return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
-               case PreVBlankGap:
-                       return offsetof(SMU71_SoftRegisters, PreVBlankGap);
-               case VBlankTimeout:
-                       return offsetof(SMU71_SoftRegisters, VBlankTimeout);
-               case UcodeLoadStatus:
-                       return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
-               }
-       case SMU_Discrete_DpmTable:
-               switch (member) {
-               case LowSclkInterruptThreshold:
-                       return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
-               }
-       }
-       pr_warn("can't get the offset of type %x member %x\n", type, member);
-       return 0;
-}
-
-uint32_t iceland_get_mac_definition(uint32_t value)
-{
-       switch (value) {
-       case SMU_MAX_LEVELS_GRAPHICS:
-               return SMU71_MAX_LEVELS_GRAPHICS;
-       case SMU_MAX_LEVELS_MEMORY:
-               return SMU71_MAX_LEVELS_MEMORY;
-       case SMU_MAX_LEVELS_LINK:
-               return SMU71_MAX_LEVELS_LINK;
-       case SMU_MAX_ENTRIES_SMIO:
-               return SMU71_MAX_ENTRIES_SMIO;
-       case SMU_MAX_LEVELS_VDDC:
-               return SMU71_MAX_LEVELS_VDDC;
-       case SMU_MAX_LEVELS_VDDCI:
-               return SMU71_MAX_LEVELS_VDDCI;
-       case SMU_MAX_LEVELS_MVDD:
-               return SMU71_MAX_LEVELS_MVDD;
-       }
-
-       pr_warn("can't get the mac of %x\n", value);
-       return 0;
-}
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
-       uint32_t tmp;
-       int result;
-       bool error = false;
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU71_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU71_Firmware_Header, DpmTable),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result) {
-               smu7_data->dpm_table_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU71_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU71_Firmware_Header, SoftRegisters),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result) {
-               data->soft_regs_start = tmp;
-               smu7_data->soft_regs_start = tmp;
-       }
-
-       error |= (0 != result);
-
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU71_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU71_Firmware_Header, mcRegisterTable),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result) {
-               smu7_data->mc_reg_table_start = tmp;
-       }
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU71_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU71_Firmware_Header, FanTable),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result) {
-               smu7_data->fan_table_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU71_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result) {
-               smu7_data->arb_table_start = tmp;
-       }
-
-       error |= (0 != result);
-
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU71_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU71_Firmware_Header, Version),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result) {
-               hwmgr->microcode_version_info.SMC = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU71_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU71_Firmware_Header, UlvSettings),
-                               &tmp, SMC_RAM_END);
-
-       if (0 == result) {
-               smu7_data->ulv_setting_starts = tmp;
-       }
-
-       error |= (0 != result);
-
-       return error ? 1 : 0;
-}
-
-/*---------------------------MC----------------------------*/
-
-static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
-       return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
-{
-       bool result = true;
-
-       switch (in_reg) {
-       case  mmMC_SEQ_RAS_TIMING:
-               *out_reg = mmMC_SEQ_RAS_TIMING_LP;
-               break;
-
-       case  mmMC_SEQ_DLL_STBY:
-               *out_reg = mmMC_SEQ_DLL_STBY_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CMD0:
-               *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CMD1:
-               *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CTRL:
-               *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
-               break;
-
-       case mmMC_SEQ_CAS_TIMING:
-               *out_reg = mmMC_SEQ_CAS_TIMING_LP;
-               break;
-
-       case mmMC_SEQ_MISC_TIMING:
-               *out_reg = mmMC_SEQ_MISC_TIMING_LP;
-               break;
-
-       case mmMC_SEQ_MISC_TIMING2:
-               *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
-               break;
-
-       case mmMC_SEQ_PMG_DVS_CMD:
-               *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
-               break;
-
-       case mmMC_SEQ_PMG_DVS_CTL:
-               *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
-               break;
-
-       case mmMC_SEQ_RD_CTL_D0:
-               *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
-               break;
-
-       case mmMC_SEQ_RD_CTL_D1:
-               *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_D0:
-               *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_D1:
-               *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
-               break;
-
-       case mmMC_PMG_CMD_EMRS:
-               *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS1:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
-               break;
-
-       case mmMC_SEQ_PMG_TIMING:
-               *out_reg = mmMC_SEQ_PMG_TIMING_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS2:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_2:
-               *out_reg = mmMC_SEQ_WR_CTL_2_LP;
-               break;
-
-       default:
-               result = false;
-               break;
-       }
-
-       return result;
-}
-
-static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
-{
-       uint32_t i;
-       uint16_t address;
-
-       for (i = 0; i < table->last; i++) {
-               table->mc_reg_address[i].s0 =
-                       iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
-                       ? address : table->mc_reg_address[i].s1;
-       }
-       return 0;
-}
-
-static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
-                                       struct iceland_mc_reg_table *ni_table)
-{
-       uint8_t i, j;
-
-       PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-               "Invalid VramInfo table.", return -EINVAL);
-       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
-               "Invalid VramInfo table.", return -EINVAL);
-
-       for (i = 0; i < table->last; i++) {
-               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-       }
-       ni_table->last = table->last;
-
-       for (i = 0; i < table->num_entries; i++) {
-               ni_table->mc_reg_table_entry[i].mclk_max =
-                       table->mc_reg_table_entry[i].mclk_max;
-               for (j = 0; j < table->last; j++) {
-                       ni_table->mc_reg_table_entry[i].mc_data[j] =
-                               table->mc_reg_table_entry[i].mc_data[j];
-               }
-       }
-
-       ni_table->num_entries = table->num_entries;
-
-       return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size, we need to recover them here.
- * 1.   when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to  mmMC_PMG_CMD_EMRS /_LP[15:0].
- *      Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
- * 2.   when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3.   need to set these data for each clock range
- *
- * @param    hwmgr the address of the powerplay hardware manager.
- * @param    table the address of MCRegTable
- * @return   always 0
- */
-static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
-                                       struct iceland_mc_reg_table *table)
-{
-       uint8_t i, j, k;
-       uint32_t temp_reg;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       for (i = 0, j = table->last; i < table->last; i++) {
-               PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                       "Invalid VramInfo table.", return -EINVAL);
-
-               switch (table->mc_reg_address[i].s1) {
-
-               case mmMC_SEQ_MISC1:
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       ((temp_reg & 0xffff0000)) |
-                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       (temp_reg & 0xffff0000) |
-                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
-                               if (!data->is_memory_gddr5) {
-                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
-                               }
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-
-                       if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
-                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
-                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
-                               for (k = 0; k < table->num_entries; k++) {
-                                       table->mc_reg_table_entry[k].mc_data[j] =
-                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
-                               }
-                               j++;
-                               PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                                       "Invalid VramInfo table.", return -EINVAL);
-                       }
-
-                       break;
-
-               case mmMC_SEQ_RESERVE_M:
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       (temp_reg & 0xffff0000) |
-                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-                       break;
-
-               default:
-                       break;
-               }
-
-       }
-
-       table->last = j;
-
-       return 0;
-}
-
-static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
-{
-       uint8_t i, j;
-       for (i = 0; i < table->last; i++) {
-               for (j = 1; j < table->num_entries; j++) {
-                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
-                               table->mc_reg_table_entry[j].mc_data[i]) {
-                               table->validflag |= (1<<i);
-                               break;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
-       pp_atomctrl_mc_reg_table *table;
-       struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
-       uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
-
-       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       /* Program additional LP registers that are no longer programmed by VBIOS */
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
-       memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
-       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
-       if (0 == result)
-               result = iceland_copy_vbios_smc_reg_table(table, ni_table);
-
-       if (0 == result) {
-               iceland_set_s0_mc_reg_index(ni_table);
-               result = iceland_set_mc_special_registers(hwmgr, ni_table);
-       }
-
-       if (0 == result)
-               iceland_set_valid_flag(ni_table);
-
-       kfree(table);
-
-       return result;
-}
-
-bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
-                       ? true : false;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
deleted file mode 100644 (file)
index 13c8dbb..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _ICELAND_SMC_H
-#define _ICELAND_SMC_H
-
-#include "smumgr.h"
-
-
-int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
-int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
-uint32_t iceland_get_mac_definition(uint32_t value);
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
-int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
-#endif
-
index a778e174ba01237b71928a61f9b4765b3a0fbb99..34128822b8fbd22086adcf45ce7501f5aeb4e6c8 100644 (file)
 
 #include "smumgr.h"
 #include "iceland_smumgr.h"
-#include "smu_ucode_xfer_vi.h"
+
 #include "ppsmc.h"
+
+#include "cgs_common.h"
+
+#include "smu7_dyn_defaults.h"
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "processpptables.h"
+
+
 #include "smu/smu_7_1_1_d.h"
 #include "smu/smu_7_1_1_sh_mask.h"
-#include "cgs_common.h"
-#include "iceland_smc.h"
+#include "smu71_discrete.h"
+
+#include "smu_ucode_xfer_vi.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
 
 #define ICELAND_SMC_SIZE               0x20000
 
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define VDDC_VDDCI_DELTA            200
+
+#define DEVICE_ID_VI_ICELAND_M_6900    0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901    0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902    0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903    0x6903
+
+static const struct iceland_pt_defaults defaults_iceland = {
+       /*
+        * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+        * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+        */
+       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+       { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
+       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+static const struct iceland_pt_defaults defaults_icelandxt = {
+       /*
+        * sviLoadLIneEn, SviLoadLineVddC,
+        * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+        * BAPM_TEMP_GRADIENT
+        */
+       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+       { 0xA7,  0x0, 0x0, 0xB5,  0x0, 0x0, 0x9F,  0x0, 0x0, 0xD6,  0x0, 0x0, 0xD7,  0x0, 0x0},
+       { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+static const struct iceland_pt_defaults defaults_icelandpro = {
+       /*
+        * sviLoadLIneEn, SviLoadLineVddC,
+        * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+        * BAPM_TEMP_GRADIENT
+        */
+       1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+       { 0xB7,  0x0, 0x0, 0xC3,  0x0, 0x0, 0xB5,  0x0, 0x0, 0xEA,  0x0, 0x0, 0xE6,  0x0, 0x0},
+       { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
 static int iceland_start_smc(struct pp_hwmgr *hwmgr)
 {
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -191,13 +260,6 @@ static int iceland_start_smu(struct pp_hwmgr *hwmgr)
        return result;
 }
 
-/**
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    smcAddress the address in the SMC RAM to access.
- * @param    value to write to the SMC SRAM.
- */
 static int iceland_smu_init(struct pp_hwmgr *hwmgr)
 {
        int i;
@@ -219,6 +281,2413 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+
+/*
+ * Select the power-tune defaults table for this chip from its PCI device
+ * ID (queried through CGS).  Unknown IDs fall back to the generic Iceland
+ * table with a warning.
+ */
+static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       struct cgs_system_info sys_info = {0};
+       uint32_t dev_id;
+
+       sys_info.size = sizeof(struct cgs_system_info);
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       dev_id = (uint32_t)sys_info.value;
+
+       switch (dev_id) {
+       case DEVICE_ID_VI_ICELAND_M_6900:
+       case DEVICE_ID_VI_ICELAND_M_6903:
+               /* 35W parts (XT, XTL) */
+               smu_data->power_tune_defaults = &defaults_icelandxt;
+               break;
+
+       case DEVICE_ID_VI_ICELAND_M_6901:
+       case DEVICE_ID_VI_ICELAND_M_6902:
+               /* 25W parts (PRO, LE) */
+               smu_data->power_tune_defaults = &defaults_icelandpro;
+               break;
+       default:
+               smu_data->power_tune_defaults = &defaults_iceland;
+               pr_warn("Unknown V.I. Device ID.\n");
+               break;
+       }
+}
+
+/* Populate PM fuse DW6: SVI load-line enable/select values from the
+ * per-SKU power-tune defaults; trim and offset are fixed constants. */
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+       smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+       return 0;
+}
+
+/* Populate PM fuse DW7: TDC package limit (scaled by 256 into the SMC's
+ * 8.8 fixed-point format) plus throttle-release parameters from defaults. */
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+       uint16_t tdc_limit;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+       /* Stored in SMC byte order. */
+       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+                       defaults->tdc_vddc_throttle_release_limit_perc;
+       smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+       return 0;
+}
+
+/*
+ * Populate PM fuse DW8 (TdcWaterfallCtl).  The SRAM read only verifies
+ * that the fuse table location is reachable; its result is discarded and
+ * the value written comes from the power-tune defaults.
+ */
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+       uint32_t temp;
+
+       if (smu7_read_smc_sram_dword(hwmgr,
+                       fuse_table_offset +
+                       offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+                       (uint32_t *)&temp, SMC_RAM_END))
+               /* Bug fix: the original message referred to DW6/SviLoadLineEn,
+                * but this routine reads TdcWaterfallCtl (DW8). */
+               PP_ASSERT_WITH_CODE(false,
+                               "Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+                               return -EINVAL);
+       else
+               smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+       return 0;
+}
+
+/* PM fuses DW9-DW12 (LPMLTemperatureScaler): intentionally a no-op on
+ * Iceland; the fields keep their prior (zeroed) contents. */
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+       return 0;
+}
+
+/* PM fuses DW13-DW16: GnbLPML table. */
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 8; i++)
+               smu_data->power_tune_table.GnbLPML[i] = 0;
+
+       return 0;
+}
+
+/*
+ * PM fuse DW18: derive the BAPM VDDC base-leakage Hi/Lo SIDD values from
+ * the CAC/DTP table (percentage scaled by 256) and store them in SMC byte
+ * order.  The original initialized HiSidd/LoSidd from the power-tune table
+ * and immediately overwrote them — those dead stores are removed here.
+ */
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+       uint16_t HiSidd;
+       uint16_t LoSidd;
+
+       HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+       LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+       return 0;
+}
+
+/*
+ * PM fuses DW0-DW3: convert the CAC leakage table's Vddc1/Vddc2 pairs into
+ * Hi/Lo VID codes.  Requires the CAC leakage table to exist, hold at most 8
+ * entries, and match the SCLK/VDDC dependency table's entry count; Iceland
+ * must support EVV, otherwise the function fails with -EINVAL.
+ */
+static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+       uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+
+       PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+                           "The CAC Leakage table does not exist!", return -EINVAL);
+       PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+                           "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+       PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+                           "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+               for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+                       lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+                       hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+               }
+       } else {
+               PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+       }
+
+       return 0;
+}
+
+/* PM fuses DW4-DW5: encode each VDDC voltage-table entry as a VID code
+ * (at most 8 entries fit in the fuse table). */
+static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       uint8_t *vid = smu_data->power_tune_table.VddCVid;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+               "There should never be more than 8 entries for VddcVid!!!",
+               return -EINVAL);
+
+       for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+               vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+       }
+
+       return 0;
+}
+
+
+
+/*
+ * Build the complete PmFuses image (DW0-DW18) in the local power_tune_table
+ * and download it to the SMC at the offset read from the firmware header.
+ * Only runs when the PowerContainment platform cap is enabled; every
+ * populate step and the final copy abort with -EINVAL on failure.
+ */
+static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       uint32_t pm_fuse_table_offset;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment)) {
+               if (smu7_read_smc_sram_dword(hwmgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, PmFuseTable),
+                               &pm_fuse_table_offset, SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to get pm_fuse_table_offset Failed!",
+                                       return -EINVAL);
+
+               /* DW0 - DW3 */
+               if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate bapm vddc vid Failed!",
+                                       return -EINVAL);
+
+               /* DW4 - DW5 */
+               if (iceland_populate_vddc_vid(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate vddc vid Failed!",
+                                       return -EINVAL);
+
+               /* DW6 */
+               if (iceland_populate_svi_load_line(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate SviLoadLine Failed!",
+                                       return -EINVAL);
+               /* DW7 */
+               if (iceland_populate_tdc_limit(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
+               /* DW8 */
+               if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate TdcWaterfallCtl, "
+                                       "LPMLTemperature Min and Max Failed!",
+                                       return -EINVAL);
+
+               /* DW9-DW12 */
+               if (0 != iceland_populate_temperature_scaler(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate LPMLTemperatureScaler Failed!",
+                                       return -EINVAL);
+
+               /* DW13-DW16 */
+               if (iceland_populate_gnb_lpml(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate GnbLPML Failed!",
+                                       return -EINVAL);
+
+               /* DW18 */
+               if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+                                       return -EINVAL);
+
+               /* Download the assembled fuse image into SMC SRAM. */
+               if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+                               (uint8_t *)&smu_data->power_tune_table,
+                               sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to download PmFuseTable Failed!",
+                                       return -EINVAL);
+       }
+       return 0;
+}
+
+/*
+ * Look up the voltage for a given clock in a clock/voltage dependency
+ * table: returns the voltage of the first entry whose clock is >= the
+ * request, or the last entry's voltage when the request exceeds the
+ * table's maximum.  Returns -EINVAL only for an empty table.
+ */
+static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+       struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+       uint32_t clock, uint32_t *vol)
+{
+       uint32_t i = 0;
+
+       /* clock - voltage dependency table is empty table */
+       if (allowed_clock_voltage_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+               /* find first sclk bigger than request */
+               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+                       *vol = allowed_clock_voltage_table->entries[i].v;
+                       return 0;
+               }
+       }
+
+       /* sclk is bigger than max sclk in the dependence table */
+       *vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+       return 0;
+}
+
+/*
+ * Resolve the standardized Hi/Lo SIDD voltages for a voltage-table entry
+ * from the CAC leakage table.  Both outputs default to the raw voltage
+ * (scaled by VOLTAGE_SCALE); pass 1 searches for an exact voltage match in
+ * the SCLK/VDDC dependency table, pass 2 accepts the first equal-or-higher
+ * entry.  All misses degrade gracefully to the defaults with a warning.
+ */
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+               pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+               uint16_t *lo)
+{
+       uint16_t v_index;
+       bool vol_found = false;
+       *hi = tab->value * VOLTAGE_SCALE;
+       *lo = tab->value * VOLTAGE_SCALE;
+
+       /* SCLK/VDDC Dependency Table has to exist. */
+       PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+                       "The SCLK/VDDC Dependency Table does not exist.\n",
+                       return -EINVAL);
+
+       if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+               pr_warn("CAC Leakage Table does not exist, using vddc.\n");
+               return 0;
+       }
+
+       /*
+        * Since voltage in the sclk/vddc dependency table is not
+        * necessarily in ascending order because of ELB voltage
+        * patching, loop through entire list to find exact voltage.
+        */
+       for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+               if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+                       vol_found = true;
+                       if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+                               *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+                               *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+                       } else {
+                               pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+                               *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+                               *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+                       }
+                       break;
+               }
+       }
+
+       /*
+        * If voltage is not found in the first pass, loop again to
+        * find the best match, equal or higher value.
+        */
+       if (!vol_found) {
+               for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+                       if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+                               vol_found = true;
+                               if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+                                       *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+                                       *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+                               } else {
+                                       pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+                                       *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+                                       *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+                               }
+                               break;
+                       }
+               }
+
+               if (!vol_found)
+                       pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+       }
+
+       return 0;
+}
+
+/*
+ * Fill one SMC voltage level from an atomctrl voltage table entry.  On
+ * lookup failure both Hi/Lo SIDD fall back to the raw scaled voltage;
+ * all three fields are then converted to SMC byte order.
+ */
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+               pp_atomctrl_voltage_table_entry *tab,
+               SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+       int result;
+
+       result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+                       &smc_voltage_tab->StdVoltageHiSidd,
+                       &smc_voltage_tab->StdVoltageLoSidd);
+       if (0 != result) {
+               smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+               smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+       }
+
+       smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+       CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+       /* Bug fix: the original converted StdVoltageHiSidd twice and left
+        * StdVoltageLoSidd in host byte order. */
+       CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+       return 0;
+}
+
+/*
+ * Populate the SMC VDDC level table from the driver's VDDC voltage table.
+ * In GPIO voltage-control mode the SMIO low pattern is OR-ed into each
+ * level; in SVID2 mode the SMIO field is cleared.
+ */
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+                       SMU71_Discrete_DpmTable *table)
+{
+       unsigned int count;
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       table->VddcLevelCount = data->vddc_voltage_table.count;
+       for (count = 0; count < table->VddcLevelCount; count++) {
+               result = iceland_populate_smc_voltage_table(hwmgr,
+                               &(data->vddc_voltage_table.entries[count]),
+                               &(table->VddcLevel[count]));
+               PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
+
+               /* GPIO voltage control */
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+                       table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+                       table->VddcLevel[count].Smio = 0;
+       }
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+       return 0;
+}
+
+/*
+ * Populate the SMC VDDCI level table from the driver's VDDCI voltage
+ * table, OR-ing in the SMIO low pattern under GPIO voltage control.
+ */
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+                       SMU71_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count;
+       int result;
+
+       table->VddciLevelCount = data->vddci_voltage_table.count;
+
+       for (count = 0; count < table->VddciLevelCount; count++) {
+               result = iceland_populate_smc_voltage_table(hwmgr,
+                               &(data->vddci_voltage_table.entries[count]),
+                               &(table->VddciLevel[count]));
+               PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+               else
+                       table->VddciLevel[count].Smio |= 0;  /* no-op; Smio keeps its prior contents */
+       }
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+       return 0;
+}
+
+/*
+ * Populate the SMC MVDD level table from the driver's MVDD voltage table,
+ * OR-ing in the SMIO low pattern under GPIO voltage control.
+ */
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+                       SMU71_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count;
+       int result;
+
+       table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+       /* Bug fix: the original iterated over table->VddciLevelCount, a
+        * copy-paste from the VDDCI variant; the bound must be the MVDD
+        * level count that was just assigned above. */
+       for (count = 0; count < table->MvddLevelCount; count++) {
+               result = iceland_populate_smc_voltage_table(hwmgr,
+                               &(data->mvdd_voltage_table.entries[count]),
+                               &table->MvddLevel[count]);
+               PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+                       table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+               else
+                       table->MvddLevel[count].Smio |= 0;
+       }
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+       return 0;
+}
+
+
+/* Populate all three SMC voltage tables (VDDC, VDDCI, MVDD) in order,
+ * failing fast with -EINVAL if any of them cannot be filled. */
+static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+       SMU71_Discrete_DpmTable *table)
+{
+       int result;
+
+       result = iceland_populate_smc_vddc_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "can not populate VDDC voltage table to SMC", return -EINVAL);
+
+       result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+       result = iceland_populate_smc_mvdd_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "can not populate MVDD voltage table to SMC", return -EINVAL);
+
+       return 0;
+}
+
+/*
+ * Fill the SMC ULV (ultra-low voltage) state.  A zero ULV voltage from the
+ * pptable disables ULV support entirely.  The VDDC offset is expressed as
+ * a raw offset (GPIO/SMIO mode) or as a VID-scaled offset (SVID2 mode),
+ * clamped to zero when the pptable value exceeds the minimum voltage.
+ */
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
+               struct SMU71_Discrete_Ulv *state)
+{
+       uint32_t voltage_response_time, ulv_voltage;
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       state->CcPwrDynRm = 0;
+       state->CcPwrDynRm1 = 0;
+
+       result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+       PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+       if (ulv_voltage == 0) {
+               data->ulv_supported = false;
+               return 0;
+       }
+
+       if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+               /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+               if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+                       state->VddcOffset = 0;
+               else
+                       /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+                       state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+       } else {
+               /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+               if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+                       state->VddcOffsetVid = 0;
+               else  /* used in SVI2 Mode */
+                       state->VddcOffsetVid = (uint8_t)(
+                                       (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+                                               * VOLTAGE_VID_OFFSET_SCALE2
+                                               / VOLTAGE_VID_OFFSET_SCALE1);
+       }
+       state->VddcPhase = 1;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+       return 0;
+}
+
+/* Thin wrapper kept for naming symmetry with the other populate_*_state helpers. */
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
+                SMU71_Discrete_Ulv *ulv_level)
+{
+       return iceland_populate_ulv_level(hwmgr, ulv_level);
+}
+
+/*
+ * Populate the SMC PCIe link level table from the PCIe speed DPM table.
+ * Note the loop runs to <= count: the extra slot holds the PCIE boot
+ * level.  Also records the link level count and the PCIe DPM enable mask.
+ */
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       uint32_t i;
+
+       /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+               table->LinkLevel[i].PcieGenSpeed  =
+                       (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+               table->LinkLevel[i].PcieLaneCount =
+                       (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+               table->LinkLevel[i].EnabledForActivity =
+                       1;
+               table->LinkLevel[i].SPC =
+                       (uint8_t)(data->pcie_spc_cap & 0xff);
+               table->LinkLevel[i].DownThreshold =
+                       PP_HOST_TO_SMC_UL(5);
+               table->LinkLevel[i].UpThreshold =
+                       PP_HOST_TO_SMC_UL(30);
+       }
+
+       smu_data->smc_state_table.LinkLevelCount =
+               (uint8_t)dpm_table->pcie_speed_table.count;
+       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+               phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+       return 0;
+}
+
+/*
+ * Compute the SPLL register values for a target engine clock using the
+ * VBIOS-provided dividers, optionally programming spread spectrum, and
+ * store the results into the SMC graphics level.
+ *
+ * NOTE(review): spll_func_cntl is computed but never written into *sclk —
+ * only CNTL_3/CNTL_4 and the spread-spectrum registers are stored; confirm
+ * the SMU71 GraphicsLevel layout has no field for it.
+ */
+static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+               uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       pp_atomctrl_clock_dividers_vi dividers;
+       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       uint32_t    reference_clock;
+       uint32_t reference_divider;
+       uint32_t fbdiv;
+       int result;
+
+       /* get the engine clock dividers for this clock value*/
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+       reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+       reference_divider = 1 + dividers.uc_pll_ref_div;
+
+       /* low 14 bits is fraction and high 12 bits is divider*/
+       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+       /* SPLL_FUNC_CNTL setup*/
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+               CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+               CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+       /* SPLL_FUNC_CNTL_3 setup*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+               CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+       /* set to use fractional accumulation*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+               CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+               pp_atomctrl_internal_ss_info ss_info;
+
+               uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+               if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+                       /*
+                       * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+                       * ss_info.speed_spectrum_rate -- in unit of khz
+                       */
+                       /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+                       uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+                       /* clkv = 2 * D * fbdiv / NS */
+                       uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+                       cg_spll_spread_spectrum =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+                       cg_spll_spread_spectrum =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+                       cg_spll_spread_spectrum_2 =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+               }
+       }
+
+       sclk->SclkFrequency        = engine_clock;
+       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+       return 0;
+}
+
+/*
+ * Pick the phase-shedding index for a given SCLK: the first table entry
+ * whose Sclk exceeds the request wins.
+ *
+ * NOTE(review): when the first entry already exceeds sclk, *p_shed is set
+ * to 0, overriding the "minimum phase shedding" default of 1 — confirm
+ * this is intended (the same pattern appears in other SMU7 smumgrs).
+ */
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+                               const struct phm_phase_shedding_limits_table *pl,
+                                       uint32_t sclk, uint32_t *p_shed)
+{
+       unsigned int i;
+
+       /* use the minimum phase shedding */
+       *p_shed = 1;
+
+       for (i = 0; i < pl->count; i++) {
+               if (sclk < pl->entries[i].Sclk) {
+                       *p_shed = i;
+                       break;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Fill one SMC graphics DPM level for the given engine clock: SPLL
+ * parameters, minimum VDDC (from the SCLK/VDDC dependency table), phase
+ * shedding, activity/hysteresis policy, deep-sleep divider, and finally
+ * byte-swap all multi-byte fields into SMC order.
+ *
+ * NOTE(review): the result of iceland_calculate_sclk_params() is
+ * immediately overwritten by the dependency-volt lookup without being
+ * checked — confirm that is acceptable (same pattern as other smumgrs).
+ */
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+                                               uint32_t engine_clock,
+                               uint16_t sclk_activity_level_threshold,
+                               SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+       /* populate graphics levels*/
+       result = iceland_get_dependency_volt_by_clk(hwmgr,
+               hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
+               &graphic_level->MinVddc);
+       PP_ASSERT_WITH_CODE((0 == result),
+               "can not find VDDC voltage value for VDDC       \
+               engine clock dependency table", return result);
+
+       /* SCLK frequency in units of 10KHz*/
+       graphic_level->SclkFrequency = engine_clock;
+       graphic_level->MinVddcPhases = 1;
+
+       if (data->vddc_phase_shed_control)
+               iceland_populate_phase_value_based_on_sclk(hwmgr,
+                               hwmgr->dyn_state.vddc_phase_shed_limits_table,
+                               engine_clock,
+                               &graphic_level->MinVddcPhases);
+
+       /* Indicates maximum activity level for this performance level. 50% for now*/
+       graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+       graphic_level->CcPwrDynRm = 0;
+       graphic_level->CcPwrDynRm1 = 0;
+       /* this level can be used if activity is high enough.*/
+       graphic_level->EnabledForActivity = 0;
+       /* this level can be used for throttling.*/
+       graphic_level->EnabledForThrottle = 1;
+       graphic_level->UpHyst = 0;
+       graphic_level->DownHyst = 100;
+       graphic_level->VoltageDownHyst = 0;
+       graphic_level->PowerThrottle = 0;
+
+       data->display_timing.min_clock_in_sr =
+                       hwmgr->display_config.min_core_set_clock_in_sr;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep))
+               graphic_level->DeepSleepDivId =
+                               smu7_get_sleep_divider_id_from_clock(engine_clock,
+                                               data->display_timing.min_clock_in_sr);
+
+       /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+       graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       if (0 == result) {
+               graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+       }
+
+       return result;
+}
+
+/*
+ * iceland_populate_all_graphic_levels - build the SCLK (graphics) DPM level
+ * table in host memory and upload it to SMC RAM.
+ *
+ * Fills one SMU71_Discrete_GraphicsLevel entry per SCLK DPM level, assigns
+ * each level a PCIe DPM level derived from the enabled-PCIe-level bitmask,
+ * then copies the whole array to the SMU at the DpmTable GraphicsLevel offset.
+ * Returns 0 on success or the error from level population / SMC copy.
+ */
+static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+       uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
+                                               SMU71_MAX_LEVELS_GRAPHICS;
+
+       SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+       uint32_t i;
+       uint8_t highest_pcie_level_enabled = 0;
+       uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+       uint8_t count = 0;
+       int result = 0;
+
+       memset(levels, 0x00, level_array_size);
+
+       for (i = 0; i < dpm_table->sclk_table.count; i++) {
+               result = iceland_populate_single_graphic_level(hwmgr,
+                                       dpm_table->sclk_table.dpm_levels[i].value,
+                                       (uint16_t)smu_data->activity_target[i],
+                                       &(smu_data->smc_state_table.GraphicsLevel[i]));
+               if (result != 0)
+                       return result;
+
+               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+               if (i > 1)
+                       smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+       }
+
+       /* Only enable level 0 for now. */
+       smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+       /* set highest level watermark to high */
+       if (dpm_table->sclk_table.count > 1)
+               smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+                       PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       smu_data->smc_state_table.GraphicsDpmLevelCount =
+               (uint8_t)dpm_table->sclk_table.count;
+       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+               phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+       /*
+        * Scan pcie_dpm_enable_mask for the highest contiguous set bit
+        * starting above bit 0.  NOTE(review): assumes the mask has at least
+        * one bit set; an all-zero mask would loop on lowest_pcie_level_enabled
+        * past the register width -- presumed guaranteed by earlier init.
+        */
+       while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                               (1 << (highest_pcie_level_enabled + 1))) != 0) {
+               highest_pcie_level_enabled++;
+       }
+
+       /* Find the lowest set bit in the mask. */
+       while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+               (1 << lowest_pcie_level_enabled)) == 0) {
+               lowest_pcie_level_enabled++;
+       }
+
+       /* Count cleared bits between lowest and highest enabled levels. */
+       while ((count < highest_pcie_level_enabled) &&
+                       ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                               (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+               count++;
+       }
+
+       /* Middle level = next enabled above lowest, clamped to highest. */
+       mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+               (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+       /* set pcieDpmLevel to highest_pcie_level_enabled*/
+       for (i = 2; i < dpm_table->sclk_table.count; i++) {
+               smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+       }
+
+       /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+       smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+       /* set pcieDpmLevel to mid_pcie_level_enabled*/
+       smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+       /* level count will send to smc once at init smc table and never change*/
+       result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress,
+                               (uint8_t *)levels, (uint32_t)level_array_size,
+                                                               SMC_RAM_END);
+
+       return result;
+}
+
+/*
+ * iceland_calculate_mclk_params - compute MPLL/MCLK register values for one
+ * memory clock and store them in the SMU memory-level entry.
+ *
+ * Starts from the clock register values cached in the hwmgr backend, asks the
+ * VBIOS (atomctrl) for MPLL dividers for @memory_clock, then programs the
+ * divider, spread-spectrum and power-management fields.  @strobe_mode is
+ * forwarded to the VBIOS divider query; @dllStateOn drives the MRDCK*_PDNB
+ * fields of MCLK_PWRMGT_CNTL.  Values are written to @mclk in host byte
+ * order; the caller converts to SMC endianness.  Returns 0 on success.
+ */
+static int iceland_calculate_mclk_params(
+               struct pp_hwmgr *hwmgr,
+               uint32_t memory_clock,
+               SMU71_Discrete_MemoryLevel *mclk,
+               bool strobe_mode,
+               bool dllStateOn
+               )
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
+       uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+       uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+       uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+       uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+       uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+       uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+       uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
+       uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+       pp_atomctrl_memory_clock_param mpll_param;
+       int result;
+
+       result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+                               memory_clock, &mpll_param, strobe_mode);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+       /* MPLL_FUNC_CNTL setup*/
+       mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+       /* MPLL_FUNC_CNTL_1 setup*/
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                                       MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                                       MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                                       MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+       /* MPLL_AD_FUNC_CNTL setup*/
+       mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+                                                       MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+       if (data->is_memory_gddr5) {
+               /* MPLL_DQ_FUNC_CNTL setup -- DQ path used only for GDDR5 */
+               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+                                                               MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+                                                               MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+               /*
+                ************************************
+                Fref = Reference Frequency
+                NF = Feedback divider ratio
+                NR = Reference divider ratio
+                Fnom = Nominal VCO output frequency = Fref * NF / NR
+                Fs = Spreading Rate
+                D = Percentage down-spread / 2
+                Fint = Reference input frequency to PFD = Fref / NR
+                NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+                CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+                NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+                CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+                *************************************
+                */
+               pp_atomctrl_internal_ss_info ss_info;
+               uint32_t freq_nom;
+               uint32_t tmp;
+               uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+               /* for GDDR5 for all modes and DDR3 */
+               if (1 == mpll_param.qdr)
+                       freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+               else
+                       freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+               /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
+               tmp = (freq_nom / reference_clock);
+               tmp = tmp * tmp;
+
+               if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+                       /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+                       /* ss.Info.speed_spectrum_rate -- in unit of khz */
+                       /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+                       /*     = reference_clock * 5 / speed_spectrum_rate */
+                       uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+                       /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+                       /*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+                       uint32_t clkv =
+                               (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+                                                       ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+                       mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+                       mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+               }
+       }
+
+       /* MCLK_PWRMGT_CNTL setup */
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+       /* Save the result data to output memory level structure */
+       mclk->MclkFrequency   = memory_clock;
+       mclk->MpllFuncCntl    = mpll_func_cntl;
+       mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
+       mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
+       mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
+       mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
+       mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
+       mclk->DllCntl         = dll_cntl;
+       mclk->MpllSs1         = mpll_ss1;
+       mclk->MpllSs2         = mpll_ss2;
+
+       return 0;
+}
+
+/*
+ * iceland_get_mclk_frequency_ratio - map a memory clock (10 kHz units) to a
+ * 4-bit MC parameter index (0x0..0xf) used to select MC register settings.
+ *
+ * Strobe mode uses the 125-475 MHz band in 25 MHz steps; non-strobe uses the
+ * 650-1350 MHz band in 50 MHz steps.  Clocks below/above the band clamp to
+ * 0x00/0x0f.  The thresholds are hardware-specific constants.
+ */
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+               bool strobe_mode)
+{
+       uint8_t mc_para_index;
+
+       if (strobe_mode) {
+               if (memory_clock < 12500) {
+                       mc_para_index = 0x00;
+               } else if (memory_clock > 47500) {
+                       mc_para_index = 0x0f;
+               } else {
+                       /* Linear index: one step per 2500 (25 MHz), offset 10000. */
+                       mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+               }
+       } else {
+               if (memory_clock < 65000) {
+                       mc_para_index = 0x00;
+               } else if (memory_clock > 135000) {
+                       mc_para_index = 0x0f;
+               } else {
+                       /* Linear index: one step per 5000 (50 MHz), offset 60000. */
+                       mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+               }
+       }
+
+       return mc_para_index;
+}
+
+/*
+ * iceland_get_ddr3_mclk_frequency_ratio - DDR3 variant of the MC parameter
+ * index lookup: 100-800 MHz band in 50 MHz steps (clock in 10 kHz units),
+ * clamped to 0x0..0xf.
+ */
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+       uint8_t mc_para_index;
+
+       if (memory_clock < 10000) {
+               mc_para_index = 0;
+       } else if (memory_clock >= 80000) {
+               mc_para_index = 0x0f;
+       } else {
+               /* One step per 5000 (50 MHz), starting at index 1 for 10000. */
+               mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+       }
+
+       return mc_para_index;
+}
+
+/*
+ * iceland_populate_phase_value_based_on_mclk - pick a voltage phase-shedding
+ * value for @memory_clock from the phase-shedding limits table.
+ *
+ * Writes the index of the first table entry whose Mclk exceeds @memory_clock
+ * into *@p_shed; defaults to 1 when no entry is higher (note the first match
+ * can overwrite the default with 0).  Always returns 0.
+ */
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+                                       uint32_t memory_clock, uint32_t *p_shed)
+{
+       unsigned int i;
+
+       *p_shed = 1;
+
+       for (i = 0; i < pl->count; i++) {
+               if (memory_clock < pl->entries[i].Mclk) {
+                       *p_shed = i;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * iceland_populate_single_memory_level - fill one SMU memory DPM level for
+ * @memory_clock.
+ *
+ * Looks up the minimum VDDC/VDDCI voltages from the dependency tables,
+ * decides strobe/EDC modes and the DLL state from fixed thresholds and MC
+ * fuse registers, computes the MPLL parameters via
+ * iceland_calculate_mclk_params(), and finally converts the populated fields
+ * to SMC byte order.  Returns 0 on success or a lookup/VBIOS error code.
+ */
+static int iceland_populate_single_memory_level(
+               struct pp_hwmgr *hwmgr,
+               uint32_t memory_clock,
+               SMU71_Discrete_MemoryLevel *memory_level
+               )
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int result = 0;
+       bool dll_state_on;
+       struct cgs_display_info info = {0};
+       /* Fixed thresholds (10 kHz units) for EDC and strobe-mode decisions. */
+       uint32_t mclk_edc_wr_enable_threshold = 40000;
+       uint32_t mclk_edc_enable_threshold = 40000;
+       uint32_t mclk_strobe_mode_threshold = 40000;
+
+       if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+               result = iceland_get_dependency_volt_by_clk(hwmgr,
+                       hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+       }
+
+       if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
+               /* No VDDCI control: tie VDDCI to VDDC. */
+               memory_level->MinVddci = memory_level->MinVddc;
+       } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+               result = iceland_get_dependency_volt_by_clk(hwmgr,
+                               hwmgr->dyn_state.vddci_dependency_on_mclk,
+                               memory_clock,
+                               &memory_level->MinVddci);
+               PP_ASSERT_WITH_CODE((0 == result),
+                       "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+       }
+
+       memory_level->MinVddcPhases = 1;
+
+       if (data->vddc_phase_shed_control) {
+               iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+                               memory_clock, &memory_level->MinVddcPhases);
+       }
+
+       memory_level->EnabledForThrottle = 1;
+       memory_level->EnabledForActivity = 0;
+       memory_level->UpHyst = 0;
+       memory_level->DownHyst = 100;
+       memory_level->VoltageDownHyst = 0;
+
+       /* Indicates maximum activity level for this performance level.*/
+       memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+       memory_level->StutterEnable = 0;
+       memory_level->StrobeEnable = 0;
+       memory_level->EdcReadEnable = 0;
+       memory_level->EdcWriteEnable = 0;
+       memory_level->RttEnable = 0;
+
+       /* default set to low watermark. Highest level will be set to high later.*/
+       memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+       data->display_timing.num_existing_displays = info.display_count;
+
+       /* stutter mode not support on iceland */
+
+       /* decide strobe mode*/
+       memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+               (memory_clock <= mclk_strobe_mode_threshold);
+
+       /* decide EDC mode and memory clock ratio*/
+       if (data->is_memory_gddr5) {
+               memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+                                       memory_level->StrobeEnable);
+
+               if ((mclk_edc_enable_threshold != 0) &&
+                               (memory_clock > mclk_edc_enable_threshold)) {
+                       memory_level->EdcReadEnable = 1;
+               }
+
+               if ((mclk_edc_wr_enable_threshold != 0) &&
+                               (memory_clock > mclk_edc_wr_enable_threshold)) {
+                       memory_level->EdcWriteEnable = 1;
+               }
+
+               if (memory_level->StrobeEnable) {
+                       /*
+                        * Pick the DLL-on default from the MC fuse registers:
+                        * compare the strobe ratio against the fused threshold
+                        * in MC_SEQ_MISC7[19:16], then read the enable bit from
+                        * MISC5 or MISC6 accordingly.
+                        */
+                       if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+                                       ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+                       else
+                               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+               } else
+                       dll_state_on = data->dll_default_on;
+       } else {
+               memory_level->StrobeRatio =
+                       iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+               dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+       }
+
+       result = iceland_calculate_mclk_params(hwmgr,
+               memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+       if (0 == result) {
+               memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+               memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+               memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+               /* MCLK frequency in units of 10KHz*/
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+               /* Indicates maximum activity level for this performance level.*/
+               CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+               CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+       }
+
+       return result;
+}
+
+/*
+ * iceland_populate_all_memory_levels - build the full MCLK DPM level table
+ * and upload it to SMC RAM.
+ *
+ * Populates one entry per MCLK DPM level (rejecting zero clocks), enables
+ * only level 0 for activity, raises level 0's activity threshold to keep
+ * stutter-mode MC traffic from pushing DPM up, marks the top level with the
+ * high display watermark, then copies the table to the SMU.
+ */
+static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       int result;
+
+       /* populate MCLK dpm table to SMU7 */
+       uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+       uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+       SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+       uint32_t i;
+
+       memset(levels, 0x00, level_array_size);
+
+       for (i = 0; i < dpm_table->mclk_table.count; i++) {
+               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+                       "can not populate memory level as memory clock is zero", return -EINVAL);
+               result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+                       &(smu_data->smc_state_table.MemoryLevel[i]));
+               if (0 != result) {
+                       return result;
+               }
+       }
+
+       /* Only enable level 0 for now.*/
+       smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+       /*
+       * in order to prevent MC activity from stutter mode to push DPM up.
+       * the UVD change complements this by putting the MCLK in a higher state
+       * by default such that we are not effected by up threshold or and MCLK DPM latency.
+       */
+       smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+       smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+       data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+       /* set highest level watermark to high*/
+       smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       /* level count will send to smc once at init smc table and never change*/
+       result = smu7_copy_bytes_to_smc(hwmgr,
+               level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+               SMC_RAM_END);
+
+       return result;
+}
+
+/*
+ * iceland_populate_mvdd_value - find the MVDD voltage for a given memory
+ * clock by scanning the MVDD-on-MCLK dependency table for the first entry
+ * whose clock is >= @mclk (always rounding voltage up).
+ *
+ * Returns 0 with @voltage filled in, or -EINVAL when MVDD is not controlled
+ * or @mclk exceeds every table entry.
+ *
+ * NOTE(review): assumes hwmgr->dyn_state.mvdd_dependency_on_mclk is non-NULL
+ * whenever mvdd_control != SMU7_VOLTAGE_CONTROL_NONE -- verify against init.
+ */
+static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+                                       SMU71_Discrete_VoltageLevel *voltage)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       uint32_t i = 0;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+               /* find mvdd value which clock is more than request */
+               for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+                       if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+                               /* Always round to higher voltage. */
+                               voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+                               break;
+                       }
+               }
+
+               PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+                       "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * iceland_populate_smc_acpi_level - fill the ACPI (lowest-power) SCLK and
+ * MCLK level entries of the SMU DPM table.
+ *
+ * The SCLK ACPI level powers off/resets the SPLL and uses the reference
+ * clock; the MCLK ACPI level force-resets and powers down the memory DLLs.
+ * All register values are converted to SMC byte order in place here (unlike
+ * the regular levels, which are converted by their populate helpers).
+ */
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+       SMU71_Discrete_DpmTable *table)
+{
+       int result = 0;
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       uint32_t vddc_phase_shed_control = 0;
+
+       SMU71_Discrete_VoltageLevel voltage_level;
+       uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+       uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
+       uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+       /* The ACPI state should not do DPM on DC (or ever).*/
+       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+       if (data->acpi_vddc)
+               table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+       else
+               table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+       /* vddc_phase_shed_control is always 0 here, so phases = 1. */
+       table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+       /* assign zero for now*/
+       table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+       /* get the engine clock dividers for this clock value*/
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+               table->ACPILevel.SclkFrequency,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+       /* divider ID for required SCLK*/
+       table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+       table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+       table->ACPILevel.DeepSleepDivId = 0;
+
+       /* Power off and hold the SPLL in reset; mux SCLK source 4. */
+       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
+                                                       CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
+       spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
+                                                       CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
+       spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
+                                                       CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
+
+       table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+       table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+       table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       table->ACPILevel.CcPwrDynRm = 0;
+       table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+       /* For various features to be enabled/disabled while this level is active.*/
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+       /* SCLK frequency in units of 10KHz*/
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+       /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+       table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+       table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+               table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+       else {
+               if (data->acpi_vddci != 0)
+                       table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+               else
+                       table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+       }
+
+       if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+               table->MemoryACPILevel.MinMvdd =
+                       PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+       else
+               table->MemoryACPILevel.MinMvdd = 0;
+
+       /* Force reset on DLL*/
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+       /* Disable DLL in ACPIState*/
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+       mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+       /* Enable DLL bypass signal*/
+       dll_cntl            = PHM_SET_FIELD(dll_cntl,
+               DLL_CNTL, MRDCK0_BYPASS, 0);
+       dll_cntl            = PHM_SET_FIELD(dll_cntl,
+               DLL_CNTL, MRDCK1_BYPASS, 0);
+
+       table->MemoryACPILevel.DllCntl            =
+               PP_HOST_TO_SMC_UL(dll_cntl);
+       table->MemoryACPILevel.MclkPwrmgtCntl     =
+               PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+       table->MemoryACPILevel.MpllAdFuncCntl     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+       table->MemoryACPILevel.MpllDqFuncCntl     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+       table->MemoryACPILevel.MpllFuncCntl       =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+       table->MemoryACPILevel.MpllFuncCntl_1     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+       table->MemoryACPILevel.MpllFuncCntl_2     =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+       table->MemoryACPILevel.MpllSs1            =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+       table->MemoryACPILevel.MpllSs2            =
+               PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+       table->MemoryACPILevel.EnabledForThrottle = 0;
+       table->MemoryACPILevel.EnabledForActivity = 0;
+       table->MemoryACPILevel.UpHyst = 0;
+       table->MemoryACPILevel.DownHyst = 100;
+       table->MemoryACPILevel.VoltageDownHyst = 0;
+       /* Indicates maximum activity level for this performance level.*/
+       table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+       table->MemoryACPILevel.StutterEnable = 0;
+       table->MemoryACPILevel.StrobeEnable = 0;
+       table->MemoryACPILevel.EdcReadEnable = 0;
+       table->MemoryACPILevel.EdcWriteEnable = 0;
+       table->MemoryACPILevel.RttEnable = 0;
+
+       return result;
+}
+
+/* Intentional no-op: returns success without populating any UVD level. */
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+                                       SMU71_Discrete_DpmTable *table)
+{
+       return 0;
+}
+
+/* Intentional no-op: returns success without populating any VCE level. */
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU71_Discrete_DpmTable *table)
+{
+       return 0;
+}
+
+/* Intentional no-op: returns success without populating any ACP level. */
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+               SMU71_Discrete_DpmTable *table)
+{
+       return 0;
+}
+
+/* Intentional no-op: returns success without populating any SAMU level. */
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+       SMU71_Discrete_DpmTable *table)
+{
+       return 0;
+}
+
+/*
+ * iceland_populate_memory_timing_parameters - capture MC arbiter DRAM timing
+ * registers for one (engine clock, memory clock) pair.
+ *
+ * Asks the VBIOS to program DRAM timings for the pair, then reads back
+ * MC_ARB_DRAM_TIMING/TIMING2 and the STATE0 burst time into @arb_regs
+ * (timings stored in SMC byte order, burst time as a raw byte).
+ */
+static int iceland_populate_memory_timing_parameters(
+               struct pp_hwmgr *hwmgr,
+               uint32_t engine_clock,
+               uint32_t memory_clock,
+               struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+               )
+{
+       uint32_t dramTiming;
+       uint32_t dramTiming2;
+       uint32_t burstTime;
+       int result;
+
+       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+                               engine_clock, memory_clock);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+       dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+       dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+       burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
+       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+       arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+       return 0;
+}
+
+/*
+ * iceland_program_memory_timing_parameters - build the full SCLK x MCLK
+ * arbiter timing table and upload it to SMC RAM.
+ *
+ * Iterates every (sclk level, mclk level) combination; on the first failure
+ * the inner loop breaks and the partial table is NOT uploaded (the outer
+ * loop still runs but result stays non-zero, skipping the SMC copy).
+ */
+static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       int result = 0;
+       SMU71_Discrete_MCArbDramTimingTable  arb_regs;
+       uint32_t i, j;
+
+       memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
+       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+                       result = iceland_populate_memory_timing_parameters
+                               (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+                                data->dpm_table.mclk_table.dpm_levels[j].value,
+                                &arb_regs.entries[i][j]);
+
+                       if (0 != result) {
+                               break;
+                       }
+               }
+       }
+
+       if (0 == result) {
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr,
+                               smu_data->smu7_data.arb_table_start,
+                               (uint8_t *)&arb_regs,
+                               sizeof(SMU71_Discrete_MCArbDramTimingTable),
+                               SMC_RAM_END
+                               );
+       }
+
+       return result;
+}
+
+/*
+ * iceland_populate_smc_boot_level - select the boot SCLK/MCLK DPM levels and
+ * boot voltages for the SMU DPM table.
+ *
+ * Looks up the VBIOS boot engine and memory clocks in the DPM tables; if a
+ * clock is not found, falls back to DPM level 0 with a warning instead of
+ * failing.  Boot VDDC/VDDCI/MVDD come from the VBIOS boot state (VDDCI
+ * mirrors VDDC when VDDCI control is disabled).  Always returns 0.
+ *
+ * Fixes vs. previous revision: the memory-clock fallback message wrongly
+ * said "boot engine clock" (copy-paste), and both messages used backslash
+ * line continuations inside string literals, which embedded a run of literal
+ * whitespace into the logged text; use adjacent-literal concatenation.
+ */
+static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+                       SMU71_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table*/
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+                       data->vbios_boot_state.sclk_bootup_value,
+                       (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+       if (0 != result) {
+               smu_data->smc_state_table.GraphicsBootLevel = 0;
+               pr_err("VBIOS did not find boot engine clock value "
+                       "in dependency table. Using Graphics DPM level 0!\n");
+               result = 0;
+       }
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+               data->vbios_boot_state.mclk_bootup_value,
+               (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+       if (0 != result) {
+               smu_data->smc_state_table.MemoryBootLevel = 0;
+               pr_err("VBIOS did not find boot memory clock value "
+                       "in dependency table. Using Memory DPM level 0!\n");
+               result = 0;
+       }
+
+       table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+               table->BootVddci = table->BootVddc;
+       else
+               table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+
+       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+       return result;
+}
+
+/*
+ * Copy the valid MC register address pairs (low-power s0 and driver-side
+ * s1 offsets) into the SMC-format table, compacting out entries whose
+ * bit is clear in validflag.  Returns -EINVAL when more valid entries
+ * exist than SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE can hold.
+ */
+static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+                                SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+       const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
+
+       uint32_t i, j;
+
+       /* i indexes the compacted output, j walks the sparse source table. */
+       for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+               if (smu_data->mc_reg_table.validflag & 1<<j) {
+                       PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+                               "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+                       mc_reg_table->address[i].s0 =
+                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+                       mc_reg_table->address[i].s1 =
+                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+                       i++;
+               }
+       }
+
+       mc_reg_table->last = (uint8_t)i;
+
+       return 0;
+}
+
+/*convert register values from driver to SMC format */
+/*
+ * Convert one driver-side MC register entry to SMC (big-endian) format,
+ * compacting out values whose bit is clear in valid_flag.
+ */
+static void iceland_convert_mc_registers(
+       const struct iceland_mc_reg_entry *entry,
+       SMU71_Discrete_MCRegisterSet *data,
+       uint32_t num_entries, uint32_t valid_flag)
+{
+       uint32_t src, dst;
+
+       for (src = 0, dst = 0; src < num_entries; src++) {
+               if (!(valid_flag & 1<<src))
+                       continue;
+               data->value[dst] = PP_HOST_TO_SMC_UL(entry->mc_data[src]);
+               dst++;
+       }
+}
+
+/*
+ * Select the MC register entry for the given memory clock -- the first
+ * entry whose mclk_max covers it, falling back to the last entry when
+ * the clock exceeds every range -- and convert it into SMC format.
+ */
+static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
+               const uint32_t memory_clock,
+               SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+               )
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       uint32_t i = 0;
+
+       for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+               if (memory_clock <=
+                       smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+                       break;
+               }
+       }
+
+       /* Clock above all ranges: clamp to the last (highest) entry. */
+       if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+               --i;
+
+       iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+                               mc_reg_table_data, smu_data->mc_reg_table.last,
+                               smu_data->mc_reg_table.validflag);
+
+       return 0;
+}
+
+/*
+ * Convert the MC register set of every memory DPM level into SMC
+ * format.  On a per-level failure the last error is recorded but the
+ * remaining levels are still converted.
+ */
+static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+               SMU71_Discrete_MCRegisters *mc_regs)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int res;
+       uint32_t i;
+
+       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+               res = iceland_convert_mc_reg_table_entry_to_smc(
+                               hwmgr,
+                               data->dpm_table.mclk_table.dpm_levels[i].value,
+                               &mc_regs->data[i]
+                               );
+
+               if (0 != res)
+                       result = res;
+       }
+
+       return result;
+}
+
+/*
+ * Re-convert the per-level MC register data and upload only the data[]
+ * portion to SMC RAM.  No-op unless an overdrive MCLK update is
+ * pending; the address header written at init time is left untouched.
+ */
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t address;
+       int32_t result;
+
+       if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+               return 0;
+
+
+       memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+       result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+       if (result != 0)
+               return result;
+
+
+       /* Upload starts at the data[] member, skipping the address header. */
+       address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+       return  smu7_copy_bytes_to_smc(hwmgr, address,
+                                (uint8_t *)&smu_data->mc_regs.data[0],
+                               sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+                               SMC_RAM_END);
+}
+
+/*
+ * Build the complete initial MC register table -- the compacted address
+ * header plus the per-level data -- and upload it to SMC RAM at
+ * mc_reg_table_start.
+ */
+static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+       memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+       result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+       result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize MCRegTable for driver state!", return result;);
+
+       return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
+                       (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+/*
+ * Initialize the boot DPM levels: pick the first sclk/mclk dependency
+ * table entry whose clock is at least the VBIOS boot-up value.
+ * NOTE(review): this assumes both dependency tables are sorted by
+ * ascending clock -- confirm against the table builder.
+ */
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       uint8_t count, level;
+
+       count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+       for (level = 0; level < count; level++) {
+               if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+                        >= data->vbios_boot_state.sclk_bootup_value) {
+                       smu_data->smc_state_table.GraphicsBootLevel = level;
+                       break;
+               }
+       }
+
+       count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+       for (level = 0; level < count; level++) {
+               if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+                       >= data->vbios_boot_state.mclk_bootup_value) {
+                       smu_data->smc_state_table.MemoryBootLevel = level;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Populate the BAPM (power/thermal budget) parameters of the SMC DPM
+ * table from the power-tune defaults, the cac_dtp table and the
+ * optional PPM table.  Multi-byte fields are converted to SMC byte
+ * order as they are written.
+ */
+static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+       SMU71_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
+       struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+       struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+       const uint16_t *def1, *def2;
+       int i, j, k;
+
+
+       /*
+        * TDP number of fraction bits are changed from 8 to 7 for Iceland
+        * as requested by SMC team
+        */
+
+       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+       dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+
+       dpm_table->DTETjOffset = 0;
+
+       dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+       dpm_table->GpuTjHyst = 8;
+
+       dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+       /* The following are for new Iceland Multi-input fan/thermal control */
+       if (NULL != ppm) {
+               dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+               dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+       } else {
+               dpm_table->PPM_PkgPwrLimit = 0;
+               dpm_table->PPM_TemperatureLimit = 0;
+       }
+
+       CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+       CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+       dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+       def1 = defaults->bapmti_r;
+       def2 = defaults->bapmti_rc;
+
+       /* bapmti_r/bapmti_rc are flat arrays walked in
+        * [iterations][sources][sinks] order. */
+       for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+               for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+                       for (k = 0; k < SMU71_DTE_SINKS; k++) {
+                               dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+                               dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+                               def1++;
+                               def2++;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Set up the SVI2 voltage-regulator configuration flags.  The assert
+ * requires VDDC to be SVI2-controlled and rejects the case where all
+ * three rails are; when VDDCI is not on SVI2 it is merged with VDDC.
+ */
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+                                           SMU71_Discrete_DpmTable *tab)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+               tab->SVI2Enable |= VDDC_ON_SVI2;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+               tab->SVI2Enable |= VDDCI_ON_SVI2;
+       else
+               tab->MergedVddci = 1;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+               tab->SVI2Enable |= MVDD_ON_SVI2;
+
+       PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+               (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+
+       return 0;
+}
+
+/*
+ * Build the complete SMU71 discrete DPM table from the current hwmgr
+ * state (voltage tables, link/graphics/memory/ACPI/VCE/ACP/SAMU/UVD
+ * levels, boot state, BAPM and SVI2 configuration), convert it to SMC
+ * byte order and upload it -- together with the ULV settings -- to SMC
+ * RAM.  Returns 0 on success or the first failing step's error code.
+ */
+static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       SMU71_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+
+
+       iceland_initialize_power_tune_defaults(hwmgr);
+       memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+               iceland_populate_smc_voltage_tables(hwmgr, table);
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StepVddc))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (data->is_memory_gddr5)
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+
+       if (data->ulv_supported) {
+               result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+               PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize ULV state!", return result;);
+
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_ULV_PARAMETER, 0x40035);
+       }
+
+       result = iceland_populate_smc_link_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Link Level!", return result;);
+
+       result = iceland_populate_all_graphic_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Graphics Level!", return result;);
+
+       result = iceland_populate_all_memory_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Memory Level!", return result;);
+
+       result = iceland_populate_smc_acpi_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize ACPI Level!", return result;);
+
+       result = iceland_populate_smc_vce_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize VCE Level!", return result;);
+
+       result = iceland_populate_smc_acp_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize ACP Level!", return result;);
+
+       result = iceland_populate_smc_samu_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize SAMU Level!", return result;);
+
+       /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
+       /* need to populate the  ARB settings for the initial state. */
+       result = iceland_program_memory_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to Write ARB settings for the initial state.", return result;);
+
+       result = iceland_populate_smc_uvd_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize UVD Level!", return result;);
+
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       result = iceland_populate_smc_boot_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to initialize Boot Level!", return result;);
+
+       result = iceland_populate_smc_initial_state(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+       result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+       table->GraphicsVoltageChangeEnable  = 1;
+       table->GraphicsThermThrottleEnable  = 1;
+       table->GraphicsInterval = 1;
+       table->VoltageInterval  = 1;
+       table->ThermalInterval  = 1;
+
+       table->TemperatureLimitHigh =
+               (data->thermal_temp_setting.temperature_high *
+                SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       table->TemperatureLimitLow =
+               (data->thermal_temp_setting.temperature_low *
+               SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+       table->MemoryVoltageChangeEnable  = 1;
+       table->MemoryInterval  = 1;
+       table->VoltageResponseTime  = 0;
+       table->PhaseResponseTime  = 0;
+       table->MemoryThermThrottleEnable  = 1;
+       table->PCIeBootLinkLevel = 0;
+       table->PCIeGenInterval = 1;
+
+       result = iceland_populate_smc_svi2_config(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to populate SVI2 setting!", return result);
+
+       table->ThermGpio  = 17;
+       table->SclkStepSize = 0x4000;
+
+       /* Convert multi-byte fields to SMC byte order before upload. */
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+       table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+       table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+       table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+       result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
+                                                                               offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+                                                                               (uint8_t *)&(table->SystemFlags),
+                                                                               sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
+                                                                               SMC_RAM_END);
+
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to upload dpm data to SMC memory!", return result;);
+
+       /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
+       result = smu7_copy_bytes_to_smc(hwmgr,
+                       smu_data->smu7_data.ulv_setting_starts,
+                       (uint8_t *)&(smu_data->ulv_setting),
+                       sizeof(SMU71_Discrete_Ulv),
+                       SMC_RAM_END);
+
+       /* Previously this upload's result was silently discarded. */
+       PP_ASSERT_WITH_CODE(0 == result,
+               "Failed to upload ULV settings to SMC memory!", return result;);
+
+       result = iceland_populate_initial_mc_reg_table(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == result),
+               "Failed to populate initialize MC Reg table!", return result);
+
+       result = iceland_populate_pm_fuses(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to  populate PM fuses to SMC memory!", return result);
+
+       return 0;
+}
+
+/*
+ * Build the SMC microcode fan-control table from the thermal-controller
+ * parameters and upload it to SMC RAM.  Disables the
+ * MicrocodeFanControl capability (and returns 0) when the platform cap
+ * is off, there is no fan, the fan table address is unknown, or
+ * FMAX_DUTY100 reads back as zero.
+ *
+ * Returns the SMC upload status; the old code computed it into 'res'
+ * but unconditionally returned 0, hiding copy failures.
+ */
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+       SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       uint32_t duty100;
+       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       uint16_t fdo_min, slope1, slope2;
+       uint32_t reference_clock;
+       int res;
+       uint64_t tmp64;
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+               return 0;
+
+       if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       if (0 == smu7_data->fan_table_start) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+       if (0 == duty100) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       /* Minimum fan duty as a fraction of duty100 (usPWMMin is in 0.01%). */
+       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (uint16_t)tmp64;
+
+       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       /* (50 + x) / 100 is a rounded division by 100. */
+       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = smu7_get_xclk(hwmgr);
+
+       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+       /* fan_table.FanControl_GL_Flag = 1; */
+
+       res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+       return res;
+}
+
+
+/*
+ * Re-program the ARB memory timing table when an overdrive update
+ * touched the engine or memory clock.  The flag test now uses bitwise
+ * OR; the old arithmetic '+' only worked because the two flags are
+ * distinct single bits.
+ */
+static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+               return iceland_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+/*
+ * Periodic SMC state refresh: push a changed low-SCLK interrupt
+ * threshold, re-upload the MC register table and re-program the ARB
+ * timings when an overdrive update is pending.
+ */
+static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+       int result = 0;
+       uint32_t low_sclk_interrupt_threshold = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               low_sclk_interrupt_threshold =
+                               data->low_sclk_interrupt_threshold;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+               /* NOTE(review): this copy's result is overwritten by the MC
+                * reg upload below, so a threshold-upload failure is silently
+                * dropped -- confirm whether that is intended. */
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr,
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU71_Discrete_DpmTable,
+                                       LowSclkInterruptThreshold),
+                               (uint8_t *)&low_sclk_interrupt_threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+
+       result = iceland_update_and_upload_mc_reg_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+       result = iceland_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters!",
+                       );
+
+       return result;
+}
+
+/*
+ * Translate a generic (type, member) pair into the byte offset of the
+ * matching field in the SMU71 firmware structures.  Logs a warning and
+ * returns 0 for unknown pairs.
+ *
+ * break statements were added after each inner switch: previously an
+ * unknown SMU_SoftRegisters member fell through into the
+ * SMU_Discrete_DpmTable case instead of reaching the warning.
+ */
+static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+               case PreVBlankGap:
+                       return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU71_SoftRegisters, VBlankTimeout);
+               case UcodeLoadStatus:
+                       return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+               case DRAM_LOG_ADDR_H:
+                       return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_H);
+               case DRAM_LOG_ADDR_L:
+                       return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_L);
+               case DRAM_LOG_PHY_ADDR_H:
+                       return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+               case DRAM_LOG_PHY_ADDR_L:
+                       return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+               case DRAM_LOG_BUFF_SIZE:
+                       return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+               }
+               break;
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+               }
+               break;
+       }
+       pr_warn("can't get the offset of type %x member %x\n", type, member);
+       return 0;
+}
+
+/*
+ * Map a generic SMU_MAX_* capacity constant to its SMU71-specific
+ * value.  Logs a warning and returns 0 for unknown constants.
+ */
+static uint32_t iceland_get_mac_definition(uint32_t value)
+{
+       if (value == SMU_MAX_LEVELS_GRAPHICS)
+               return SMU71_MAX_LEVELS_GRAPHICS;
+       if (value == SMU_MAX_LEVELS_MEMORY)
+               return SMU71_MAX_LEVELS_MEMORY;
+       if (value == SMU_MAX_LEVELS_LINK)
+               return SMU71_MAX_LEVELS_LINK;
+       if (value == SMU_MAX_ENTRIES_SMIO)
+               return SMU71_MAX_ENTRIES_SMIO;
+       if (value == SMU_MAX_LEVELS_VDDC)
+               return SMU71_MAX_LEVELS_VDDC;
+       if (value == SMU_MAX_LEVELS_VDDCI)
+               return SMU71_MAX_LEVELS_VDDCI;
+       if (value == SMU_MAX_LEVELS_MVDD)
+               return SMU71_MAX_LEVELS_MVDD;
+
+       pr_warn("can't get the mac of %x\n", value);
+       return 0;
+}
+
+/*
+ * Read the SMU71 firmware header and cache the SMC RAM offsets of the
+ * DPM table, soft registers, MC register table, fan table, ARB timing
+ * table, ULV settings and the firmware version.  Returns 1 if any
+ * tracked read failed, 0 otherwise.
+ */
+static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+       uint32_t tmp;
+       int result;
+       bool error = false;
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, DpmTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->dpm_table_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, SoftRegisters),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               data->soft_regs_start = tmp;
+               smu7_data->soft_regs_start = tmp;
+       }
+
+       error |= (0 != result);
+
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, mcRegisterTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->mc_reg_table_start = tmp;
+       }
+
+       /* NOTE(review): unlike every other read in this function, this
+        * result is not folded into 'error'.  Possibly intentional (the MC
+        * register table may be optional) -- confirm. */
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, FanTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->fan_table_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->arb_table_start = tmp;
+       }
+
+       error |= (0 != result);
+
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, Version),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               hwmgr->microcode_version_info.SMC = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU71_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU71_Firmware_Header, UlvSettings),
+                               &tmp, SMC_RAM_END);
+
+       if (0 == result) {
+               smu7_data->ulv_setting_starts = tmp;
+       }
+
+       error |= (0 != result);
+
+       return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+/* Read the memory module index from bits [23:16] of BIOS scratch register 4.
+ * (Name keeps the historical "modile" typo to preserve the interface.)
+ */
+static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+       uint32_t scratch4 = cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4);
+
+       return (uint8_t)((scratch4 >> 16) & 0xFF);
+}
+
+/*
+ * Map an MC sequencer register offset to its low-power (_LP) shadow
+ * register.  Returns true and stores the shadow offset in *out_reg when
+ * a shadow exists; returns false (leaving *out_reg untouched) otherwise.
+ */
+static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+       bool result = true;
+
+       switch (in_reg) {
+       case  mmMC_SEQ_RAS_TIMING:
+               *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+               break;
+
+       case  mmMC_SEQ_DLL_STBY:
+               *out_reg = mmMC_SEQ_DLL_STBY_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CMD0:
+               *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CMD1:
+               *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CTRL:
+               *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+               break;
+
+       case mmMC_SEQ_CAS_TIMING:
+               *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+               break;
+
+       case mmMC_SEQ_MISC_TIMING:
+               *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+               break;
+
+       case mmMC_SEQ_MISC_TIMING2:
+               *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+               break;
+
+       case mmMC_SEQ_PMG_DVS_CMD:
+               *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+               break;
+
+       case mmMC_SEQ_PMG_DVS_CTL:
+               *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+               break;
+
+       case mmMC_SEQ_RD_CTL_D0:
+               *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+               break;
+
+       case mmMC_SEQ_RD_CTL_D1:
+               *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_D0:
+               *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_D1:
+               *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+               break;
+
+       case mmMC_PMG_CMD_EMRS:
+               *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS1:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+               break;
+
+       case mmMC_SEQ_PMG_TIMING:
+               *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS2:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_2:
+               *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+               break;
+
+       default:
+               /* No low-power shadow for this register. */
+               result = false;
+               break;
+       }
+
+       return result;
+}
+
+/*
+ * iceland_set_s0_mc_reg_index - fill in the s0 (low-power shadow) address
+ * for every register in the MC register table.
+ *
+ * For each entry, if the s1 register has a known *_LP counterpart (looked up
+ * via iceland_check_s0_mc_reg_index), s0 is set to that counterpart;
+ * otherwise s0 falls back to the s1 address itself.  Always returns 0.
+ */
+static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+{
+       uint32_t i;
+       uint16_t address;
+
+       for (i = 0; i < table->last; i++) {
+               table->mc_reg_address[i].s0 =
+                       iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+                       ? address : table->mc_reg_address[i].s1;
+       }
+       return 0;
+}
+
+/*
+ * iceland_copy_vbios_smc_reg_table - copy the VBIOS (atomctrl) MC register
+ * table into the driver-side iceland_mc_reg_table.
+ *
+ * Copies the s1 register addresses, each entry's mclk_max, and each entry's
+ * per-register data column by column.  Returns -EINVAL if the source table
+ * exceeds SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE registers or
+ * MAX_AC_TIMING_ENTRIES entries; 0 on success.
+ */
+static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+                                       struct iceland_mc_reg_table *ni_table)
+{
+       uint8_t i, j;
+
+       /* Bounds-check before copying into the fixed-size destination arrays. */
+       PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+               "Invalid VramInfo table.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+               "Invalid VramInfo table.", return -EINVAL);
+
+       for (i = 0; i < table->last; i++) {
+               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+       }
+       ni_table->last = table->last;
+
+       for (i = 0; i < table->num_entries; i++) {
+               ni_table->mc_reg_table_entry[i].mclk_max =
+                       table->mc_reg_table_entry[i].mclk_max;
+               for (j = 0; j < table->last; j++) {
+                       ni_table->mc_reg_table_entry[i].mc_data[j] =
+                               table->mc_reg_table_entry[i].mc_data[j];
+               }
+       }
+
+       ni_table->num_entries = table->num_entries;
+
+       return 0;
+}
+
+/*
+ * iceland_set_mc_special_registers - append derived MC register entries.
+ *
+ * Some registers needed for memory-clock switching are not listed directly
+ * in the VBIOS table but are derived from columns that are.  Scanning the
+ * existing columns, this appends new entries starting at j == table->last:
+ *  - for mmMC_SEQ_MISC1: MC_PMG_CMD_EMRS and MC_PMG_CMD_MRS entries (and,
+ *    for non-GDDR5 memory, an MC_PMG_AUTO_CMD entry), each mixing the
+ *    current hardware value with data from source column i;
+ *  - for mmMC_SEQ_RESERVE_M: an MC_PMG_CMD_MRS1 entry.
+ *
+ * table->last is advanced to cover the appended entries.  Returns -EINVAL
+ * if the table would overflow SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ * 0 otherwise.
+ */
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+                                       struct iceland_mc_reg_table *table)
+{
+       uint8_t i, j, k;
+       uint32_t temp_reg;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* i walks the original columns; j is the append position for new ones.
+        * Entry j is only ever written after a fresh bounds check, so the
+        * trailing "j <= ARRAY_SIZE" asserts merely allow j to end up equal
+        * to the array size (a valid final count for table->last).
+        */
+       for (i = 0, j = table->last; i < table->last; i++) {
+               PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                       "Invalid VramInfo table.", return -EINVAL);
+
+               switch (table->mc_reg_address[i].s1) {
+
+               case mmMC_SEQ_MISC1:
+                       /* EMRS entry: keep the live register's high half, move
+                        * the MISC1 column's high half into the low half. */
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       ((temp_reg & 0xffff0000)) |
+                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       /* MRS entry: live high half plus the column's low half. */
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+                               if (!data->is_memory_gddr5) {
+                                       /* NOTE(review): 0x100 is set only for
+                                        * non-GDDR5; presumably a mode-register
+                                        * bit (e.g. DLL reset) -- confirm
+                                        * against the MC register spec. */
+                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+                               }
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       /* Non-GDDR5 only: AUTO_CMD entry built from the
+                        * column's high half (no *_LP shadow; s0 == s1). */
+                       if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+                               for (k = 0; k < table->num_entries; k++) {
+                                       table->mc_reg_table_entry[k].mc_data[j] =
+                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+                               }
+                               j++;
+                               PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                                       "Invalid VramInfo table.", return -EINVAL);
+                       }
+
+                       break;
+
+               case mmMC_SEQ_RESERVE_M:
+                       /* MRS1 entry: live high half plus the column's low half. */
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+                       break;
+
+               default:
+                       break;
+               }
+
+       }
+
+       /* New total register count, including all appended entries. */
+       table->last = j;
+
+       return 0;
+}
+
+/*
+ * iceland_set_valid_flag - mark which register columns actually vary.
+ *
+ * Sets bit i of table->validflag when register column i differs between any
+ * two consecutive AC-timing entries; presumably columns whose data never
+ * changes can be skipped by consumers of validflag -- verify against its
+ * users.  Always returns 0.
+ */
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+       uint8_t i, j;
+       for (i = 0; i < table->last; i++) {
+               for (j = 1; j < table->num_entries; j++) {
+                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+                               table->mc_reg_table_entry[j].mc_data[i]) {
+                               table->validflag |= (1<<i);
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * iceland_initialize_mc_reg_table - build the driver's MC register table.
+ *
+ * Mirrors a set of live MC registers into their *_LP shadows, reads the
+ * VBIOS MC register table for the installed memory module via atomctrl,
+ * copies it into smu_data->mc_reg_table, resolves the s0 (LP) addresses,
+ * appends the derived special-register entries, and marks which columns
+ * vary.  Returns 0 on success, -ENOMEM if the temporary table cannot be
+ * allocated, or the first failing step's error code.
+ */
+static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+       pp_atomctrl_mc_reg_table *table;
+       struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+       uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+
+       /* Temporary scratch table; freed before returning. */
+       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+       if (NULL == table)
+               return -ENOMEM;
+
+       /* Program additional LP registers that are no longer programmed by VBIOS */
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+       /* NOTE(review): redundant -- the buffer was already zeroed by kzalloc. */
+       memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+       /* Pull the raw MC register table for this memory module from VBIOS. */
+       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+       if (0 == result)
+               result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+       if (0 == result) {
+               iceland_set_s0_mc_reg_index(ni_table);
+               result = iceland_set_mc_special_registers(hwmgr, ni_table);
+       }
+
+       if (0 == result)
+               iceland_set_valid_flag(ni_table);
+
+       kfree(table);
+
+       return result;
+}
+
+/*
+ * iceland_is_dpm_running - check whether DPM is active on the SMC.
+ *
+ * Reads FEATURE_STATUS.VOLTAGE_CONTROLLER_ON through the SMC indirect
+ * register space; a value of 1 is taken to mean the voltage controller
+ * (and hence DPM) is running.
+ */
+static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+                       ? true : false;
+}
+
 const struct pp_smumgr_func iceland_smu_funcs = {
        .smu_init = &iceland_smu_init,
        .smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
deleted file mode 100644 (file)
index c92ea38..0000000
+++ /dev/null
@@ -1,2344 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "polaris10_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "polaris10_smumgr.h"
-#include "pppcielanes.h"
-
-#include "smu_ucode_xfer_vi.h"
-#include "smu74_discrete.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "oss/oss_3_0_d.h"
-#include "gca/gfx_8_0_d.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "polaris10_pwrvirus.h"
-#include "smu7_ppsmc.h"
-#include "smu7_smumgr.h"
-
-#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-#define POWERTUNE_DEFAULT_SET_MAX    1
-#define VDDC_VDDCI_DELTA            200
-#define MC_CG_ARB_FREQ_F1           0x0b
-
-static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-       /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
-        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
-       { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
-       { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
-       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
-};
-
-static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
-                       {VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
-                       {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
-                       {VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
-                       {VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
-                       {VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
-                       {VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
-                       {VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
-                       {VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
-
-static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-               struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
-               uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
-       uint32_t i;
-       uint16_t vddci;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       *voltage = *mvdd = 0;
-
-       /* clock - voltage dependency table is empty table */
-       if (dep_table->count == 0)
-               return -EINVAL;
-
-       for (i = 0; i < dep_table->count; i++) {
-               /* find first sclk bigger than request */
-               if (dep_table->entries[i].clk >= clock) {
-                       *voltage |= (dep_table->entries[i].vddc *
-                                       VOLTAGE_SCALE) << VDDC_SHIFT;
-                       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
-                               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       else if (dep_table->entries[i].vddci)
-                               *voltage |= (dep_table->entries[i].vddci *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       else {
-                               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               (dep_table->entries[i].vddc -
-                                                               (uint16_t)VDDC_VDDCI_DELTA));
-                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-                       }
-
-                       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-                               *mvdd = data->vbios_boot_state.mvdd_bootup_value *
-                                       VOLTAGE_SCALE;
-                       else if (dep_table->entries[i].mvdd)
-                               *mvdd = (uint32_t) dep_table->entries[i].mvdd *
-                                       VOLTAGE_SCALE;
-
-                       *voltage |= 1 << PHASES_SHIFT;
-                       return 0;
-               }
-       }
-
-       /* sclk is bigger than max sclk in the dependence table */
-       *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
-               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
-                               VOLTAGE_SCALE) << VDDCI_SHIFT;
-       else if (dep_table->entries[i-1].vddci) {
-               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
-                               (dep_table->entries[i].vddc -
-                                               (uint16_t)VDDC_VDDCI_DELTA));
-               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-       }
-
-       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
-               *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-       else if (dep_table->entries[i].mvdd)
-               *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
-       return 0;
-}
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
-       uint32_t tmp;
-       tmp = raw_setting * 4096 / 100;
-       return (uint16_t)tmp;
-}
-
-static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-
-       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-       SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
-       struct pp_advance_fan_control_parameters *fan_table =
-                       &hwmgr->thermal_controller.advanceFanControlParameters;
-       int i, j, k;
-       const uint16_t *pdef1;
-       const uint16_t *pdef2;
-
-       table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-       table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-
-       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
-                               "Target Operating Temp is out of Range!",
-                               );
-
-       table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTargetOperatingTemp * 256);
-       table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
-                       cac_dtp_table->usTemperatureLimitHotspot * 256);
-       table->FanGainEdge = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainEdge));
-       table->FanGainHotspot = PP_HOST_TO_SMC_US(
-                       scale_fan_gain_settings(fan_table->usFanGainHotspot));
-
-       pdef1 = defaults->BAPMTI_R;
-       pdef2 = defaults->BAPMTI_RC;
-
-       for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
-               for (j = 0; j < SMU74_DTE_SOURCES; j++) {
-                       for (k = 0; k < SMU74_DTE_SINKS; k++) {
-                               table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
-                               table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
-                               pdef1++;
-                               pdef2++;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
-       smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
-       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
-       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-       return 0;
-}
-
-static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-       uint16_t tdc_limit;
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
-       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
-                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-                       defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
-       smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
-       return 0;
-}
-
-static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-       uint32_t temp;
-
-       if (smu7_read_smc_sram_dword(hwmgr,
-                       fuse_table_offset +
-                       offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
-                       (uint32_t *)&temp, SMC_RAM_END))
-               PP_ASSERT_WITH_CODE(false,
-                               "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
-                               return -EINVAL);
-       else {
-               smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
-               smu_data->power_tune_table.LPMLTemperatureMin =
-                               (uint8_t)((temp >> 16) & 0xff);
-               smu_data->power_tune_table.LPMLTemperatureMax =
-                               (uint8_t)((temp >> 8) & 0xff);
-               smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
-       }
-       return 0;
-}
-
-static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
-       return 0;
-}
-
-static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-
-/* TO DO move to hwmgr */
-       if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
-               || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
-                       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
-       smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
-                               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
-       return 0;
-}
-
-static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               smu_data->power_tune_table.GnbLPML[i] = 0;
-
-       return 0;
-}
-
-static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-       uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
-       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
-       hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-       lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
-       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
-       return 0;
-}
-
-static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       uint32_t pm_fuse_table_offset;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               if (smu7_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU74_Firmware_Header, PmFuseTable),
-                               &pm_fuse_table_offset, SMC_RAM_END))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to get pm_fuse_table_offset Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_svi_load_line(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate SviLoadLine Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_tdc_limit(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TDCLimit Failed!", return -EINVAL);
-
-               if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TdcWaterfallCtl, "
-                                       "LPMLTemperature Min and Max Failed!",
-                                       return -EINVAL);
-
-               if (0 != polaris10_populate_temperature_scaler(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate LPMLTemperatureScaler Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_fuzzy_fan(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate Fuzzy Fan Control parameters Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_gnb_lpml(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate GnbLPML Failed!",
-                                       return -EINVAL);
-
-               if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
-                                       "Sidd Failed!", return -EINVAL);
-
-               if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
-                               (uint8_t *)&smu_data->power_tune_table,
-                               (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to download PmFuseTable Failed!",
-                                       return -EINVAL);
-       }
-       return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param    *hwmgr The address of the hardware manager.
- * @param    *table The SMC DPM table structure to be populated.
- * @return   0
- */
-static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
-                       SMU74_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t count, level;
-
-       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               count = data->mvdd_voltage_table.count;
-               if (count > SMU_MAX_SMIO_LEVELS)
-                       count = SMU_MAX_SMIO_LEVELS;
-               for (level = 0; level < count; level++) {
-                       table->SmioTable2.Pattern[level].Voltage =
-                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
-                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
-                       table->SmioTable2.Pattern[level].Smio =
-                               (uint8_t) level;
-                       table->Smio[level] |=
-                               data->mvdd_voltage_table.entries[level].smio_low;
-               }
-               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
-               table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
-       }
-
-       return 0;
-}
-
-static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
-                                       struct SMU74_Discrete_DpmTable *table)
-{
-       uint32_t count, level;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       count = data->vddci_voltage_table.count;
-
-       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               if (count > SMU_MAX_SMIO_LEVELS)
-                       count = SMU_MAX_SMIO_LEVELS;
-               for (level = 0; level < count; ++level) {
-                       table->SmioTable1.Pattern[level].Voltage =
-                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
-                       table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
-
-                       table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
-               }
-       }
-
-       table->SmioMask1 = data->vddci_voltage_table.mask_low;
-
-       return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param    hwmgr  the address of the hardware manager
-* @param    table  the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       uint32_t count;
-       uint8_t index;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
-                       table_info->vddc_lookup_table;
-       /* tables is already swapped, so in order to use the value from it,
-        * we need to swap it back.
-        * We are populating vddc CAC data to BapmVddc table
-        * in split and merged mode
-        */
-       for (count = 0; count < lookup_table->count; count++) {
-               index = phm_get_voltage_index(lookup_table,
-                               data->vddc_voltage_table.entries[count].value);
-               table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
-               table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
-               table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
-       }
-
-       return 0;
-}
-
/**
* Prepare all voltage tables (vddci, mvdd and CAC) for the SMC.
*
* @param    hwmgr   the address of the hardware manager
* @param    table   the SMC DPM table structure to be populated
* @return   always  0
*/
static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	/* Results of the individual helpers are not checked here. */
	polaris10_populate_smc_vddci_table(hwmgr, table);
	polaris10_populate_smc_mvdd_table(hwmgr, table);
	polaris10_populate_cac_table(hwmgr, table);

	return 0;
}
-
-static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_Ulv *state)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       state->CcPwrDynRm = 0;
-       state->CcPwrDynRm1 = 0;
-
-       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
-       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
-                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
-       if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
-               state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
-       else
-               state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
-       return 0;
-}
-
/* Populate the ULV state of the SMC DPM table; the ULV configuration
 * occupies the single table->Ulv sub-structure.
 */
static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
               struct SMU74_Discrete_DpmTable *table)
{
       return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
}
-
-static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       int i;
-
-       /* Index (dpm_table->pcie_speed_table.count)
-        * is reserved for PCIE boot level. */
-       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-               table->LinkLevel[i].PcieGenSpeed  =
-                               (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-               table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
-                               dpm_table->pcie_speed_table.dpm_levels[i].param1);
-               table->LinkLevel[i].EnabledForActivity = 1;
-               table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
-               table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
-               table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
-       }
-
-       smu_data->smc_state_table.LinkLevelCount =
-                       (uint8_t)dpm_table->pcie_speed_table.count;
-
-/* To Do move to hwmgr */
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-       return 0;
-}
-
-
/* Fill the SCLK FCW range table, preferring values supplied by the VBIOS
 * and falling back to the driver's static Range_Table when the VBIOS
 * query fails.
 */
static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
                                  SMU74_Discrete_DpmTable  *table)
{
       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
       uint32_t i, ref_clk;

       struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };

       ref_clk = smu7_get_xclk(hwmgr);

       /* Preferred path: the VBIOS provides the range table directly. */
       if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
               for (i = 0; i < NUM_SCLK_RANGE; i++) {
                       table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
                       table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
                       table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;

                       table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
                       /* NOTE(review): the lower bound is read from
                        * "usRcw_trans_lower" -- presumably a typo for
                        * "usFcw_..." in the atombios struct field name;
                        * confirm against the atombios headers.
                        */
                       table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;

                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
                       CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
               }
               return;
       }

       /* Fallback: use the static Range_Table, and additionally cache the
        * transition frequencies derived from the reference clock; these
        * cached windows are consulted by polaris10_calculate_sclk_params()
        * to pick a PLL range for a given clock.
        */
       for (i = 0; i < NUM_SCLK_RANGE; i++) {
               smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
               smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;

               table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
               table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
               table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;

               table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
               table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;

               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
               CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
       }
}
-
/**
* Calculates the SCLK dividers using the provided engine clock
*
* @param    hwmgr         the address of the hardware manager
* @param    clock         the engine clock to use to populate the structure
* @param    sclk_setting  the SMC SCLK setting structure to be populated
* @return   always 0 (VBIOS failures fall back to a manual calculation)
*/
static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
               uint32_t clock, SMU_SclkSetting *sclk_setting)
{
       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
       const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
       struct pp_atomctrl_clock_dividers_ai dividers;
       uint32_t ref_clock;
       uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
       uint8_t i;
       int result;
       uint64_t temp;

       sclk_setting->SclkFrequency = clock;
       /* get the engine clock dividers for this clock value */
       result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
       if (result == 0) {
               /* Preferred path: the VBIOS computed the dividers for us. */
               sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
               sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
               sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
               sclk_setting->PllRange = dividers.ucSclkPllRange;
               sclk_setting->Sclk_slew_rate = 0x400;
               sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
               sclk_setting->Pcc_down_slew_rate = 0xffff;
               sclk_setting->SSc_En = dividers.ucSscEnable;
               sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
               sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
               sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
               return result;
       }

       /* Fallback: derive the FCW values manually from the reference
        * clock and the range table cached by
        * polaris10_get_sclk_range_table().
        */
       ref_clock = smu7_get_xclk(hwmgr);

       /* Pick the PLL range whose cached transition window contains
        * this clock.
        */
       for (i = 0; i < NUM_SCLK_RANGE; i++) {
               if (clock > smu_data->range_table[i].trans_lower_frequency
               && clock <= smu_data->range_table[i].trans_upper_frequency) {
                       sclk_setting->PllRange = i;
                       break;
               }
       }

       /* Integer part of the FCW; the fractional part below is computed
        * in 1/65536 units (shift by 16, keep the low 16 bits).
        */
       sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
       temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
       temp <<= 0x10;
       do_div(temp, ref_clock);
       sclk_setting->Fcw_frac = temp & 0xffff;

       pcc_target_percent = 10; /*  Hardcode 10% for now. */
       pcc_target_freq = clock - (clock * pcc_target_percent / 100);
       sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);

       ss_target_percent = 2; /*  Hardcode 2% for now. */
       sclk_setting->SSc_En = 0;
       if (ss_target_percent) {
               sclk_setting->SSc_En = 1;
               ss_target_freq = clock - (clock * ss_target_percent / 100);
               sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
               temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
               temp <<= 0x10;
               do_div(temp, ref_clock);
               sclk_setting->Fcw1_frac = temp & 0xffff;
       }

       return 0;
}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    clock the engine clock to use to populate the structure
-* @param    sclk        the SMC SCLK structure to be populated
-*/
-
-static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, uint16_t sclk_al_threshold,
-               struct SMU74_Discrete_GraphicsLevel *level)
-{
-       int result;
-       /* PP_Clocks minClocks; */
-       uint32_t mvdd;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       SMU_SclkSetting curr_sclk_setting = { 0 };
-
-       result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
-
-       /* populate graphics levels */
-       result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                       table_info->vdd_dep_on_sclk, clock,
-                       &level->MinVoltage, &mvdd);
-
-       PP_ASSERT_WITH_CODE((0 == result),
-                       "can not find VDDC voltage value for "
-                       "VDDC engine clock dependency table",
-                       return result);
-       level->ActivityLevel = sclk_al_threshold;
-
-       level->CcPwrDynRm = 0;
-       level->CcPwrDynRm1 = 0;
-       level->EnabledForActivity = 0;
-       level->EnabledForThrottle = 1;
-       level->UpHyst = 10;
-       level->DownHyst = 0;
-       level->VoltageDownHyst = 0;
-       level->PowerThrottle = 0;
-       data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
-               level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
-                                                               hwmgr->display_config.min_core_set_clock_in_sr);
-
-       /* Default to slow, highest DPM level will be
-        * set to PPSMC_DISPLAY_WATERMARK_LOW later.
-        */
-       if (data->update_up_hyst)
-               level->UpHyst = (uint8_t)data->up_hyst;
-       if (data->update_down_hyst)
-               level->DownHyst = (uint8_t)data->down_hyst;
-
-       level->SclkSetting = curr_sclk_setting;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-       CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
-       CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
-       CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
-       return 0;
-}
-
/**
* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
*
* @param    hwmgr      the address of the hardware manager
* @return   0 on success, negative error code otherwise
*/
int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
       struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
       struct phm_ppt_v1_information *table_info =
                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
       uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
       int result = 0;
       /* SMC-side address and size of the graphics level array */
       uint32_t array = smu_data->smu7_data.dpm_table_start +
                       offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
       uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
                       SMU74_MAX_LEVELS_GRAPHICS;
       struct SMU74_Discrete_GraphicsLevel *levels =
                       smu_data->smc_state_table.GraphicsLevel;
       uint32_t i, max_entry;
       /* "hightest" (sic, pre-existing name) tracks the highest PCIE DPM
        * level set in pcie_dpm_enable_mask; lowest/mid likewise.
        */
       uint8_t hightest_pcie_level_enabled = 0,
               lowest_pcie_level_enabled = 0,
               mid_pcie_level_enabled = 0,
               count = 0;

       polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));

       for (i = 0; i < dpm_table->sclk_table.count; i++) {

               result = polaris10_populate_single_graphic_level(hwmgr,
                               dpm_table->sclk_table.dpm_levels[i].value,
                               (uint16_t)smu_data->activity_target[i],
                               &(smu_data->smc_state_table.GraphicsLevel[i]));
               if (result)
                       return result;

               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
               if (i > 1)
                       levels[i].DeepSleepDivId = 0;
       }
       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                       PHM_PlatformCaps_SPLLShutdownSupport))
               smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;

       smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
       smu_data->smc_state_table.GraphicsDpmLevelCount =
                       (uint8_t)dpm_table->sclk_table.count;
       hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
                       phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);


       if (pcie_table != NULL) {
               /* Explicit PCIE table: map SCLK level i to PCIE level i,
                * clamped to the last PCIE entry.
                */
               PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
                               "There must be 1 or more PCIE levels defined in PPTable.",
                               return -EINVAL);
               max_entry = pcie_entry_cnt - 1;
               for (i = 0; i < dpm_table->sclk_table.count; i++)
                       levels[i].pcieDpmLevel =
                                       (uint8_t) ((i < max_entry) ? i : max_entry);
       } else {
               /* No PCIE table: derive highest/lowest/middle enabled PCIE
                * DPM levels by scanning the enable bitmask.
                */
               while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
                               ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
                                               (1 << (hightest_pcie_level_enabled + 1))) != 0))
                       hightest_pcie_level_enabled++;

               while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
                               ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
                                               (1 << lowest_pcie_level_enabled)) == 0))
                       lowest_pcie_level_enabled++;

               while ((count < hightest_pcie_level_enabled) &&
                               ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
                                               (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
                       count++;

               mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
                               hightest_pcie_level_enabled ?
                                               (lowest_pcie_level_enabled + 1 + count) :
                                               hightest_pcie_level_enabled;

               /* set pcieDpmLevel to hightest_pcie_level_enabled */
               for (i = 2; i < dpm_table->sclk_table.count; i++)
                       levels[i].pcieDpmLevel = hightest_pcie_level_enabled;

               /* set pcieDpmLevel to lowest_pcie_level_enabled */
               levels[0].pcieDpmLevel = lowest_pcie_level_enabled;

               /* set pcieDpmLevel to mid_pcie_level_enabled */
               levels[1].pcieDpmLevel = mid_pcie_level_enabled;
       }
       /* level count will send to smc once at init smc table and never change */
       result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
                       (uint32_t)array_size, SMC_RAM_END);

       return result;
}
-
-
-static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
-               uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       int result = 0;
-       struct cgs_display_info info = {0, 0, NULL};
-       uint32_t mclk_stutter_mode_threshold = 40000;
-
-       cgs_get_active_displays_info(hwmgr->device, &info);
-
-       if (table_info->vdd_dep_on_mclk) {
-               result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_mclk, clock,
-                               &mem_level->MinVoltage, &mem_level->MinMvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find MinVddc voltage value from memory "
-                               "VDDC voltage dependency table", return result);
-       }
-
-       mem_level->MclkFrequency = clock;
-       mem_level->EnabledForThrottle = 1;
-       mem_level->EnabledForActivity = 0;
-       mem_level->UpHyst = 0;
-       mem_level->DownHyst = 100;
-       mem_level->VoltageDownHyst = 0;
-       mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
-       mem_level->StutterEnable = false;
-       mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       data->display_timing.num_existing_displays = info.display_count;
-
-       if (mclk_stutter_mode_threshold &&
-               (clock <= mclk_stutter_mode_threshold) &&
-               (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
-                               STUTTER_ENABLE) & 0x1))
-               mem_level->StutterEnable = true;
-
-       if (!result) {
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
-       }
-       return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param    hwmgr      the address of the hardware manager
-*/
-int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
-       int result;
-       /* populate MCLK dpm table to SMU7 */
-       uint32_t array = smu_data->smu7_data.dpm_table_start +
-                       offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
-       uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
-                       SMU74_MAX_LEVELS_MEMORY;
-       struct SMU74_Discrete_MemoryLevel *levels =
-                       smu_data->smc_state_table.MemoryLevel;
-       uint32_t i;
-
-       for (i = 0; i < dpm_table->mclk_table.count; i++) {
-               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-                               "can not populate memory level as memory clock is zero",
-                               return -EINVAL);
-               result = polaris10_populate_single_memory_level(hwmgr,
-                               dpm_table->mclk_table.dpm_levels[i].value,
-                               &levels[i]);
-               if (i == dpm_table->mclk_table.count - 1) {
-                       levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-                       levels[i].EnabledForActivity = 1;
-               }
-               if (result)
-                       return result;
-       }
-
-       /* In order to prevent MC activity from stutter mode to push DPM up,
-        * the UVD change complements this by putting the MCLK in
-        * a higher state by default such that we are not affected by
-        * up threshold or and MCLK DPM latency.
-        */
-       levels[0].ActivityLevel = 0x1f;
-       CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
-       smu_data->smc_state_table.MemoryDpmLevelCount =
-                       (uint8_t)dpm_table->mclk_table.count;
-       hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
-                       phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
-       /* level count will send to smc once at init smc table and never change */
-       result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
-                       (uint32_t)array_size, SMC_RAM_END);
-
-       return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param    hwmgr      the address of the hardware manager
-* @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
-* @param    voltage     the SMC VOLTAGE structure to be populated
-*/
-static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
-               uint32_t mclk, SMIO_Pattern *smio_pat)
-{
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i = 0;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-               /* find mvdd value which clock is more than request */
-               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
-                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
-                               smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
-                               break;
-                       }
-               }
-               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
-                               "MVDD Voltage is outside the supported range.",
-                               return -EINVAL);
-       } else
-               return -EINVAL;
-
-       return 0;
-}
-
/* Populate the ACPI (lowest-power) SCLK and MCLK levels of the SMC DPM
 * table from the VBIOS boot-up clocks and voltages.
 */
static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
               SMU74_Discrete_DpmTable *table)
{
       int result = 0;
       uint32_t sclk_frequency;
       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
       struct phm_ppt_v1_information *table_info =
                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
       SMIO_Pattern vol_level;
       uint32_t mvdd;
       uint16_t us_mvdd;

       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

       /* Get MinVoltage and Frequency from DPM0,
        * already converted to SMC_UL */
       sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
       result = polaris10_get_dependency_volt_by_clk(hwmgr,
                       table_info->vdd_dep_on_sclk,
                       sclk_frequency,
                       &table->ACPILevel.MinVoltage, &mvdd);
       /* Empty action in the assert below: warn only and continue. */
       PP_ASSERT_WITH_CODE((0 == result),
                       "Cannot find ACPI VDDC voltage value "
                       "in Clock Dependency Table",
                       );

       result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
       PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);

       table->ACPILevel.DeepSleepDivId = 0;
       table->ACPILevel.CcPwrDynRm = 0;
       table->ACPILevel.CcPwrDynRm1 = 0;

       /* Byte-swap every multi-byte field into the SMC's expected layout. */
       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);


       /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
       table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
       result = polaris10_get_dependency_volt_by_clk(hwmgr,
                       table_info->vdd_dep_on_mclk,
                       table->MemoryACPILevel.MclkFrequency,
                       &table->MemoryACPILevel.MinVoltage, &mvdd);
       /* Warn only on failure here as well; result is returned below. */
       PP_ASSERT_WITH_CODE((0 == result),
                       "Cannot find ACPI VDDCI voltage value "
                       "in Clock Dependency Table",
                       );

       /* MVDD: use the boot-up value when MVDD control or MCLK DPM is
        * disabled, otherwise look it up from the lowest MCLK level.
        */
       us_mvdd = 0;
       if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
                       (data->mclk_dpm_key_disabled))
               us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
       else {
               if (!polaris10_populate_mvdd_value(hwmgr,
                               data->dpm_table.mclk_table.dpm_levels[0].value,
                               &vol_level))
                       us_mvdd = vol_level.Voltage;
       }

       if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
               table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
       else
               table->MemoryACPILevel.MinMvdd = 0;

       table->MemoryACPILevel.StutterEnable = false;

       table->MemoryACPILevel.EnabledForThrottle = 0;
       table->MemoryACPILevel.EnabledForActivity = 0;
       table->MemoryACPILevel.UpHyst = 0;
       table->MemoryACPILevel.DownHyst = 100;
       table->MemoryACPILevel.VoltageDownHyst = 0;
       table->MemoryACPILevel.ActivityLevel =
                       PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);

       return result;
}
-
/* Populate the SMC VCE levels from the multimedia clock/voltage
 * dependency table.  Returns -EINVAL if the table has no entries (the
 * loop never executes), otherwise the last divider-lookup result.
 */
static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
               SMU74_Discrete_DpmTable *table)
{
       int result = -EINVAL;
       uint8_t count;
       struct pp_atomctrl_clock_dividers_vi dividers;
       struct phm_ppt_v1_information *table_info =
                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
                       table_info->mm_dep_table;
       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
       uint32_t vddci;

       table->VceLevelCount = (uint8_t)(mm_table->count);
       table->VceBootLevel = 0;

       for (count = 0; count < table->VceLevelCount; count++) {
               table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
               table->VceLevel[count].MinVoltage = 0;
               table->VceLevel[count].MinVoltage |=
                               (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;

               /* vddci source depends on the voltage-control method. */
               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
               else
                       /* NOTE(review): this branch pre-scales and pre-shifts
                        * the boot-up vddci, yet the value is scaled and
                        * shifted again when OR'd into MinVoltage below --
                        * looks like double scaling; confirm against other
                        * smu7 variants before relying on it.
                        */
                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;


               table->VceLevel[count].MinVoltage |=
                               (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
               table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

               /*retrieve divider value for VBIOS */
               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
                               table->VceLevel[count].Frequency, &dividers);
               PP_ASSERT_WITH_CODE((0 == result),
                               "can not find divide id for VCE engine clock",
                               return result);

               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
       }
       return result;
}
-
-
/* Populate the SMC SAMU levels from the multimedia clock/voltage
 * dependency table (mirrors polaris10_populate_smc_vce_level).
 * Returns -EINVAL if the table has no entries.
 */
static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
               SMU74_Discrete_DpmTable *table)
{
       int result = -EINVAL;
       uint8_t count;
       struct pp_atomctrl_clock_dividers_vi dividers;
       struct phm_ppt_v1_information *table_info =
                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
                       table_info->mm_dep_table;
       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
       uint32_t vddci;

       table->SamuBootLevel = 0;
       table->SamuLevelCount = (uint8_t)(mm_table->count);

       for (count = 0; count < table->SamuLevelCount; count++) {
               /* not sure whether we need evclk or not */
               table->SamuLevel[count].MinVoltage = 0;
               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
                               VOLTAGE_SCALE) << VDDC_SHIFT;

               /* vddci source depends on the voltage-control method. */
               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
               else
                       /* NOTE(review): as in the VCE variant, this value is
                        * already scaled/shifted here and is scaled/shifted
                        * again below -- confirm whether this double scaling
                        * is intended.
                        */
                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;

               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;

               /* retrieve divider value for VBIOS */
               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
                               table->SamuLevel[count].Frequency, &dividers);
               PP_ASSERT_WITH_CODE((0 == result),
                               "can not find divide id for samu clock", return result);

               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;

               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
       }
       return result;
}
-
-static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
-               int32_t eng_clock, int32_t mem_clock,
-               SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
-       uint32_t dram_timing;
-       uint32_t dram_timing2;
-       uint32_t burst_time;
-       int result;
-
-       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-                       eng_clock, mem_clock);
-       PP_ASSERT_WITH_CODE(result == 0,
-                       "Error calling VBIOS to set DRAM_TIMING.", return result);
-
-       dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-       dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-       burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-
-       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
-       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
-       arb_regs->McArbBurstTime   = (uint8_t)burst_time;
-
-       return 0;
-}
-
-static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
-       uint32_t i, j;
-       int result = 0;
-
-       for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
-               for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
-                       result = polaris10_populate_memory_timing_parameters(hwmgr,
-                                       hw_data->dpm_table.sclk_table.dpm_levels[i].value,
-                                       hw_data->dpm_table.mclk_table.dpm_levels[j].value,
-                                       &arb_regs.entries[i][j]);
-                       if (result == 0)
-                               result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
-                       if (result != 0)
-                               return result;
-               }
-       }
-
-       result = smu7_copy_bytes_to_smc(
-                       hwmgr,
-                       smu_data->smu7_data.arb_table_start,
-                       (uint8_t *)&arb_regs,
-                       sizeof(SMU74_Discrete_MCArbDramTimingTable),
-                       SMC_RAM_END);
-       return result;
-}
-
-static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t vddci;
-
-       table->UvdLevelCount = (uint8_t)(mm_table->count);
-       table->UvdBootLevel = 0;
-
-       for (count = 0; count < table->UvdLevelCount; count++) {
-               table->UvdLevel[count].MinVoltage = 0;
-               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-               table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-               else
-                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-               table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->UvdLevel[count].VclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for Vclk clock", return result);
-
-               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->UvdLevel[count].DclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for Dclk clock", return result);
-
-               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-       }
-
-       return result;
-}
-
-static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       int result = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       table->GraphicsBootLevel = 0;
-       table->MemoryBootLevel = 0;
-
-       /* find boot level from dpm table */
-       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
-                       data->vbios_boot_state.sclk_bootup_value,
-                       (uint32_t *)&(table->GraphicsBootLevel));
-
-       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
-                       data->vbios_boot_state.mclk_bootup_value,
-                       (uint32_t *)&(table->MemoryBootLevel));
-
-       table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
-                       VOLTAGE_SCALE;
-       table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
-                       VOLTAGE_SCALE;
-       table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
-                       VOLTAGE_SCALE;
-
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-       return 0;
-}
-
-static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint8_t count, level;
-
-       count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-
-       for (level = 0; level < count; level++) {
-               if (table_info->vdd_dep_on_sclk->entries[level].clk >=
-                               hw_data->vbios_boot_state.sclk_bootup_value) {
-                       smu_data->smc_state_table.GraphicsBootLevel = level;
-                       break;
-               }
-       }
-
-       count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
-       for (level = 0; level < count; level++) {
-               if (table_info->vdd_dep_on_mclk->entries[level].clk >=
-                               hw_data->vbios_boot_state.mclk_bootup_value) {
-                       smu_data->smc_state_table.MemoryBootLevel = level;
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-
-static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-       uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-
-       uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-
-       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-       /* Read SMU_Eefuse to read and calculate RO and determine
-        * if the part is SS or FF. if RO >= 1660MHz, part is FF.
-        */
-       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (67 * 4));
-       efuse &= 0xFF000000;
-       efuse = efuse >> 24;
-
-       if (hwmgr->chip_id == CHIP_POLARIS10) {
-               min = 1000;
-               max = 2300;
-       } else {
-               min = 1100;
-               max = 2100;
-       }
-
-       ro = efuse * (max - min) / 255 + min;
-
-       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
-       for (i = 0; i < sclk_table->count; i++) {
-               smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
-                               sclk_table->entries[i].cks_enable << i;
-               if (hwmgr->chip_id == CHIP_POLARIS10) {
-                       volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
-                                               (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
-                       volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
-                                       (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
-               } else {
-                       volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
-                                               (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
-                       volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
-                                       (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
-               }
-
-               if (volt_without_cks >= volt_with_cks)
-                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-                                       sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
-
-               smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
-       }
-
-       smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
-       /* Populate CKS Lookup Table */
-       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-               stretch_amount2 = 0;
-       else if (stretch_amount == 3 || stretch_amount == 4)
-               stretch_amount2 = 1;
-       else {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ClockStretcher);
-               PP_ASSERT_WITH_CODE(false,
-                               "Stretch Amount in PPTable not supported\n",
-                               return -EINVAL);
-       }
-
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
-       value &= 0xFFFFFFFE;
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
-       return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param    hwmgr   the address of the hardware manager
-* @param    table   the SMC DPM table structure to be populated
-* @return   always 0
-*/
-static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
-               struct SMU74_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       uint16_t config;
-
-       config = VR_MERGED_WITH_VDDC;
-       table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
-       /* Set Vddc Voltage Controller */
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               config = VR_SVI2_PLANE_1;
-               table->VRConfig |= config;
-       } else {
-               PP_ASSERT_WITH_CODE(false,
-                               "VDDC should be on SVI2 control in merged mode!",
-                               );
-       }
-       /* Set Vddci Voltage Controller */
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-               config = VR_SVI2_PLANE_2;  /* only in merged mode */
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               config = VR_SMIO_PATTERN_1;
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       } else {
-               config = VR_STATIC_VOLTAGE;
-               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
-       }
-       /* Set Mvdd Voltage Controller */
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
-               config = VR_SVI2_PLANE_2;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
-                       offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
-       } else {
-               config = VR_STATIC_VOLTAGE;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       }
-
-       return 0;
-}
-
-
-static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-
-       SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
-       int result = 0;
-       struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
-       AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
-       AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
-       uint32_t tmp, i;
-
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)hwmgr->pptable;
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-
-
-       if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
-               return result;
-
-       result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
-
-       if (0 == result) {
-               table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
-               table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
-               table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
-               table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
-               table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
-               table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
-               table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
-               table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
-               table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
-               table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
-               table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
-               table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
-               table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
-               table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
-               table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
-               table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
-               table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
-               AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
-               AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
-               AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
-               AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
-               AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
-               AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
-               AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
-
-               for (i = 0; i < NUM_VFT_COLUMNS; i++) {
-                       AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
-                       AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
-               }
-
-               result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
-                               &tmp, SMC_RAM_END);
-
-               smu7_copy_bytes_to_smc(hwmgr,
-                                       tmp,
-                                       (uint8_t *)&AVFS_meanNsigma,
-                                       sizeof(AVFS_meanNsigma_t),
-                                       SMC_RAM_END);
-
-               result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
-                               &tmp, SMC_RAM_END);
-               smu7_copy_bytes_to_smc(hwmgr,
-                                       tmp,
-                                       (uint8_t *)&AVFS_SclkOffset,
-                                       sizeof(AVFS_Sclk_Offset_t),
-                                       SMC_RAM_END);
-
-               data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
-                                               (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
-                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
-                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
-               data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
-       }
-       return result;
-}
-
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       uint32_t tmp;
-       int result;
-
-       /* This is a read-modify-write on the first byte of the ARB table.
-        * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
-        * is the field 'current'.
-        * This solution is ugly, but we never write the whole table only
-        * individual fields in it.
-        * In reality this field should not be in that structure
-        * but in a soft register.
-        */
-       result = smu7_read_smc_sram_dword(hwmgr,
-                       smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
-       if (result)
-               return result;
-
-       tmp &= 0x00FFFFFF;
-       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-       return smu7_write_smc_sram_dword(hwmgr,
-                       smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct  phm_ppt_v1_information *table_info =
-                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (table_info &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID)
-               smu_data->power_tune_defaults =
-                               &polaris10_power_tune_data_set_array
-                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
-       else
-               smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
-
-}
-
-static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct SMU74_Discrete_GraphicsLevel *levels =
-                               data->smc_state_table.GraphicsLevel;
-       unsigned min_level = 1;
-
-       hwmgr->default_gfx_power_profile.activity_threshold =
-                       be16_to_cpu(levels[0].ActivityLevel);
-       hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
-       hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
-       hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
-       hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
-       hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
-       /* Workaround compute SDMA instability: disable lowest SCLK
-        * DPM level. Optimize compute power profile: Use only highest
-        * 2 power levels (if more than 2 are available), Hysteresis:
-        * 0ms up, 5ms down
-        */
-       if (data->smc_state_table.GraphicsDpmLevelCount > 2)
-               min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
-       else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
-               min_level = 1;
-       else
-               min_level = 0;
-       hwmgr->default_compute_power_profile.min_sclk =
-               be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
-       hwmgr->default_compute_power_profile.up_hyst = 0;
-       hwmgr->default_compute_power_profile.down_hyst = 5;
-
-       hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
-       hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always 0
-*/
-int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
-       uint8_t i;
-       struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-       pp_atomctrl_clock_dividers_vi dividers;
-
-       polaris10_initialize_power_tune_defaults(hwmgr);
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
-               polaris10_populate_smc_voltage_tables(hwmgr, table);
-
-       table->SystemFlags = 0;
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StepVddc))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-       if (hw_data->is_memory_gddr5)
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-       if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
-               result = polaris10_populate_ulv_state(hwmgr, table);
-               PP_ASSERT_WITH_CODE(0 == result,
-                               "Failed to initialize ULV state!", return result);
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                               ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
-       }
-
-       result = polaris10_populate_smc_link_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Link Level!", return result);
-
-       result = polaris10_populate_all_graphic_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Graphics Level!", return result);
-
-       result = polaris10_populate_all_memory_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Memory Level!", return result);
-
-       result = polaris10_populate_smc_acpi_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize ACPI Level!", return result);
-
-       result = polaris10_populate_smc_vce_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize VCE Level!", return result);
-
-       result = polaris10_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize SAMU Level!", return result);
-
-       /* Since only the initial state is completely set up at this point
-        * (the other states are just copies of the boot state) we only
-        * need to populate the  ARB settings for the initial state.
-        */
-       result = polaris10_program_memory_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to Write ARB settings for the initial state.", return result);
-
-       result = polaris10_populate_smc_uvd_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize UVD Level!", return result);
-
-       result = polaris10_populate_smc_boot_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Boot Level!", return result);
-
-       result = polaris10_populate_smc_initailial_state(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize Boot State!", return result);
-
-       result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate BAPM Parameters!", return result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ClockStretcher)) {
-               result = polaris10_populate_clock_stretcher_data_table(hwmgr);
-               PP_ASSERT_WITH_CODE(0 == result,
-                               "Failed to populate Clock Stretcher Data Table!",
-                               return result);
-       }
-
-       result = polaris10_populate_avfs_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
-
-       table->CurrSclkPllRange = 0xff;
-       table->GraphicsVoltageChangeEnable  = 1;
-       table->GraphicsThermThrottleEnable  = 1;
-       table->GraphicsInterval = 1;
-       table->VoltageInterval  = 1;
-       table->ThermalInterval  = 1;
-       table->TemperatureLimitHigh =
-                       table_info->cac_dtp_table->usTargetOperatingTemp *
-                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
-       table->TemperatureLimitLow  =
-                       (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
-       table->MemoryVoltageChangeEnable = 1;
-       table->MemoryInterval = 1;
-       table->VoltageResponseTime = 0;
-       table->PhaseResponseTime = 0;
-       table->MemoryThermThrottleEnable = 1;
-       table->PCIeBootLinkLevel = 0;
-       table->PCIeGenInterval = 1;
-       table->VRConfig = 0;
-
-       result = polaris10_populate_vr_config(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to populate VRConfig setting!", return result);
-
-       table->ThermGpio = 17;
-       table->SclkStepSize = 0x4000;
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
-               table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
-       } else {
-               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_RegulatorHot);
-       }
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-                       &gpio_pin)) {
-               table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_AutomaticDCTransition);
-       } else {
-               table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_AutomaticDCTransition);
-       }
-
-       /* Thermal Output GPIO */
-       if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
-                       &gpio_pin)) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ThermalOutGPIO);
-
-               table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-               /* For porlarity read GPIOPAD_A with assigned Gpio pin
-                * since VBIOS will program this register to set 'inactive state',
-                * driver can then determine 'active state' from this and
-                * program SMU with correct polarity
-                */
-               table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
-                                       & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-               /* if required, combine VRHot/PCC with thermal out GPIO */
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
-               && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
-                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-       } else {
-               table->ThermOutGpio = 17;
-               table->ThermOutPolarity = 1;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-       }
-
-       /* Populate BIF_SCLK levels into SMC DPM table */
-       for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
-               PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
-               if (i == 0)
-                       table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
-               else
-                       table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
-       }
-
-       for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
-               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
-       result = smu7_copy_bytes_to_smc(hwmgr,
-                       smu_data->smu7_data.dpm_table_start +
-                       offsetof(SMU74_Discrete_DpmTable, SystemFlags),
-                       (uint8_t *)&(table->SystemFlags),
-                       sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
-                       SMC_RAM_END);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to upload dpm data to SMC memory!", return result);
-
-       result = polaris10_init_arb_table_index(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to upload arb data to SMC memory!", return result);
-
-       result = polaris10_populate_pm_fuses(hwmgr);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to  populate PM fuses to SMC memory!", return result);
-
-       polaris10_save_default_power_profile(hwmgr);
-
-       return 0;
-}
-
-static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
-               return polaris10_program_memory_timing_parameters(hwmgr);
-
-       return 0;
-}
-
-int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
-{
-       int ret;
-       struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
-               return 0;
-
-       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
-
-       ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
-                       0 : -1;
-
-       if (!ret)
-               /* If this param is not changed, this function could fire unnecessarily */
-               smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
-       return ret;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-       uint32_t duty100;
-       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-       uint16_t fdo_min, slope1, slope2;
-       uint32_t reference_clock;
-       int res;
-       uint64_t tmp64;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       if (smu_data->smu7_data.fan_table_start == 0) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
-                       CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (duty100 == 0) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
-                       usPWMMin * duty100;
-       do_div(tmp64, 10000);
-       fdo_min = (uint16_t)tmp64;
-
-       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
-                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-       fan_table.TempMin = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMin) / 100);
-       fan_table.TempMed = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMed) / 100);
-       fan_table.TempMax = cpu_to_be16((50 + hwmgr->
-                       thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-       fan_table.Slope1 = cpu_to_be16(slope1);
-       fan_table.Slope2 = cpu_to_be16(slope2);
-
-       fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-       fan_table.HystDown = cpu_to_be16(hwmgr->
-                       thermal_controller.advanceFanControlParameters.ucTHyst);
-
-       fan_table.HystUp = cpu_to_be16(1);
-
-       fan_table.HystSlope = cpu_to_be16(1);
-
-       fan_table.TempRespLim = cpu_to_be16(5);
-
-       reference_clock = smu7_get_xclk(hwmgr);
-
-       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
-                       thermal_controller.advanceFanControlParameters.ulCycleDelay *
-                       reference_clock) / 1600);
-
-       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
-                       hwmgr->device, CGS_IND_REG__SMC,
-                       CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-       res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
-                       (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
-                       SMC_RAM_END);
-
-       if (!res && hwmgr->thermal_controller.
-                       advanceFanControlParameters.ucMinimumPWMLimit)
-               res = smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SetFanMinPwm,
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ucMinimumPWMLimit);
-
-       if (!res && hwmgr->thermal_controller.
-                       advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
-               res = smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SetFanSclkTarget,
-                               hwmgr->thermal_controller.
-                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
-       if (res)
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-
-       return 0;
-}
-
-static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       smu_data->smc_state_table.UvdBootLevel = 0;
-       if (table_info->mm_dep_table->count > 0)
-               smu_data->smc_state_table.UvdBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
-                                               UvdBootLevel);
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0x00FFFFFF;
-       mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_UVDDPM) ||
-               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
-       return 0;
-}
-
-static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_StablePState))
-               smu_data->smc_state_table.VceBootLevel =
-                       (uint8_t) (table_info->mm_dep_table->count - 1);
-       else
-               smu_data->smc_state_table.VceBootLevel = 0;
-
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                                       offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFF00FFFF;
-       mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
-       return 0;
-}
-
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
-
-static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
-       int max_entry, i;
-
-       max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
-                                               SMU74_MAX_LEVELS_LINK :
-                                               pcie_table->count;
-       /* Setup BIF_SCLK levels */
-       for (i = 0; i < max_entry; i++)
-               smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
-       return 0;
-}
-
-int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
-       switch (type) {
-       case SMU_UVD_TABLE:
-               polaris10_update_uvd_smc_table(hwmgr);
-               break;
-       case SMU_VCE_TABLE:
-               polaris10_update_vce_smc_table(hwmgr);
-               break;
-       case SMU_SAMU_TABLE:
-               polaris10_update_samu_smc_table(hwmgr);
-               break;
-       case SMU_BIF_TABLE:
-               polaris10_update_bif_smc_table(hwmgr);
-       default:
-               break;
-       }
-       return 0;
-}
-
-int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-
-       int result = 0;
-       uint32_t low_sclk_interrupt_threshold = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkThrottleLowNotification)
-               && (hwmgr->gfx_arbiter.sclk_threshold !=
-                               data->low_sclk_interrupt_threshold)) {
-               data->low_sclk_interrupt_threshold =
-                               hwmgr->gfx_arbiter.sclk_threshold;
-               low_sclk_interrupt_threshold =
-                               data->low_sclk_interrupt_threshold;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-               result = smu7_copy_bytes_to_smc(
-                               hwmgr,
-                               smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU74_Discrete_DpmTable,
-                                       LowSclkInterruptThreshold),
-                               (uint8_t *)&low_sclk_interrupt_threshold,
-                               sizeof(uint32_t),
-                               SMC_RAM_END);
-       }
-       PP_ASSERT_WITH_CODE((result == 0),
-                       "Failed to update SCLK threshold!", return result);
-
-       result = polaris10_program_mem_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((result == 0),
-                       "Failed to program memory timing parameters!",
-                       );
-
-       return result;
-}
-
-uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
-{
-       switch (type) {
-       case SMU_SoftRegisters:
-               switch (member) {
-               case HandshakeDisables:
-                       return offsetof(SMU74_SoftRegisters, HandshakeDisables);
-               case VoltageChangeTimeout:
-                       return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
-               case AverageGraphicsActivity:
-                       return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
-               case PreVBlankGap:
-                       return offsetof(SMU74_SoftRegisters, PreVBlankGap);
-               case VBlankTimeout:
-                       return offsetof(SMU74_SoftRegisters, VBlankTimeout);
-               case UcodeLoadStatus:
-                       return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
-               }
-       case SMU_Discrete_DpmTable:
-               switch (member) {
-               case UvdBootLevel:
-                       return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
-               case VceBootLevel:
-                       return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-               case LowSclkInterruptThreshold:
-                       return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
-               }
-       }
-       pr_warn("can't get the offset of type %x member %x\n", type, member);
-       return 0;
-}
-
-uint32_t polaris10_get_mac_definition(uint32_t value)
-{
-       switch (value) {
-       case SMU_MAX_LEVELS_GRAPHICS:
-               return SMU74_MAX_LEVELS_GRAPHICS;
-       case SMU_MAX_LEVELS_MEMORY:
-               return SMU74_MAX_LEVELS_MEMORY;
-       case SMU_MAX_LEVELS_LINK:
-               return SMU74_MAX_LEVELS_LINK;
-       case SMU_MAX_ENTRIES_SMIO:
-               return SMU74_MAX_ENTRIES_SMIO;
-       case SMU_MAX_LEVELS_VDDC:
-               return SMU74_MAX_LEVELS_VDDC;
-       case SMU_MAX_LEVELS_VDDGFX:
-               return SMU74_MAX_LEVELS_VDDGFX;
-       case SMU_MAX_LEVELS_VDDCI:
-               return SMU74_MAX_LEVELS_VDDCI;
-       case SMU_MAX_LEVELS_MVDD:
-               return SMU74_MAX_LEVELS_MVDD;
-       case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
-               return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
-       }
-
-       pr_warn("can't get the mac of %x\n", value);
-       return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @return   always  0
-*/
-int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t tmp;
-       int result;
-       bool error = false;
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, DpmTable),
-                       &tmp, SMC_RAM_END);
-
-       if (0 == result)
-               smu_data->smu7_data.dpm_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, SoftRegisters),
-                       &tmp, SMC_RAM_END);
-
-       if (!result) {
-               data->soft_regs_start = tmp;
-               smu_data->smu7_data.soft_regs_start = tmp;
-       }
-
-       error |= (0 != result);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, mcRegisterTable),
-                       &tmp, SMC_RAM_END);
-
-       if (!result)
-               smu_data->smu7_data.mc_reg_table_start = tmp;
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, FanTable),
-                       &tmp, SMC_RAM_END);
-
-       if (!result)
-               smu_data->smu7_data.fan_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
-                       &tmp, SMC_RAM_END);
-
-       if (!result)
-               smu_data->smu7_data.arb_table_start = tmp;
-
-       error |= (0 != result);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                       SMU7_FIRMWARE_HEADER_LOCATION +
-                       offsetof(SMU74_Firmware_Header, Version),
-                       &tmp, SMC_RAM_END);
-
-       if (!result)
-               hwmgr->microcode_version_info.SMC = tmp;
-
-       error |= (0 != result);
-
-       return error ? -1 : 0;
-}
-
-bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
-                       ? true : false;
-}
-
-int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
-               struct amd_pp_profile *request)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
-                       (hwmgr->smu_backend);
-       struct SMU74_Discrete_GraphicsLevel *levels =
-                       smu_data->smc_state_table.GraphicsLevel;
-       uint32_t array = smu_data->smu7_data.dpm_table_start +
-                       offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
-       uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
-                       SMU74_MAX_LEVELS_GRAPHICS;
-       uint32_t i;
-
-       for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
-               levels[i].ActivityLevel =
-                               cpu_to_be16(request->activity_threshold);
-               levels[i].EnabledForActivity = 1;
-               levels[i].UpHyst = request->up_hyst;
-               levels[i].DownHyst = request->down_hyst;
-       }
-
-       return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
-                               array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
deleted file mode 100644 (file)
index 1df8154..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef POLARIS10_SMC_H
-#define POLARIS10_SMC_H
-
-#include "smumgr.h"
-
-
-int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
-int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
-int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
-uint32_t polaris10_get_mac_definition(uint32_t value);
-int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
-bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
-int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
-               struct amd_pp_profile *request);
-
-#endif
-
index 61ee6281fbda181cce552e88adcbf1d86f8e6ffd..bd6be7793ca79ffb187ac2d55e225ba13df2c75d 100644 (file)
 #include "gca/gfx_8_0_d.h"
 #include "bif/bif_5_0_d.h"
 #include "bif/bif_5_0_sh_mask.h"
-#include "polaris10_pwrvirus.h"
 #include "ppatomctrl.h"
 #include "cgs_common.h"
-#include "polaris10_smc.h"
 #include "smu7_ppsmc.h"
 #include "smu7_smumgr.h"
 
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VDDC_VDDCI_DELTA            200
+#define MC_CG_ARB_FREQ_F1           0x0b
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+       /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+        * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+       { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+       { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+       { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+                       {VCO_2_4, POSTDIV_DIV_BY_16,  75, 160, 112},
+                       {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+                       {VCO_2_4, POSTDIV_DIV_BY_8,   75, 160, 112},
+                       {VCO_3_6, POSTDIV_DIV_BY_8,  112, 224, 160},
+                       {VCO_2_4, POSTDIV_DIV_BY_4,   75, 160, 112},
+                       {VCO_3_6, POSTDIV_DIV_BY_4,  112, 216, 160},
+                       {VCO_2_4, POSTDIV_DIV_BY_2,   75, 160, 108},
+                       {VCO_3_6, POSTDIV_DIV_BY_2,  112, 216, 160} };
+
 #define PPPOLARIS10_TARGETACTIVITY_DFLT                     50
 
 static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
@@ -60,46 +94,6 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
 static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
        0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
 
-static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
-{
-       int i;
-       uint32_t reg, data;
-
-       for (i = 0; i < size; i++) {
-               reg  = pvirus->reg;
-               data = pvirus->data;
-               if (reg != 0xffffffff)
-                       cgs_write_register(hwmgr->device, reg, data);
-               else
-                       break;
-               pvirus++;
-       }
-}
-
-static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
-{
-       int i;
-       cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
-       cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
-       cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
-       for (i = 0; i < section->dfy_size; i++)
-               cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
-}
-
-static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr)
-{
-       execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
-       execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
-       execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));
-
-       return 0;
-}
-
 static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
 {
        int result = 0;
@@ -197,7 +191,7 @@ polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
                if (smu_data->avfs.avfs_btc_param > 1) {
                        pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
                        smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
-                       PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(hwmgr),
+                       PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
                        "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
                        return -EINVAL);
                }
@@ -389,6 +383,2195 @@ static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+               struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+               uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+       uint32_t i;
+       uint16_t vddci;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       *voltage = *mvdd = 0;
+
+       /* clock - voltage dependency table is empty table */
+       if (dep_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < dep_table->count; i++) {
+               /* find first sclk bigger than request */
+               if (dep_table->entries[i].clk >= clock) {
+                       *voltage |= (dep_table->entries[i].vddc *
+                                       VOLTAGE_SCALE) << VDDC_SHIFT;
+                       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+                               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       else if (dep_table->entries[i].vddci)
+                               *voltage |= (dep_table->entries[i].vddci *
+                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       else {
+                               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               (dep_table->entries[i].vddc -
+                                                               (uint16_t)VDDC_VDDCI_DELTA));
+                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+                       }
+
+                       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+                               *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+                                       VOLTAGE_SCALE;
+                       else if (dep_table->entries[i].mvdd)
+                               *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+                                       VOLTAGE_SCALE;
+
+                       *voltage |= 1 << PHASES_SHIFT;
+                       return 0;
+               }
+       }
+
+       /* sclk is bigger than max sclk in the dependence table */
+       *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+               *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+       else if (dep_table->entries[i-1].vddci) {
+               vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+                               (dep_table->entries[i].vddc -
+                                               (uint16_t)VDDC_VDDCI_DELTA));
+               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+       }
+
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+               *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+       else if (dep_table->entries[i].mvdd)
+               *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+       return 0;
+}
+
/* Rescale a fan-gain percentage value (0-100) into the SMC's
 * 4096-based fixed-point representation. */
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
	return (uint16_t)(((uint32_t)raw_setting * 4096) / 100);
}
+
/*
 * Fill the BAPM (power containment) fields of the SMC DPM table:
 * default/target TDP, edge and hotspot temperature limits, fan gains,
 * and the BAPMTI_R / BAPMTI_RC coefficient matrices taken from the
 * per-chip power-tune defaults.  Multi-byte values are converted to SMC
 * byte order via PP_HOST_TO_SMC_US.  Always returns 0; an out-of-range
 * target temperature only triggers the PP_ASSERT warning print.
 */
static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);

	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	struct pp_advance_fan_control_parameters *fan_table =
			&hwmgr->thermal_controller.advanceFanControlParameters;
	int i, j, k;
	const uint16_t *pdef1;
	const uint16_t *pdef2;

	/* TDP values are scaled by 128 before download to the SMC. */
	table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
	table->TargetTdp  = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));

	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
				"Target Operating Temp is out of Range!",
				);

	/* Temperature limits are scaled by 256; fan gains by 4096/100. */
	table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTargetOperatingTemp * 256);
	table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
			cac_dtp_table->usTemperatureLimitHotspot * 256);
	table->FanGainEdge = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainEdge));
	table->FanGainHotspot = PP_HOST_TO_SMC_US(
			scale_fan_gain_settings(fan_table->usFanGainHotspot));

	/* Copy the flat default arrays into the iterations x sources x
	 * sinks coefficient matrices, swapping each element to SMC order. */
	pdef1 = defaults->BAPMTI_R;
	pdef2 = defaults->BAPMTI_RC;

	for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU74_DTE_SOURCES; j++) {
			for (k = 0; k < SMU74_DTE_SINKS; k++) {
				table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
				table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
				pdef1++;
				pdef2++;
			}
		}
	}

	return 0;
}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+       smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+       return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+       uint16_t tdc_limit;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+                       defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+       smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+       return 0;
+}
+
/*
 * Read the dword holding TdcWaterfallCtl from the PmFuses area in SMC
 * SRAM, then populate TdcWaterfallCtl from the defaults and the LPML
 * temperature min/max (and reserved byte) from the value read back.
 * Returns -EINVAL if the SRAM read fails, 0 otherwise.
 */
static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (smu7_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		/* NOTE(review): the message mentions DW6 / SviLoadLineEn but
		 * the read above is of TdcWaterfallCtl -- looks like a stale
		 * copy-paste; confirm before relying on this log text. */
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else {
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
		/* Unpack bytes of the dword read from SRAM. */
		smu_data->power_tune_table.LPMLTemperatureMin =
				(uint8_t)((temp >> 16) & 0xff);
		smu_data->power_tune_table.LPMLTemperatureMax =
				(uint8_t)((temp >> 8) & 0xff);
		smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
	}
	return 0;
}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+       return 0;
+}
+
/*
 * Program the fuzzy-fan PWM set-point delta fuse from the fan output
 * sensitivity.  A sensitivity with bit 15 set, or a zero sensitivity,
 * is first replaced by the default sensitivity.  Always returns 0.
 */
static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);

/* TO DO move to hwmgr */
	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
			hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
				hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
	return 0;
}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.GnbLPML[i] = 0;
+
+       return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+       uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+       hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+       lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+       return 0;
+}
+
/*
 * Build the complete PmFuses table (load line, TDC limit, waterfall
 * control, temperature scalers, fuzzy fan, GNB LPML, base leakage) and
 * download it to SMC SRAM.  A no-op unless the PowerContainment
 * platform cap is enabled.  Returns -EINVAL on any sub-step or SRAM
 * transfer failure, 0 otherwise.
 */
static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Locate the PmFuses area via the firmware header. */
		if (smu7_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU74_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to get pm_fuse_table_offset Failed!",
					return -EINVAL);

		if (polaris10_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate SviLoadLine Failed!",
					return -EINVAL);

		if (polaris10_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed!", return -EINVAL);

		if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TdcWaterfallCtl, "
					"LPMLTemperature Min and Max Failed!",
					return -EINVAL);

		if (0 != polaris10_populate_temperature_scaler(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate LPMLTemperatureScaler Failed!",
					return -EINVAL);

		if (polaris10_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate Fuzzy Fan Control parameters Failed!",
					return -EINVAL);

		if (polaris10_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate GnbLPML Failed!",
					return -EINVAL);

		if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
					"Sidd Failed!", return -EINVAL);

		/* NOTE(review): the trailing 92 bytes of the PmFuses struct
		 * are deliberately not downloaded -- presumably SMC-owned;
		 * confirm against the SMU74 firmware interface. */
		if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				(sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed!",
					return -EINVAL);
	}
	return 0;
}
+
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+                       SMU74_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count, level;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               count = data->mvdd_voltage_table.count;
+               if (count > SMU_MAX_SMIO_LEVELS)
+                       count = SMU_MAX_SMIO_LEVELS;
+               for (level = 0; level < count; level++) {
+                       table->SmioTable2.Pattern[level].Voltage =
+                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+                       table->SmioTable2.Pattern[level].Smio =
+                               (uint8_t) level;
+                       table->Smio[level] |=
+                               data->mvdd_voltage_table.entries[level].smio_low;
+               }
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+               table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+       }
+
+       return 0;
+}
+
/*
 * Populate SmioTable1 with the GPIO (SMIO) voltage patterns for VDDCI
 * when VDDCI is GPIO-controlled.  The SMIO mask is programmed
 * regardless of the control method.  Always returns 0.
 */
static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
					struct SMU74_Discrete_DpmTable *table)
{
	uint32_t count, level;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	count = data->vddci_voltage_table.count;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		/* Clamp to the number of SMIO pattern slots. */
		if (count > SMU_MAX_SMIO_LEVELS)
			count = SMU_MAX_SMIO_LEVELS;
		for (level = 0; level < count; ++level) {
			table->SmioTable1.Pattern[level].Voltage =
				PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
			table->SmioTable1.Pattern[level].Smio = (uint8_t) level;

			table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
		}
	}

	/* Mask is set even when VDDCI is not GPIO-controlled. */
	table->SmioMask1 = data->vddci_voltage_table.mask_low;

	return 0;
}
+
/*
 * Populate the per-VDDC-level BAPM CAC VID tables (low/mid/high
 * leakage) from the VDDC lookup table.  Always returns 0.
 */
static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	uint32_t count;
	uint8_t index;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
			table_info->vddc_lookup_table;
	/* tables is already swapped, so in order to use the value from it,
	 * we need to swap it back.
	 * We are populating vddc CAC data to BapmVddc table
	 * in split and merged mode
	 */
	for (count = 0; count < lookup_table->count; count++) {
		/* Map each VDDC DPM voltage back to its lookup-table entry,
		 * then encode its CAC figures as VIDs. */
		index = phm_get_voltage_index(lookup_table,
				data->vddc_voltage_table.entries[count].value);
		table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
		table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
		table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
	}

	return 0;
}
+
/*
 * Populate all SMC voltage tables: VDDCI and MVDD SMIO patterns, then
 * the per-VDDC CAC VID tables.  The helpers only ever return 0, so
 * their results are not checked.  Always returns 0.
 */
static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	polaris10_populate_smc_vddci_table(hwmgr, table);
	polaris10_populate_smc_mvdd_table(hwmgr, table);
	polaris10_populate_cac_table(hwmgr, table);

	return 0;
}
+
/*
 * Fill one ULV (ultra-low-voltage) state entry: VDDC offset (raw and as
 * a VID step), phase-shed encoding, and zeroed power-down resume
 * counters, then convert the multi-byte fields to SMC byte order.
 * Always returns 0.
 */
static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_Ulv *state)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	/* Rescale the PPTable offset into VID units. */
	state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
	state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);

	/* Polaris12 and kicker parts use an inverted phase-shed encoding. */
	if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
		state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
	else
		state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}
+
/* The DPM table carries a single ULV entry; delegate to the level
 * populator and propagate its result. */
static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
}
+
/*
 * Fill the PCIe LinkLevel entries of the SMC DPM table from the PCIe
 * speed DPM table (gen speed, lane count, fixed up/down thresholds) and
 * derive the PCIe DPM enable mask.  Note the loop bound is inclusive:
 * index pcie_speed_table.count is the reserved PCIe boot level.
 * Always returns 0.
 */
static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
		struct SMU74_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int i;

	/* Index (dpm_table->pcie_speed_table.count)
	 * is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed  =
				(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
				dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
			(uint8_t)dpm_table->pcie_speed_table.count;

/* To Do move to hwmgr */
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
+
+
/*
 * Program the SclkFcwRangeTable (engine-PLL range table).  Prefer the
 * table supplied by the VBIOS; if that read fails, fall back to the
 * driver's built-in Range_Table, additionally caching each range's
 * transition frequencies (derived from the reference clock) for later
 * PLL-range lookups in polaris10_calculate_sclk_params().
 */
static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
				   SMU74_Discrete_DpmTable  *table)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	uint32_t i, ref_clk;

	struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };

	ref_clk = smu7_get_xclk(hwmgr);

	if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
		for (i = 0; i < NUM_SCLK_RANGE; i++) {
			table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
			table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
			table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;

			table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
			/* NOTE(review): 'usRcw_trans_lower' looks like a typo'd
			 * field name (cf. usFcw_trans_upper) inherited from the
			 * atomctrl header -- confirm against that struct. */
			table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;

			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
			CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
		}
		return;
	}

	/* VBIOS table unavailable: use the built-in defaults. */
	for (i = 0; i < NUM_SCLK_RANGE; i++) {
		smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
		smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;

		table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
		table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
		table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;

		table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
		table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;

		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
		CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
	}
}
+
/*
 * Compute the SMC SclkSetting (FCW integer/fraction, PCC and
 * spread-spectrum targets, slew rates) for a requested engine clock.
 * Preferred path: ask the VBIOS for AI PLL dividers.  On failure, fall
 * back to deriving the values from the cached PLL range table and the
 * reference clock; in that path the PCC target is hard-coded to 10%
 * below sclk and the SS target to 2% below.  Always returns 0.
 */
static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, SMU_SclkSetting *sclk_setting)
{
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct pp_atomctrl_clock_dividers_ai dividers;
	uint32_t ref_clock;
	uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
	uint8_t i;
	int result;
	uint64_t temp;

	sclk_setting->SclkFrequency = clock;
	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock,  &dividers);
	if (result == 0) {
		sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
		sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
		sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
		sclk_setting->PllRange = dividers.ucSclkPllRange;
		sclk_setting->Sclk_slew_rate = 0x400;
		sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
		sclk_setting->Pcc_down_slew_rate = 0xffff;
		sclk_setting->SSc_En = dividers.ucSscEnable;
		sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
		sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
		sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
		return result;
	}

	/* Fallback: compute FCW values manually. */
	ref_clock = smu7_get_xclk(hwmgr);

	/* Pick the PLL range whose transition window contains the clock. */
	for (i = 0; i < NUM_SCLK_RANGE; i++) {
		if (clock > smu_data->range_table[i].trans_lower_frequency
		&& clock <= smu_data->range_table[i].trans_upper_frequency) {
			sclk_setting->PllRange = i;
			break;
		}
	}

	/* Integer part, then the 16-bit fractional part via a shifted
	 * 64-bit division. */
	sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
	temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
	temp <<= 0x10;
	do_div(temp, ref_clock);
	sclk_setting->Fcw_frac = temp & 0xffff;

	pcc_target_percent = 10; /*  Hardcode 10% for now. */
	pcc_target_freq = clock - (clock * pcc_target_percent / 100);
	sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);

	ss_target_percent = 2; /*  Hardcode 2% for now. */
	sclk_setting->SSc_En = 0;
	if (ss_target_percent) {
		sclk_setting->SSc_En = 1;
		ss_target_freq = clock - (clock * ss_target_percent / 100);
		sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
		temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
		temp <<= 0x10;
		do_div(temp, ref_clock);
		sclk_setting->Fcw1_frac = temp & 0xffff;
	}

	return 0;
}
+
/*
 * Fill one SMC graphics (sclk) DPM level: PLL settings for the clock,
 * minimum voltage from the sclk/voltage dependency table, activity
 * threshold and hysteresis, then convert all multi-byte fields to SMC
 * byte order.  Returns non-zero only if the voltage lookup fails.
 */
static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, uint16_t sclk_al_threshold,
		struct SMU74_Discrete_GraphicsLevel *level)
{
	int result;
	/* PP_Clocks minClocks; */
	uint32_t mvdd;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	SMU_SclkSetting curr_sclk_setting = { 0 };

	/* NOTE(review): this result is overwritten without being checked;
	 * benign today because polaris10_calculate_sclk_params always
	 * returns 0 (it falls back to manual FCW computation on VBIOS
	 * failure), but fragile if that ever changes. */
	result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);

	/* populate graphics levels */
	result = polaris10_get_dependency_volt_by_clk(hwmgr,
			table_info->vdd_dep_on_sclk, clock,
			&level->MinVoltage, &mvdd);

	PP_ASSERT_WITH_CODE((0 == result),
			"can not find VDDC voltage value for "
			"VDDC engine clock dependency table",
			return result);
	level->ActivityLevel = sclk_al_threshold;

	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	level->EnabledForThrottle = 1;
	level->UpHyst = 10;
	level->DownHyst = 0;
	level->VoltageDownHyst = 0;
	level->PowerThrottle = 0;
	data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
								hwmgr->display_config.min_core_set_clock_in_sr);

	/* Default to slow, highest DPM level will be
	 * set to PPSMC_DISPLAY_WATERMARK_LOW later.
	 */
	if (data->update_up_hyst)
		level->UpHyst = (uint8_t)data->up_hyst;
	if (data->update_down_hyst)
		level->DownHyst = (uint8_t)data->down_hyst;

	level->SclkSetting = curr_sclk_setting;

	/* Convert everything the SMC reads to SMC byte order. */
	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
	CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
	return 0;
}
+
/*
 * Build every graphics (sclk) DPM level, assign a PCIe DPM level to
 * each, and download the whole GraphicsLevel array to SMC SRAM.  When
 * no PCIe table exists in the PPTable, the lowest/mid/highest enabled
 * PCIe levels are derived by walking the PCIe DPM enable mask.
 * Returns the first sub-step or SRAM transfer error, 0 on success.
 */
static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
	uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
	int result = 0;
	/* SRAM destination and size of the whole GraphicsLevel array. */
	uint32_t array = smu_data->smu7_data.dpm_table_start +
			offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
			SMU74_MAX_LEVELS_GRAPHICS;
	struct SMU74_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i, max_entry;
	uint8_t hightest_pcie_level_enabled = 0,
		lowest_pcie_level_enabled = 0,
		mid_pcie_level_enabled = 0,
		count = 0;

	polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));

	for (i = 0; i < dpm_table->sclk_table.count; i++) {

		result = polaris10_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				(uint16_t)smu_data->activity_target[i],
				&(smu_data->smc_state_table.GraphicsLevel[i]));
		if (result)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			levels[i].DeepSleepDivId = 0;
	}
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SPLLShutdownSupport))
		smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
	smu_data->smc_state_table.GraphicsDpmLevelCount =
			(uint8_t)dpm_table->sclk_table.count;
	hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);


	if (pcie_table != NULL) {
		/* PPTable supplies PCIe levels: map sclk level i to PCIe
		 * level i, clamped to the last PCIe entry. */
		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
				"There must be 1 or more PCIE levels defined in PPTable.",
				return -EINVAL);
		max_entry = pcie_entry_cnt - 1;
		for (i = 0; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel =
					(uint8_t) ((i < max_entry) ? i : max_entry);
	} else {
		/* No PCIe table: derive the highest enabled PCIe level... */
		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (hightest_pcie_level_enabled + 1))) != 0))
			hightest_pcie_level_enabled++;

		/* ...the lowest enabled PCIe level... */
		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << lowest_pcie_level_enabled)) == 0))
			lowest_pcie_level_enabled++;

		/* ...and the next enabled level above the lowest, used as
		 * the "mid" level (clamped to the highest). */
		while ((count < hightest_pcie_level_enabled) &&
				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
			count++;

		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
				hightest_pcie_level_enabled ?
						(lowest_pcie_level_enabled + 1 + count) :
						hightest_pcie_level_enabled;

		/* set pcieDpmLevel to hightest_pcie_level_enabled */
		for (i = 2; i < dpm_table->sclk_table.count; i++)
			levels[i].pcieDpmLevel = hightest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled */
		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled */
		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* level count will send to smc once at init smc table and never change */
	result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
			(uint32_t)array_size, SMC_RAM_END);

	return result;
}
+
+
+/* polaris10_populate_single_memory_level - fill one SMU74 memory (MCLK) DPM
+ * level for the SMC.
+ * @hwmgr:     power-management hardware manager
+ * @clock:     memory clock for this level (units per the MCLK dependency
+ *             table -- not visible here; TODO confirm against callers)
+ * @mem_level: SMC memory-level entry to populate
+ *
+ * Looks up the minimum VDDC/MVDD for @clock, fills the static throttle and
+ * hysteresis fields, decides whether memory stutter mode may be enabled,
+ * then byte-swaps the multi-byte fields into SMC byte order.
+ *
+ * Return: 0 on success, or the voltage-lookup error code.
+ */
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+               uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       int result = 0;
+       struct cgs_display_info info = {0, 0, NULL};
+       /* Stutter is only considered at or below this MCLK. */
+       uint32_t mclk_stutter_mode_threshold = 40000;
+
+       cgs_get_active_displays_info(hwmgr->device, &info);
+
+       if (table_info->vdd_dep_on_mclk) {
+               result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                               table_info->vdd_dep_on_mclk, clock,
+                               &mem_level->MinVoltage, &mem_level->MinMvdd);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find MinVddc voltage value from memory "
+                               "VDDC voltage dependency table", return result);
+       }
+
+       mem_level->MclkFrequency = clock;
+       mem_level->EnabledForThrottle = 1;
+       mem_level->EnabledForActivity = 0;
+       mem_level->UpHyst = 0;
+       mem_level->DownHyst = 100;
+       mem_level->VoltageDownHyst = 0;
+       mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+       mem_level->StutterEnable = false;
+       mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       data->display_timing.num_existing_displays = info.display_count;
+
+       /* Enable stutter only when the clock is low enough AND the display
+        * controller already has stutter on in DPG_PIPE_STUTTER_CONTROL. */
+       if (mclk_stutter_mode_threshold &&
+               (clock <= mclk_stutter_mode_threshold) &&
+               (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+                               STUTTER_ENABLE) & 0x1))
+               mem_level->StutterEnable = true;
+
+       if (!result) {
+               /* In-place conversion to the byte order the SMC expects. */
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+       }
+       return result;
+}
+
+/* polaris10_populate_all_memory_levels - build every MCLK DPM level and
+ * upload the MemoryLevel array into SMC RAM.
+ *
+ * Populates one SMU74_Discrete_MemoryLevel per MCLK DPM entry, marks the
+ * highest level with the high display watermark and the activity flag,
+ * then copies the whole fixed-size array to the SMC dpm table.
+ *
+ * Return: 0 on success, -EINVAL on a zero memory clock, or an error from
+ * the per-level populate or the SMC copy.
+ */
+static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+       int result;
+       /* populate MCLK dpm table to SMU7 */
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+       /* The full SMU74_MAX_LEVELS_MEMORY-sized array is always copied,
+        * regardless of how many levels are actually populated. */
+       uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+                       SMU74_MAX_LEVELS_MEMORY;
+       struct SMU74_Discrete_MemoryLevel *levels =
+                       smu_data->smc_state_table.MemoryLevel;
+       uint32_t i;
+
+       for (i = 0; i < dpm_table->mclk_table.count; i++) {
+               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+                               "can not populate memory level as memory clock is zero",
+                               return -EINVAL);
+               result = polaris10_populate_single_memory_level(hwmgr,
+                               dpm_table->mclk_table.dpm_levels[i].value,
+                               &levels[i]);
+               /* Highest level gets the high watermark and activity flag. */
+               if (i == dpm_table->mclk_table.count - 1) {
+                       levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+                       levels[i].EnabledForActivity = 1;
+               }
+               if (result)
+                       return result;
+       }
+
+       /* In order to prevent MC activity from stutter mode to push DPM up,
+        * the UVD change complements this by putting the MCLK in
+        * a higher state by default such that we are not affected by
+        * up threshold or and MCLK DPM latency.
+        */
+       levels[0].ActivityLevel = 0x1f;
+       CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+       smu_data->smc_state_table.MemoryDpmLevelCount =
+                       (uint8_t)dpm_table->mclk_table.count;
+       hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+                       phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+       /* level count will send to smc once at init smc table and never change */
+       result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+                       (uint32_t)array_size, SMC_RAM_END);
+
+       return result;
+}
+
+/* polaris10_populate_mvdd_value - look up the MVDD SMIO pattern for @mclk.
+ * @hwmgr:    power-management hardware manager
+ * @mclk:     memory clock to find a voltage for
+ * @smio_pat: output; Voltage is set from the MVDD voltage table
+ *
+ * Return: 0 when a matching entry is found; -EINVAL when MVDD is not
+ * dynamically controlled or @mclk exceeds every dependency entry.
+ */
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+               uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t idx;
+
+       /* Static MVDD control: nothing to look up. */
+       if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+               return -EINVAL;
+
+       /* First dependency entry whose clock covers the request wins. */
+       for (idx = 0; idx < table_info->vdd_dep_on_mclk->count; idx++) {
+               if (mclk <= table_info->vdd_dep_on_mclk->entries[idx].clk) {
+                       smio_pat->Voltage =
+                                       data->mvdd_voltage_table.entries[idx].value;
+                       return 0;
+               }
+       }
+
+       PP_ASSERT_WITH_CODE(false,
+                       "MVDD Voltage is outside the supported range.",
+                       return -EINVAL);
+
+       return -EINVAL;
+}
+
+/* polaris10_populate_smc_acpi_level - fill the engine (SCLK) and memory
+ * (MCLK) ACPI levels of the SMC DPM table from the VBIOS boot-up state.
+ *
+ * The ACPI levels reuse the VBIOS boot clocks, look up matching minimum
+ * voltages, and byte-swap every multi-byte field to SMC byte order.
+ *
+ * Note: the voltage-lookup PP_ASSERT_WITH_CODE actions are empty, so a
+ * failed lookup only logs and execution continues (original behavior).
+ *
+ * Fix: removed the dead us_mvdd computation -- its result was never read;
+ * MinMvdd below comes from a fresh polaris10_populate_mvdd_value() call.
+ *
+ * Return: the last lookup/divider result (0 on success).
+ */
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+               SMU74_Discrete_DpmTable *table)
+{
+       int result = 0;
+       uint32_t sclk_frequency;
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       SMIO_Pattern vol_level;
+       uint32_t mvdd;
+
+       table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+       /* Get MinVoltage and Frequency from DPM0,
+        * already converted to SMC_UL */
+       sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_sclk,
+                       sclk_frequency,
+                       &table->ACPILevel.MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Cannot find ACPI VDDC voltage value "
+                       "in Clock Dependency Table",
+                       );
+
+       result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
+       PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+       table->ACPILevel.DeepSleepDivId = 0;
+       table->ACPILevel.CcPwrDynRm = 0;
+       table->ACPILevel.CcPwrDynRm1 = 0;
+
+       /* Convert the ACPI engine level to SMC byte order. */
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+       CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+       /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+       table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_mclk,
+                       table->MemoryACPILevel.MclkFrequency,
+                       &table->MemoryACPILevel.MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Cannot find ACPI VDDCI voltage value "
+                       "in Clock Dependency Table",
+                       );
+
+       /* MinMvdd: dynamic lookup at mclk=0 (lowest entry) if possible. */
+       if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+               table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+       else
+               table->MemoryACPILevel.MinMvdd = 0;
+
+       table->MemoryACPILevel.StutterEnable = false;
+
+       table->MemoryACPILevel.EnabledForThrottle = 0;
+       table->MemoryACPILevel.EnabledForActivity = 0;
+       table->MemoryACPILevel.UpHyst = 0;
+       table->MemoryACPILevel.DownHyst = 100;
+       table->MemoryACPILevel.VoltageDownHyst = 0;
+       table->MemoryACPILevel.ActivityLevel =
+                       PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+       return result;
+}
+
+/* polaris10_populate_smc_vce_level - fill the VCE DPM levels from the
+ * multimedia (MM) clock/voltage dependency table.
+ *
+ * For each MM entry: ECLK frequency, a packed MinVoltage (VDDC | VDDCI |
+ * phase count), and the DFS post divider retrieved from the VBIOS.
+ *
+ * Return: 0 on success; -EINVAL when the MM table is empty (result keeps
+ * its initializer -- presumably deliberate); or a divider-lookup error.
+ */
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU74_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
+
+       table->VceLevelCount = (uint8_t)(mm_table->count);
+       table->VceBootLevel = 0;
+
+       for (count = 0; count < table->VceLevelCount; count++) {
+               table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+               table->VceLevel[count].MinVoltage = 0;
+               table->VceLevel[count].MinVoltage |=
+                               (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+               /* Derive VDDCI from VDDC according to the control method. */
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       /* NOTE(review): this branch pre-applies VOLTAGE_SCALE and
+                        * VDDCI_SHIFT, yet the |= below applies both again --
+                        * looks double-scaled relative to the other branches;
+                        * confirm against the SMU74 voltage encoding. */
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
+               table->VceLevel[count].MinVoltage |=
+                               (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /*retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->VceLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for VCE engine clock",
+                               return result);
+
+               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               /* Convert to SMC byte order. */
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+
+/* polaris10_populate_smc_samu_level - fill the SAMU DPM levels from the
+ * multimedia (MM) clock/voltage dependency table.
+ *
+ * Mirrors the VCE/UVD population: per-entry frequency, packed MinVoltage
+ * (VDDC | VDDCI | phase count), and the VBIOS DFS post divider.
+ *
+ * Return: 0 on success; -EINVAL when the MM table is empty (result keeps
+ * its initializer); or a divider-lookup error.
+ */
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+               SMU74_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
+
+       table->SamuBootLevel = 0;
+       table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+       for (count = 0; count < table->SamuLevelCount; count++) {
+               /* not sure whether we need evclk or not */
+               table->SamuLevel[count].MinVoltage = 0;
+               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+
+               /* Derive VDDCI from VDDC according to the control method. */
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       /* NOTE(review): pre-scaled/shifted here and scaled/shifted
+                        * again below -- same apparent double application as in
+                        * the VCE/UVD variants; confirm intent. */
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->SamuLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for samu clock", return result);
+
+               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               /* Convert to SMC byte order. */
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+       }
+       return result;
+}
+
+/* polaris10_populate_memory_timing_parameters - capture the DRAM arbiter
+ * timing registers for one (engine clock, memory clock) pair.
+ *
+ * Asks the VBIOS (via atomctrl) to program DRAM timing for the pair, then
+ * reads back MC_ARB_DRAM_TIMING / MC_ARB_DRAM_TIMING2 / MC_ARB_BURST_TIME
+ * and stores them (timing words already in SMC byte order) in @arb_regs.
+ *
+ * Return: 0 on success, or the atomctrl error.
+ */
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+               int32_t eng_clock, int32_t mem_clock,
+               SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+       uint32_t dram_timing;
+       uint32_t dram_timing2;
+       uint32_t burst_time;
+       int result;
+
+       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+                       eng_clock, mem_clock);
+       PP_ASSERT_WITH_CODE(result == 0,
+                       "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+       /* These reads must follow the VBIOS call above, which reprograms
+        * the registers for the requested clock pair. */
+       dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+       dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+       burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
+       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing);
+       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+       arb_regs->McArbBurstTime   = (uint8_t)burst_time;
+
+       return 0;
+}
+
+/* polaris10_program_memory_timing_parameters - build the full MC arbiter
+ * DRAM timing table (one entry per SCLK/MCLK pair) and upload it to SMC
+ * RAM at arb_table_start.
+ *
+ * Return: 0 on success, or the first per-entry/atomctrl/SMC-copy error.
+ */
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       /* Zero-initialize: only count(sclk) x count(mclk) entries are filled
+        * below, yet the whole fixed-size table is copied to the SMC, so an
+        * uninitialized struct would leak stack garbage to firmware. */
+       struct SMU74_Discrete_MCArbDramTimingTable arb_regs = {0};
+       uint32_t i, j;
+       int result = 0;
+
+       for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+               for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+                       result = polaris10_populate_memory_timing_parameters(hwmgr,
+                                       hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+                                       hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+                                       &arb_regs.entries[i][j]);
+                       if (result == 0)
+                               result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
+                       if (result != 0)
+                               return result;
+               }
+       }
+
+       result = smu7_copy_bytes_to_smc(
+                       hwmgr,
+                       smu_data->smu7_data.arb_table_start,
+                       (uint8_t *)&arb_regs,
+                       sizeof(SMU74_Discrete_MCArbDramTimingTable),
+                       SMC_RAM_END);
+       return result;
+}
+
+/* polaris10_populate_smc_uvd_level - fill the UVD DPM levels from the
+ * multimedia (MM) clock/voltage dependency table.
+ *
+ * For each MM entry: VCLK and DCLK frequencies, a packed MinVoltage
+ * (VDDC | VDDCI | phase count), and one VBIOS DFS post divider per clock.
+ *
+ * Return: 0 on success; -EINVAL when the MM table is empty (result keeps
+ * its initializer); or a divider-lookup error.
+ */
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       int result = -EINVAL;
+       uint8_t count;
+       struct pp_atomctrl_clock_dividers_vi dividers;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                       table_info->mm_dep_table;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
+
+       table->UvdLevelCount = (uint8_t)(mm_table->count);
+       table->UvdBootLevel = 0;
+
+       for (count = 0; count < table->UvdLevelCount; count++) {
+               table->UvdLevel[count].MinVoltage = 0;
+               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+               table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+                               VOLTAGE_SCALE) << VDDC_SHIFT;
+
+               /* Derive VDDCI from VDDC according to the control method. */
+               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       /* NOTE(review): pre-scaled/shifted here and scaled/shifted
+                        * again below -- same apparent double application as in
+                        * the VCE/SAMU variants; confirm intent. */
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+               table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].VclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for Vclk clock", return result);
+
+               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                               table->UvdLevel[count].DclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((0 == result),
+                               "can not find divide id for Dclk clock", return result);
+
+               table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               /* Convert to SMC byte order. */
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+       }
+
+       return result;
+}
+
+/* polaris10_populate_smc_boot_level - set the SCLK/MCLK boot DPM levels in
+ * the SMC table from the VBIOS boot-up clocks, plus the boot voltages
+ * (scaled by VOLTAGE_SCALE and byte-swapped for the SMC).
+ *
+ * Fix: the phm_find_boot_level() results were previously ignored, so a
+ * boot clock missing from the DPM table went unnoticed. Log the miss and
+ * fall back explicitly to DPM level 0.
+ *
+ * Return: 0 (lookup misses are downgraded to the level-0 fallback).
+ */
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table */
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+                       data->vbios_boot_state.sclk_bootup_value,
+                       (uint32_t *)&(table->GraphicsBootLevel));
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!",
+                       result = 0; table->GraphicsBootLevel = 0);
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+                       data->vbios_boot_state.mclk_bootup_value,
+                       (uint32_t *)&(table->MemoryBootLevel));
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "VBIOS did not find boot memory clock value in dependency table. Using Memory DPM level 0!",
+                       result = 0; table->MemoryBootLevel = 0);
+
+       table->BootVddc  = data->vbios_boot_state.vddc_bootup_value *
+                       VOLTAGE_SCALE;
+       table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+                       VOLTAGE_SCALE;
+       table->BootMVdd  = data->vbios_boot_state.mvdd_bootup_value *
+                       VOLTAGE_SCALE;
+
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+       return 0;
+}
+
+/* Record which SCLK/MCLK DPM levels correspond to the VBIOS boot clocks
+ * in the cached SMC state table. (The typo "initailial" in the name is
+ * kept -- it is the function's public identifier.)
+ *
+ * Return: 0 always.
+ */
+static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint8_t level;
+
+       /* Boot graphics level: first SCLK dependency entry whose clock
+        * reaches the VBIOS boot engine clock. */
+       for (level = 0; level < (uint8_t)(table_info->vdd_dep_on_sclk->count); level++) {
+               if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+                               hw_data->vbios_boot_state.sclk_bootup_value) {
+                       smu_data->smc_state_table.GraphicsBootLevel = level;
+                       break;
+               }
+       }
+
+       /* Boot memory level: same scan against the MCLK dependency table. */
+       for (level = 0; level < (uint8_t)(table_info->vdd_dep_on_mclk->count); level++) {
+               if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+                               hw_data->vbios_boot_state.mclk_bootup_value) {
+                       smu_data->smc_state_table.MemoryBootLevel = level;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+/* polaris10_populate_clock_stretcher_data_table - derive clock-stretcher
+ * (CKS) voltage offsets from the ring-oscillator fuse value.
+ *
+ * Reads the RO fuse from the SMU efuse block, evaluates chip-specific
+ * fitted voltage models per SCLK level to compute a CKS voltage offset,
+ * validates the PPTable stretch amount, and clears bit 0 of PWR_CKS_CNTL.
+ *
+ * Return: 0 on success; -EINVAL for an unsupported stretch amount (the
+ * ClockStretcher platform cap is also cleared in that case).
+ */
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+       uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+       uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+
+       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+       /* Read SMU_Eefuse to read and calculate RO and determine
+        * if the part is SS or FF. if RO >= 1660MHz, part is FF.
+        */
+       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixSMU_EFUSE_0 + (67 * 4));
+       efuse &= 0xFF000000;
+       efuse = efuse >> 24;
+
+       /* RO range endpoints differ per ASIC. */
+       if (hwmgr->chip_id == CHIP_POLARIS10) {
+               min = 1000;
+               max = 2300;
+       } else {
+               min = 1100;
+               max = 2100;
+       }
+
+       /* Map the 8-bit fuse value linearly onto [min, max]. */
+       ro = efuse * (max - min) / 255 + min;
+
+       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+       /* NOTE(review): the |= below assumes Sclk_CKS_masterEn0_7 starts at
+        * zero -- presumably the state table was zeroed earlier; confirm. */
+       for (i = 0; i < sclk_table->count; i++) {
+               smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+                               sclk_table->entries[i].cks_enable << i;
+               /* Fitted voltage models (constants are ASIC characterization
+                * data) for the with/without-CKS operating points. */
+               if (hwmgr->chip_id == CHIP_POLARIS10) {
+                       volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
+                                               (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+                       volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+                                       (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+               } else {
+                       volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
+                                               (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+                       volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+                                       (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+               }
+
+               /* Record the saving (rounded up, presumably in 6.25 mV steps
+                * given the /625 -- confirm); otherwise the previous level's
+                * offset is carried forward. */
+               if (volt_without_cks >= volt_with_cks)
+                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+                                       sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+               smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+       }
+
+       smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+       /* Populate CKS Lookup Table */
+       /* NOTE(review): stretch_amount2 only validates stretch_amount here;
+        * its value is never consumed afterwards in this function. */
+       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+               stretch_amount2 = 0;
+       else if (stretch_amount == 3 || stretch_amount == 4)
+               stretch_amount2 = 1;
+       else {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ClockStretcher);
+               PP_ASSERT_WITH_CODE(false,
+                               "Stretch Amount in PPTable not supported\n",
+                               return -EINVAL);
+       }
+
+       /* Clear bit 0 of PWR_CKS_CNTL (exact bit semantics not visible
+        * here -- see the SMU register spec). */
+       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+       value &= 0xFFFFFFFE;
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+       return 0;
+}
+
+/* polaris10_populate_vr_config - encode the voltage-regulator topology
+ * (VDDGFX/VDDC/VDDCI/MVDD control methods) into table->VRConfig.
+ *
+ * VDDGFX is merged with VDDC; VDDC must be SVI2-controlled in merged mode
+ * (otherwise only an assert message is printed and the field is left
+ * unset). When MVDD is SVI2-controlled, the AllowMvddSwitch soft register
+ * is also written.
+ *
+ * NOTE(review): VRConfig is only OR'ed into -- this assumes the caller
+ * starts from a zeroed table; confirm.
+ *
+ * Return: 0 always.
+ */
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+               struct SMU74_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       uint16_t config;
+
+       config = VR_MERGED_WITH_VDDC;
+       table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+       /* Set Vddc Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+               config = VR_SVI2_PLANE_1;
+               table->VRConfig |= config;
+       } else {
+               PP_ASSERT_WITH_CODE(false,
+                               "VDDC should be on SVI2 control in merged mode!",
+                               );
+       }
+       /* Set Vddci Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+               config = VR_SVI2_PLANE_2;  /* only in merged mode */
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+               config = VR_SMIO_PATTERN_1;
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       } else {
+               config = VR_STATIC_VOLTAGE;
+               table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+       }
+       /* Set Mvdd Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+               config = VR_SVI2_PLANE_2;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+               /* Tell the SMC it may switch MVDD dynamically. */
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
+                       offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
+       } else {
+               config = VR_STATIC_VOLTAGE;
+               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+       }
+
+       return 0;
+}
+
+
+/* Populate the AVFS (adaptive voltage/frequency scaling) coefficients in
+ * the DPM table and upload the mean-n-sigma and SCLK-offset sub-tables to
+ * SMC RAM.  Returns 0 on success, a non-zero error code otherwise.
+ */
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+       SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
+       int result = 0;
+       struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+       AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+       AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+       uint32_t tmp, i;
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+
+
+       /* Nothing to program when BTC fusing reports AVFS as unsupported. */
+       if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+               return result;
+
+       result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+       if (0 == result) {
+               /* Vdroop and fuse gain/offset coefficients for CKS-on and
+                * CKS-off operation, converted to SMC byte order.
+                */
+               table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+               table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+               table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+               table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+               table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+               table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+               table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+               table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+               table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+               table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
+               table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+               table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+               table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+               table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
+               table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+               AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+               AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+               AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+               AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+               AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+               AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+               AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+               /* NOTE(review): assumes sclk_table has at least
+                * NUM_VFT_COLUMNS entries - confirm against pptable parsing.
+                */
+               for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+                       AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+                       AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+               }
+
+               /* Look up the SMC RAM address of each AVFS sub-table before
+                * uploading it.  These reads and copies were previously
+                * unchecked, so a failed address read could have made us
+                * write the table to a garbage SMC address.
+                */
+               result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+                               &tmp, SMC_RAM_END);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to read AvfsMeanNSigma table address!",
+                               return result);
+
+               result = smu7_copy_bytes_to_smc(hwmgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_meanNsigma,
+                                       sizeof(AVFS_meanNsigma_t),
+                                       SMC_RAM_END);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to upload AvfsMeanNSigma table!",
+                               return result);
+
+               result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+                               &tmp, SMC_RAM_END);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to read AvfsSclkOffsetTable address!",
+                               return result);
+
+               result = smu7_copy_bytes_to_smc(hwmgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_SclkOffset,
+                                       sizeof(AVFS_Sclk_Offset_t),
+                                       SMC_RAM_END);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to upload AvfsSclkOffsetTable!",
+                               return result);
+
+               data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+               data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+       }
+       return result;
+}
+
+/* Set the 'current' index (top byte of the first dword) of the MC ARB
+ * timing table in SMC RAM to MC_CG_ARB_FREQ_F1.
+ */
+static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       uint32_t arb_word;
+       int result;
+
+       /* Read-modify-write of the table's first dword only: we never
+        * upload the whole ARB table, just individual fields, and the
+        * 'current' field happens to live in its top byte.  Ugly, but
+        * moving it into a soft register would change the SMC layout.
+        */
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       smu_data->smu7_data.arb_table_start, &arb_word, SMC_RAM_END);
+       if (result != 0)
+               return result;
+
+       arb_word = (arb_word & 0x00FFFFFF) |
+                       (((uint32_t)MC_CG_ARB_FREQ_F1) << 24);
+
+       return smu7_write_smc_sram_dword(hwmgr,
+                       smu_data->smu7_data.arb_table_start, arb_word, SMC_RAM_END);
+}
+
+/* Select the power-tune defaults entry named by the power-play table's
+ * data-set ID (1-based), falling back to entry 0 when the ID is zero,
+ * out of range, or no table info is available.
+ */
+static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       /* Default first; override below only when the ID is usable. */
+       smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+       if (table_info == NULL)
+               return;
+
+       if (table_info->cac_dtp_table->usPowerTuneDataSetID &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX)
+               smu_data->power_tune_defaults =
+                               &polaris10_power_tune_data_set_array
+                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+}
+
+/* Capture the boot-time graphics DPM level-0 settings as the default
+ * graphics and compute power profiles.
+ */
+static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct SMU74_Discrete_GraphicsLevel *gfx_levels =
+                               smu_data->smc_state_table.GraphicsLevel;
+       unsigned level_count = smu_data->smc_state_table.GraphicsDpmLevelCount;
+       unsigned min_level;
+
+       /* Level 0 provides the default graphics profile values; the SMC
+        * table is stored big-endian, hence the be16/be32 conversions.
+        */
+       hwmgr->default_gfx_power_profile.activity_threshold =
+                       be16_to_cpu(gfx_levels[0].ActivityLevel);
+       hwmgr->default_gfx_power_profile.up_hyst = gfx_levels[0].UpHyst;
+       hwmgr->default_gfx_power_profile.down_hyst = gfx_levels[0].DownHyst;
+       hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+       hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+       hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+       /* Workaround compute SDMA instability: disable lowest SCLK
+        * DPM level. Optimize compute power profile: Use only highest
+        * 2 power levels (if more than 2 are available), Hysteresis:
+        * 0ms up, 5ms down
+        */
+       if (level_count > 2)
+               min_level = level_count - 2;
+       else
+               min_level = (level_count == 2) ? 1 : 0;
+
+       hwmgr->default_compute_power_profile.min_sclk =
+               be32_to_cpu(gfx_levels[min_level].SclkSetting.SclkFrequency);
+       hwmgr->default_compute_power_profile.up_hyst = 0;
+       hwmgr->default_compute_power_profile.down_hyst = 5;
+
+       hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+       hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+}
+
+/* Build the full SMU74 discrete DPM table in host memory (voltage tables,
+ * DPM levels, thermal limits, GPIO wiring, BIF SCLK dividers), convert the
+ * multi-byte fields to SMC byte order, and upload the table to SMC RAM.
+ * Returns 0 on success or the first populate/upload error.  The individual
+ * populate steps below are order-dependent.
+ */
+static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+       uint8_t i;
+       struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+       pp_atomctrl_clock_dividers_vi dividers;
+
+       polaris10_initialize_power_tune_defaults(hwmgr);
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+               polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+       /* Global feature flags advertised to the SMC. */
+       table->SystemFlags = 0;
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StepVddc))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (hw_data->is_memory_gddr5)
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+       /* ULV is only programmed when supported and a voltage offset exists. */
+       if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+               result = polaris10_populate_ulv_state(hwmgr, table);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to initialize ULV state!", return result);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                               ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+       }
+
+       /* Populate every DPM level group; bail out on the first failure. */
+       result = polaris10_populate_smc_link_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Link Level!", return result);
+
+       result = polaris10_populate_all_graphic_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Graphics Level!", return result);
+
+       result = polaris10_populate_all_memory_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Memory Level!", return result);
+
+       result = polaris10_populate_smc_acpi_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize ACPI Level!", return result);
+
+       result = polaris10_populate_smc_vce_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize VCE Level!", return result);
+
+       result = polaris10_populate_smc_samu_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize SAMU Level!", return result);
+
+       /* Since only the initial state is completely set up at this point
+        * (the other states are just copies of the boot state) we only
+        * need to populate the  ARB settings for the initial state.
+        */
+       result = polaris10_program_memory_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to Write ARB settings for the initial state.", return result);
+
+       result = polaris10_populate_smc_uvd_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize UVD Level!", return result);
+
+       result = polaris10_populate_smc_boot_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Boot Level!", return result);
+
+       result = polaris10_populate_smc_initailial_state(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to initialize Boot State!", return result);
+
+       result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to populate BAPM Parameters!", return result);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ClockStretcher)) {
+               result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+               PP_ASSERT_WITH_CODE(0 == result,
+                               "Failed to populate Clock Stretcher Data Table!",
+                               return result);
+       }
+
+       result = polaris10_populate_avfs_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
+       /* Fixed controller parameters.  Temperature limits appear to be in
+        * a Q8.8 fixed-point degree format (see the conversion macro name)
+        * - TODO confirm against the SMU74 firmware interface.
+        */
+       table->CurrSclkPllRange = 0xff;
+       table->GraphicsVoltageChangeEnable  = 1;
+       table->GraphicsThermThrottleEnable  = 1;
+       table->GraphicsInterval = 1;
+       table->VoltageInterval  = 1;
+       table->ThermalInterval  = 1;
+       table->TemperatureLimitHigh =
+                       table_info->cac_dtp_table->usTargetOperatingTemp *
+                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->TemperatureLimitLow  =
+                       (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+                       SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->MemoryVoltageChangeEnable = 1;
+       table->MemoryInterval = 1;
+       table->VoltageResponseTime = 0;
+       table->PhaseResponseTime = 0;
+       table->MemoryThermThrottleEnable = 1;
+       table->PCIeBootLinkLevel = 0;
+       table->PCIeGenInterval = 1;
+       table->VRConfig = 0;
+
+       result = polaris10_populate_vr_config(hwmgr, table);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to populate VRConfig setting!", return result);
+
+       table->ThermGpio = 17;
+       table->SclkStepSize = 0x4000;
+
+       /* GPIO wiring: fall back to "unused pin" and drop the matching
+        * platform cap when the VBIOS does not assign a pin.
+        */
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+               table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+       } else {
+               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_RegulatorHot);
+       }
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+                       &gpio_pin)) {
+               table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_AutomaticDCTransition);
+       } else {
+               table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_AutomaticDCTransition);
+       }
+
+       /* Thermal Output GPIO */
+       if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+                       &gpio_pin)) {
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_ThermalOutGPIO);
+
+               table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+               /* For porlarity read GPIOPAD_A with assigned Gpio pin
+                * since VBIOS will program this register to set 'inactive state',
+                * driver can then determine 'active state' from this and
+                * program SMU with correct polarity
+                */
+               table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+                                       & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+               /* if required, combine VRHot/PCC with thermal out GPIO */
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+               && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+       } else {
+               table->ThermOutGpio = 17;
+               table->ThermOutPolarity = 1;
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+       }
+
+       /* Populate BIF_SCLK levels into SMC DPM table */
+       /* Note the inclusive bound: entry 0 feeds the ULV state and entries
+        * 1..count map to LinkLevel[0..count-1].
+        */
+       for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
+               PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+               if (i == 0)
+                       table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+               else
+                       table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+       }
+
+       /* Convert the remaining multi-byte fields to SMC byte order. */
+       for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+       result = smu7_copy_bytes_to_smc(hwmgr,
+                       smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+                       (uint8_t *)&(table->SystemFlags),
+                       sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+                       SMC_RAM_END);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to upload dpm data to SMC memory!", return result);
+
+       result = polaris10_init_arb_table_index(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to upload arb data to SMC memory!", return result);
+
+       result = polaris10_populate_pm_fuses(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result,
+                       "Failed to  populate PM fuses to SMC memory!", return result);
+
+       polaris10_save_default_power_profile(hwmgr);
+
+       return 0;
+}
+
+/* Reprogram the ARB/memory timing parameters, but only when an overdrive
+ * change to SCLK or MCLK is pending; otherwise do nothing and return 0.
+ */
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* Combine the flag bits with '|'; the previous '+' only worked
+        * because the two DPMTABLE_OD_UPDATE_* values are distinct bits.
+        */
+       if (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+               return polaris10_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+/* Program the vdroop override settings and enable AVFS in the SMC.
+ * Returns 0 on success (or when AVFS is unsupported), non-zero otherwise.
+ */
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+       int ret;
+       struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+               return 0;
+
+       /* The vdroop settings must reach the SMC before AVFS is enabled;
+        * this return value was previously discarded, silently enabling
+        * AVFS with unprogrammed droop parameters on failure.
+        */
+       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+       if (ret)
+               return ret;
+
+       ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
+                       0 : -1;
+
+       if (!ret)
+               /* If this param is not changed, this function could fire unnecessarily */
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+       return ret;
+}
+
+/* Build the SMU74 fan-control table from the thermal controller's fan
+ * parameters and upload it to SMC RAM.  On any precondition failure or
+ * upload error the MicrocodeFanControl capability is dropped and the
+ * function still returns 0 (best-effort: fan control falls back to a
+ * non-microcode mode rather than failing setup).
+ */
+static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       uint32_t duty100;
+       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       uint16_t fdo_min, slope1, slope2;
+       uint32_t reference_clock;
+       int res;
+       uint64_t tmp64;
+
+       /* No fan fitted: nothing to program. */
+       if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       /* Fan table address in SMC RAM was never resolved. */
+       if (smu_data->smu7_data.fan_table_start == 0) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_FDO_CTRL1, FMAX_DUTY100);
+
+       /* A zero 100%-duty reading means there is no usable duty range. */
+       if (duty100 == 0) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       /* Scale the minimum PWM into FDO duty units; the /10000 divisor
+        * suggests usPWMMin is in hundredths of a percent - TODO confirm.
+        */
+       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+                       usPWMMin * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (uint16_t)tmp64;
+
+       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+                       hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+       /* Fixed-point fan-curve slopes; the '+50 ... /100' rounds to the
+        * nearest integer after the *16 scaling.
+        */
+       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       /* Temperatures are divided by 100 (with rounding) before upload,
+        * so usTMin/usTMed/usTMax appear to be in hundredths of a degree
+        * - TODO confirm against the thermal controller parsing code.
+        */
+       fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMin) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMed) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+                       thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(hwmgr->
+                       thermal_controller.advanceFanControlParameters.ucTHyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = smu7_get_xclk(hwmgr);
+
+       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+                       thermal_controller.advanceFanControlParameters.ulCycleDelay *
+                       reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+                       hwmgr->device, CGS_IND_REG__SMC,
+                       CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+       res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
+                       (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+                       SMC_RAM_END);
+
+       /* Optional limits are only sent while everything has succeeded. */
+       if (!res && hwmgr->thermal_controller.
+                       advanceFanControlParameters.ucMinimumPWMLimit)
+               res = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_SetFanMinPwm,
+                               hwmgr->thermal_controller.
+                               advanceFanControlParameters.ucMinimumPWMLimit);
+
+       if (!res && hwmgr->thermal_controller.
+                       advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+               res = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_SetFanSclkTarget,
+                               hwmgr->thermal_controller.
+                               advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+       /* Deliberate best-effort: errors disable microcode fan control
+        * instead of being propagated to the caller.
+        */
+       if (res)
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+
+       return 0;
+}
+
+/* Write the UVD boot DPM level into SMC RAM and, when UVD DPM is off or
+ * StablePState is requested, pin the enabled-mask to that single level.
+ */
+static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t offset, value;
+
+       /* Boot from the highest multimedia DPM level when one exists. */
+       if (table_info->mm_dep_table->count > 0)
+               smu_data->smc_state_table.UvdBootLevel =
+                               (uint8_t) (table_info->mm_dep_table->count - 1);
+       else
+               smu_data->smc_state_table.UvdBootLevel = 0;
+
+       /* UvdBootLevel occupies the top byte of its dword-aligned word in
+        * SMC RAM, so read-modify-write just that byte.
+        */
+       offset = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+       offset &= ~0x3u;
+       value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, offset);
+       value = (value & 0x00FFFFFF) |
+                       (smu_data->smc_state_table.UvdBootLevel << 24);
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, offset, value);
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_UVDDPM) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+       return 0;
+}
+
+/* Write the VCE boot DPM level into SMC RAM; under StablePState also pin
+ * the VCE DPM enabled-mask to that level.
+ */
+static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t offset, value;
+
+       /* StablePState boots VCE from the highest mm level, else level 0. */
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_StablePState))
+               smu_data->smc_state_table.VceBootLevel =
+                       (uint8_t) (table_info->mm_dep_table->count - 1);
+       else
+               smu_data->smc_state_table.VceBootLevel = 0;
+
+       /* VceBootLevel is byte 2 of its dword-aligned word in SMC RAM;
+        * read-modify-write just that byte.
+        */
+       offset = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+       offset &= ~0x3u;
+       value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, offset);
+       value = (value & 0xFF00FFFF) |
+                       (smu_data->smc_state_table.VceBootLevel << 16);
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, offset, value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+       return 0;
+}
+
+/* Write the SAMU boot DPM level (always 0) into SMC RAM; under
+ * StablePState also pin the SAMU DPM enabled-mask to that level.
+ */
+static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       uint32_t offset, value;
+
+       smu_data->smc_state_table.SamuBootLevel = 0;
+
+       /* SamuBootLevel occupies the low byte of its dword-aligned word in
+        * SMC RAM; read-modify-write just that byte.
+        */
+       offset = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+       offset &= ~0x3u;
+       value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, offset);
+       value = (value & 0xFFFFFF00) |
+                       (smu_data->smc_state_table.SamuBootLevel << 0);
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, offset, value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+       return 0;
+}
+
+
+/* Cache the BIF SCLK for each PCIe level from the power-play table,
+ * clamped to the number of link levels the SMU firmware table can hold.
+ */
+static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+       int limit, i;
+
+       limit = pcie_table->count;
+       if (limit > SMU74_MAX_LEVELS_LINK)
+               limit = SMU74_MAX_LEVELS_LINK;
+
+       /* Setup BIF_SCLK levels */
+       for (i = 0; i < limit; i++)
+               smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+       return 0;
+}
+
+/* Dispatch a per-IP SMC table update by table type; unhandled types are
+ * a no-op.  Always returns 0.
+ */
+static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+       switch (type) {
+       case SMU_UVD_TABLE:
+               polaris10_update_uvd_smc_table(hwmgr);
+               break;
+       case SMU_VCE_TABLE:
+               polaris10_update_vce_smc_table(hwmgr);
+               break;
+       case SMU_SAMU_TABLE:
+               polaris10_update_samu_smc_table(hwmgr);
+               break;
+       case SMU_BIF_TABLE:
+               polaris10_update_bif_smc_table(hwmgr);
+               /* was an implicit (unannotated) fall-through into default */
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+/* Push the low-SCLK interrupt threshold to SMC RAM when the feature is
+ * enabled and the arbiter value changed, then reprogram memory timings if
+ * an overdrive update is pending.
+ */
+static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+       uint32_t threshold = 0;
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               /* Cache the new threshold, then upload a byte-swapped copy. */
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               threshold = data->low_sclk_interrupt_threshold;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(threshold);
+
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr,
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU74_Discrete_DpmTable,
+                                       LowSclkInterruptThreshold),
+                               (uint8_t *)&threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to update SCLK threshold!", return result);
+
+       result = polaris10_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters!",
+                       );
+
+       return result;
+}
+
+static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU74_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+               case PreVBlankGap:
+                       return offsetof(SMU74_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU74_SoftRegisters, VBlankTimeout);
+               case UcodeLoadStatus:
+                       return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
+               case DRAM_LOG_ADDR_H:
+                       return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_H);
+               case DRAM_LOG_ADDR_L:
+                       return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_L);
+               case DRAM_LOG_PHY_ADDR_H:
+                       return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+               case DRAM_LOG_PHY_ADDR_L:
+                       return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+               case DRAM_LOG_BUFF_SIZE:
+                       return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+               }
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case UvdBootLevel:
+                       return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+               case VceBootLevel:
+                       return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+               case SamuBootLevel:
+                       return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+               }
+       }
+       pr_warn("can't get the offset of type %x member %x\n", type, member);
+       return 0;
+}
+
+static uint32_t polaris10_get_mac_definition(uint32_t value)
+{
+       switch (value) {
+       case SMU_MAX_LEVELS_GRAPHICS:
+               return SMU74_MAX_LEVELS_GRAPHICS;
+       case SMU_MAX_LEVELS_MEMORY:
+               return SMU74_MAX_LEVELS_MEMORY;
+       case SMU_MAX_LEVELS_LINK:
+               return SMU74_MAX_LEVELS_LINK;
+       case SMU_MAX_ENTRIES_SMIO:
+               return SMU74_MAX_ENTRIES_SMIO;
+       case SMU_MAX_LEVELS_VDDC:
+               return SMU74_MAX_LEVELS_VDDC;
+       case SMU_MAX_LEVELS_VDDGFX:
+               return SMU74_MAX_LEVELS_VDDGFX;
+       case SMU_MAX_LEVELS_VDDCI:
+               return SMU74_MAX_LEVELS_VDDCI;
+       case SMU_MAX_LEVELS_MVDD:
+               return SMU74_MAX_LEVELS_MVDD;
+       case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+               return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+       }
+
+       pr_warn("can't get the mac of %x\n", value);
+       return 0;
+}
+
+static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t tmp;
+       int result;
+       bool error = false;
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, DpmTable),
+                       &tmp, SMC_RAM_END);
+
+       if (0 == result)
+               smu_data->smu7_data.dpm_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, SoftRegisters),
+                       &tmp, SMC_RAM_END);
+
+       if (!result) {
+               data->soft_regs_start = tmp;
+               smu_data->smu7_data.soft_regs_start = tmp;
+       }
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, mcRegisterTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.mc_reg_table_start = tmp;
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, FanTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.fan_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.arb_table_start = tmp;
+
+       error |= (0 != result);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                       SMU7_FIRMWARE_HEADER_LOCATION +
+                       offsetof(SMU74_Firmware_Header, Version),
+                       &tmp, SMC_RAM_END);
+
+       if (!result)
+               hwmgr->microcode_version_info.SMC = tmp;
+
+       error |= (0 != result);
+
+       return error ? -1 : 0;
+}
+
+static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+                       ? true : false;
+}
+
+static int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+               struct amd_pp_profile *request)
+{
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
+                       (hwmgr->smu_backend);
+       struct SMU74_Discrete_GraphicsLevel *levels =
+                       smu_data->smc_state_table.GraphicsLevel;
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+       uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+                       SMU74_MAX_LEVELS_GRAPHICS;
+       uint32_t i;
+
+       for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+               levels[i].ActivityLevel =
+                               cpu_to_be16(request->activity_threshold);
+               levels[i].EnabledForActivity = 1;
+               levels[i].UpHyst = request->up_hyst;
+               levels[i].DownHyst = request->down_hyst;
+       }
+
+       return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+                               array_size, SMC_RAM_END);
+}
+
 const struct pp_smumgr_func polaris10_smu_funcs = {
        .smu_init = polaris10_smu_init,
        .smu_fini = smu7_smu_fini,
index c997117f2461bae908f739e5fe15e142d2a4e5a8..7f5359a97ef2d8f4a4182b1e708a2eff071f4be0 100644 (file)
 #include "pp_debug.h"
 #include "smumgr.h"
 #include "smu_ucode_xfer_vi.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
 #include "ppatomctrl.h"
 #include "cgs_common.h"
 #include "smu7_ppsmc.h"
 #include "smu7_smumgr.h"
+#include "smu7_common.h"
+
+#include "polaris10_pwrvirus.h"
 
 #define SMU7_SMC_SIZE 0x20000
 
@@ -540,6 +541,47 @@ int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
+{
+       int i;
+       uint32_t reg, data;
+
+       for (i = 0; i < size; i++) {
+               reg  = pvirus->reg;
+               data = pvirus->data;
+               if (reg != 0xffffffff)
+                       cgs_write_register(hwmgr->device, reg, data);
+               else
+                       break;
+               pvirus++;
+       }
+}
+
+static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
+{
+       int i;
+
+       cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
+       cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
+       cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
+       for (i = 0; i < section->dfy_size; i++)
+               cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
+}
+
+int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
+{
+       execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
+       execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
+       execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
+       execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
+       execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
+       execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
+       execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
+       execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));
+
+       return 0;
+}
+
 int smu7_init(struct pp_hwmgr *hwmgr)
 {
        struct smu7_smumgr *smu_data;
index 0b63c5c1043cf97df5dd7d48b678c3b8633a0c0d..c87263bc0caa9a1b1f50a253d91ff916763693bb 100644 (file)
@@ -88,4 +88,6 @@ int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr);
 int smu7_init(struct pp_hwmgr *hwmgr);
 int smu7_smu_fini(struct pp_hwmgr *hwmgr);
 
-#endif
\ No newline at end of file
+int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
deleted file mode 100644 (file)
index 1f720cc..0000000
+++ /dev/null
@@ -1,3261 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- *
- */
-
-#include "pp_debug.h"
-#include "tonga_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "tonga_smumgr.h"
-#include "pppcielanes.h"
-#include "pp_endian.h"
-#include "smu7_ppsmc.h"
-
-#include "smu72_discrete.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX    1
-#define VOLTAGE_VID_OFFSET_SCALE1   625
-#define VOLTAGE_VID_OFFSET_SCALE2   100
-#define MC_CG_ARB_FREQ_F1           0x0b
-#define VDDC_VDDCI_DELTA            200
-
-
-static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,  TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,        BAPM_TEMP_GRADIENT
- */
-       {1,               0xF,             0xFD,                0x19,
-        5,               45,                 0,              0xB0000,
-        {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
-               0xC9, 0xC9, 0x2F, 0x4D, 0x61},
-        {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
-               0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
-       },
-};
-
-/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
-       {600, 1050, 3, 0},
-       {600, 1050, 6, 1}
-};
-
-/* [FF, SS] type, [] 4 voltage ranges,
- * and [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
-       { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
-       { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
-};
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
-static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
-       {0, 1, 3, 2, 4, 5},
-       {0, 2, 4, 5, 6, 5}
-};
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- *  This is the unit expected by SMC firmware
- */
-
-
-static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
-       phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
-       uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
-       uint32_t i = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                          (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       /* clock - voltage dependency table is empty table */
-       if (allowed_clock_voltage_table->count == 0)
-               return -EINVAL;
-
-       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
-               /* find first sclk bigger than request */
-               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
-                       voltage->VddGfx = phm_get_voltage_index(
-                                       pptable_info->vddgfx_lookup_table,
-                               allowed_clock_voltage_table->entries[i].vddgfx);
-                       voltage->Vddc = phm_get_voltage_index(
-                                               pptable_info->vddc_lookup_table,
-                                 allowed_clock_voltage_table->entries[i].vddc);
-
-                       if (allowed_clock_voltage_table->entries[i].vddci)
-                               voltage->Vddci =
-                                       phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
-                       else
-                               voltage->Vddci =
-                                       phm_get_voltage_id(&data->vddci_voltage_table,
-                                               allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
-
-
-                       if (allowed_clock_voltage_table->entries[i].mvdd)
-                               *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
-
-                       voltage->Phases = 1;
-                       return 0;
-               }
-       }
-
-       /* sclk is bigger than max sclk in the dependence table */
-       voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
-               allowed_clock_voltage_table->entries[i-1].vddgfx);
-       voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
-               allowed_clock_voltage_table->entries[i-1].vddc);
-
-       if (allowed_clock_voltage_table->entries[i-1].vddci)
-               voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
-                       allowed_clock_voltage_table->entries[i-1].vddci);
-
-       if (allowed_clock_voltage_table->entries[i-1].mvdd)
-               *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
-
-       return 0;
-}
-
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       unsigned int count;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-               table->VddcLevelCount = data->vddc_voltage_table.count;
-               for (count = 0; count < table->VddcLevelCount; count++) {
-                       table->VddcTable[count] =
-                               PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
-               }
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
-       }
-       return 0;
-}
-
-/**
- * VddGfx table preparation for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       unsigned int count;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-               table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
-               for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
-                       table->VddGfxTable[count] =
-                               PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
-               }
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
-       }
-       return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param    *hwmgr The address of the hardware manager.
- * @param    *table The SMC DPM table structure to be populated.
- * @return   0
- */
-static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t count;
-
-       table->VddciLevelCount = data->vddci_voltage_table.count;
-       for (count = 0; count < table->VddciLevelCount; count++) {
-               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-                       table->VddciTable[count] =
-                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-               } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-                       table->SmioTable1.Pattern[count].Voltage =
-                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
-                       table->SmioTable1.Pattern[count].Smio =
-                               (uint8_t) count;
-                       table->Smio[count] |=
-                               data->vddci_voltage_table.entries[count].smio_low;
-                       table->VddciTable[count] =
-                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
-               }
-       }
-
-       table->SmioMask1 = data->vddci_voltage_table.mask_low;
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
-       return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param    *hwmgr The address of the hardware manager.
- * @param    *table The SMC DPM table structure to be populated.
- * @return   0
- */
-static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t count;
-
-       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               table->MvddLevelCount = data->mvdd_voltage_table.count;
-               for (count = 0; count < table->MvddLevelCount; count++) {
-                       table->SmioTable2.Pattern[count].Voltage =
-                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
-                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
-                       table->SmioTable2.Pattern[count].Smio =
-                               (uint8_t) count;
-                       table->Smio[count] |=
-                               data->mvdd_voltage_table.entries[count].smio_low;
-               }
-               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
-       }
-
-       return 0;
-}
-
-/**
- * Preparation of vddc and vddgfx CAC tables for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       uint32_t count;
-       uint8_t index = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
-                                          pptable_info->vddgfx_lookup_table;
-       struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
-                                               pptable_info->vddc_lookup_table;
-
-       /* table is already swapped, so in order to use the value from it
-        * we need to swap it back.
-        */
-       uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
-       uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
-
-       for (count = 0; count < vddc_level_count; count++) {
-               /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
-               index = phm_get_voltage_index(vddc_lookup_table,
-                       data->vddc_voltage_table.entries[count].value);
-               table->BapmVddcVidLoSidd[count] =
-                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
-               table->BapmVddcVidHiSidd[count] =
-                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
-               table->BapmVddcVidHiSidd2[count] =
-                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
-       }
-
-       if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
-               /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
-               for (count = 0; count < vddgfx_level_count; count++) {
-                       index = phm_get_voltage_index(vddgfx_lookup_table,
-                               convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
-                       table->BapmVddGfxVidHiSidd2[count] =
-                               convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
-               }
-       } else {
-               for (count = 0; count < vddc_level_count; count++) {
-                       index = phm_get_voltage_index(vddc_lookup_table,
-                               data->vddc_voltage_table.entries[count].value);
-                       table->BapmVddGfxVidLoSidd[count] =
-                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
-                       table->BapmVddGfxVidHiSidd[count] =
-                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
-                       table->BapmVddGfxVidHiSidd2[count] =
-                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
-               }
-       }
-
-       return 0;
-}
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-
-static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
-       SMU72_Discrete_DpmTable *table)
-{
-       int result;
-
-       result = tonga_populate_smc_vddc_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-                       "can not populate VDDC voltage table to SMC",
-                       return -EINVAL);
-
-       result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-                       "can not populate VDDCI voltage table to SMC",
-                       return -EINVAL);
-
-       result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-                       "can not populate VDDGFX voltage table to SMC",
-                       return -EINVAL);
-
-       result = tonga_populate_smc_mvdd_table(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-                       "can not populate MVDD voltage table to SMC",
-                       return -EINVAL);
-
-       result = tonga_populate_cac_tables(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-                       "can not populate CAC voltage tables to SMC",
-                       return -EINVAL);
-
-       return 0;
-}
-
-static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
-               struct SMU72_Discrete_Ulv *state)
-{
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       state->CcPwrDynRm = 0;
-       state->CcPwrDynRm1 = 0;
-
-       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
-       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
-                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
-       state->VddcPhase = 1;
-
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
-       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
-       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
-       return 0;
-}
-
-static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
-               struct SMU72_Discrete_DpmTable *table)
-{
-       return tonga_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t i;
-
-       /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
-       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
-               table->LinkLevel[i].PcieGenSpeed  =
-                       (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
-               table->LinkLevel[i].PcieLaneCount =
-                       (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
-               table->LinkLevel[i].EnabledForActivity =
-                       1;
-               table->LinkLevel[i].SPC =
-                       (uint8_t)(data->pcie_spc_cap & 0xff);
-               table->LinkLevel[i].DownThreshold =
-                       PP_HOST_TO_SMC_UL(5);
-               table->LinkLevel[i].UpThreshold =
-                       PP_HOST_TO_SMC_UL(30);
-       }
-
-       smu_data->smc_state_table.LinkLevelCount =
-               (uint8_t)dpm_table->pcie_speed_table.count;
-       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
-               phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
-       return 0;
-}
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    engine_clock the engine clock to use to populate the structure
- * @param    sclk        the SMC SCLK structure to be populated
- */
-static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
-               uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
-{
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       pp_atomctrl_clock_dividers_vi dividers;
-       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
-       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
-       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
-       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
-       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
-       uint32_t    reference_clock;
-       uint32_t reference_divider;
-       uint32_t fbdiv;
-       int result;
-
-       /* get the engine clock dividers for this clock value*/
-       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-               "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
-       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
-       reference_clock = atomctrl_get_reference_clock(hwmgr);
-
-       reference_divider = 1 + dividers.uc_pll_ref_div;
-
-       /* low 14 bits is fraction and high 12 bits is divider*/
-       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
-       /* SPLL_FUNC_CNTL setup*/
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
-               CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
-       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
-               CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
-
-       /* SPLL_FUNC_CNTL_3 setup*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
-               CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
-       /* set to use fractional accumulation*/
-       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
-               CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
-               pp_atomctrl_internal_ss_info ss_info;
-
-               uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
-               if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
-                       /*
-                       * ss_info.speed_spectrum_percentage -- in unit of 0.01%
-                       * ss_info.speed_spectrum_rate -- in unit of khz
-                       */
-                       /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
-                       uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
-                       /* clkv = 2 * D * fbdiv / NS */
-                       uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
-                       cg_spll_spread_spectrum =
-                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
-                       cg_spll_spread_spectrum =
-                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
-                       cg_spll_spread_spectrum_2 =
-                               PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
-               }
-       }
-
-       sclk->SclkFrequency        = engine_clock;
-       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
-       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
-       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
-       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
-       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
-
-       return 0;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    engine_clock the engine clock to use to populate the structure
- * @param    sclk        the SMC SCLK structure to be populated
- */
-static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
-                                               uint32_t engine_clock,
-                               uint16_t sclk_activity_level_threshold,
-                               SMU72_Discrete_GraphicsLevel *graphic_level)
-{
-       int result;
-       uint32_t mvdd;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                           (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
-       /* populate graphics levels*/
-       result = tonga_get_dependency_volt_by_clk(hwmgr,
-               pptable_info->vdd_dep_on_sclk, engine_clock,
-               &graphic_level->MinVoltage, &mvdd);
-       PP_ASSERT_WITH_CODE((!result),
-               "can not find VDDC voltage value for VDDC "
-               "engine clock dependency table", return result);
-
-       /* SCLK frequency in units of 10KHz*/
-       graphic_level->SclkFrequency = engine_clock;
-       /* Indicates maximum activity level for this performance level. 50% for now*/
-       graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
-       graphic_level->CcPwrDynRm = 0;
-       graphic_level->CcPwrDynRm1 = 0;
-       /* this level can be used if activity is high enough.*/
-       graphic_level->EnabledForActivity = 0;
-       /* this level can be used for throttling.*/
-       graphic_level->EnabledForThrottle = 1;
-       graphic_level->UpHyst = 0;
-       graphic_level->DownHyst = 0;
-       graphic_level->VoltageDownHyst = 0;
-       graphic_level->PowerThrottle = 0;
-
-       data->display_timing.min_clock_in_sr =
-                       hwmgr->display_config.min_core_set_clock_in_sr;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep))
-               graphic_level->DeepSleepDivId =
-                               smu7_get_sleep_divider_id_from_clock(engine_clock,
-                                               data->display_timing.min_clock_in_sr);
-
-       /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
-       graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-       if (!result) {
-               /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
-               /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
-               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
-       }
-
-       return result;
-}
-
/**
 * Populate all SMC SCLK level structures based on the trimmed allowed DPM
 * engine clock states, assign a PCIE DPM level to each graphics level,
 * and upload the whole level array to SMC memory.
 *
 * @param    hwmgr      the address of the hardware manager
 * @return   0 on success; error code from level population or the SMC copy
 */
int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
	uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
	/* Destination of the level array inside the SMC's copy of the DPM table. */
	uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
				offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);

	uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
						SMU72_MAX_LEVELS_GRAPHICS;

	SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;

	uint32_t i, max_entry;
	uint8_t highest_pcie_level_enabled = 0;
	uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
	uint8_t count = 0;
	int result = 0;

	/* Clear the full array; unused trailing levels upload as zero. */
	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = tonga_populate_single_graphic_level(hwmgr,
					dpm_table->sclk_table.dpm_levels[i].value,
					(uint16_t)smu_data->activity_target[i],
					&(smu_data->smc_state_table.GraphicsLevel[i]));
		if (result != 0)
			return result;

		/* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
	}

	/* Only enable level 0 for now. */
	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	/* set highest level watermark to high */
	if (dpm_table->sclk_table.count > 1)
		smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
			PPSMC_DISPLAY_WATERMARK_HIGH;

	smu_data->smc_state_table.GraphicsDpmLevelCount =
		(uint8_t)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	if (pcie_table != NULL) {
		/* PPTable provides PCIE levels: map graphics level i to PCIE
		 * entry i, clamped to the last valid entry. */
		PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
			"There must be 1 or more PCIE levels defined in PPTable.",
			return -EINVAL);
		max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
		for (i = 0; i < dpm_table->sclk_table.count; i++) {
			smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
				(uint8_t) ((i < max_entry) ? i : max_entry);
		}
	} else {
		/* No PCIE table: derive low/mid/high PCIE levels from the
		 * PCIE DPM enable bitmask. */
		if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
			pr_err("Pcie Dpm Enablemask is 0 !");

		/* Walk up from bit 0 while the next bit is still set: ends on
		 * the top of the contiguous run of enabled levels. */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
					(1<<(highest_pcie_level_enabled+1))) != 0)) {
			highest_pcie_level_enabled++;
		}

		/* First set bit = lowest enabled PCIE level. */
		while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
					(1<<lowest_pcie_level_enabled)) == 0)) {
			lowest_pcie_level_enabled++;
		}

		/* Skip any disabled levels directly above the lowest one. */
		while ((count < highest_pcie_level_enabled) &&
				((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
					(1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
			count++;
		}
		/* Mid level: next enabled level above the lowest, clamped to the highest. */
		mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
			(lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;


		/* set pcieDpmLevel to highest_pcie_level_enabled*/
		for (i = 2; i < dpm_table->sclk_table.count; i++)
			smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;

		/* set pcieDpmLevel to lowest_pcie_level_enabled*/
		smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;

		/* set pcieDpmLevel to mid_pcie_level_enabled*/
		smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
	}
	/* level count will send to smc once at init smc table and never change*/
	result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
				(uint8_t *)levels, (uint32_t)level_array_size,
								SMC_RAM_END);

	return result;
}
-
/**
 * Populate one SMC MCLK (memory level) structure from the provided memory
 * clock, using memory PLL dividers queried from the VBIOS.
 *
 * @param    hwmgr        the address of the hardware manager
 * @param    memory_clock the memory clock to use to populate the structure
 * @param    mclk         the SMC memory level structure to be populated
 * @param    strobe_mode  strobe-mode flag, forwarded to the VBIOS memory
 *                        PLL divider lookup
 * @param    dllStateOn   value programmed into the MRDCK0/1_PDNB fields of
 *                        MCLK_PWRMGT_CNTL
 * @return   0 on success, non-zero if the VBIOS divider lookup fails
 */
static int tonga_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU72_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Start from the clock register values cached in the hwmgr backend. */
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(
			!result,
			"Error retrieving Memory Clock Parameters from VBIOS.",
			return result);

	/* MPLL_FUNC_CNTL setup*/
	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
					mpll_param.bw_ctrl);

	/* MPLL_FUNC_CNTL_1 setup*/
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
					MPLL_FUNC_CNTL_1, CLKF,
					mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
					MPLL_FUNC_CNTL_1, CLKFRAC,
					mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
						MPLL_FUNC_CNTL_1, VCO_MODE,
						mpll_param.vco_mode);

	/* MPLL_AD_FUNC_CNTL setup*/
	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
					MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
					mpll_param.mpll_post_divider);

	if (data->is_memory_gddr5) {
		/* MPLL_DQ_FUNC_CNTL setup*/
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
						MPLL_DQ_FUNC_CNTL, YCLK_SEL,
						mpll_param.yclk_sel);
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
						MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
						mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		/*
		 ************************************
		 Fref = Reference Frequency
		 NF = Feedback divider ratio
		 NR = Reference divider ratio
		 Fnom = Nominal VCO output frequency = Fref * NF / NR
		 Fs = Spreading Rate
		 D = Percentage down-spread / 2
		 Fint = Reference input frequency to PFD = Fref / NR
		 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
		 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
		 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
		 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
		 *************************************
		 */
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			/* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
			/* ss.Info.speed_spectrum_rate -- in unit of khz */
			/* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
			/*     = reference_clock * 5 / speed_spectrum_rate */
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;

			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
			/*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	/* MCLK_PWRMGT_CNTL setup */
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);

	/* Save the result data to output memory level structure */
	mclk->MclkFrequency   = memory_clock;
	mclk->MpllFuncCntl    = mpll_func_cntl;
	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
	mclk->DllCntl         = dll_cntl;
	mclk->MpllSs1         = mpll_ss1;
	mclk->MpllSs2         = mpll_ss2;

	return 0;
}
-
/*
 * Map a memory clock to its 4-bit MC frequency-ratio index.
 *
 * Clocks below the lower bound map to 0x00, clocks above the upper bound
 * to 0x0f; in between the index grows linearly in fixed steps.  The bounds,
 * base and step depend on whether the memory runs in strobe mode.
 */
static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint32_t lower, upper, base, step;

	if (strobe_mode) {
		lower = 12500;
		upper = 47500;
		base  = 10000;
		step  = 2500;
	} else {
		lower = 65000;
		upper = 135000;
		base  = 60000;
		step  = 5000;
	}

	if (memory_clock < lower)
		return 0x00;
	if (memory_clock > upper)
		return 0x0f;

	return (uint8_t)((memory_clock - base) / step);
}
-
/*
 * Map a DDR3 memory clock to its 4-bit MC frequency-ratio index:
 * below 10000 -> 0, at or above 80000 -> 0x0f, linear in 5000 steps
 * (offset by one) in between.
 */
static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock >= 80000)
		return 0x0f;
	if (memory_clock < 10000)
		return 0;

	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
-
-
/**
 * Populate one SMC memory level for the given memory clock: look up the
 * minimum voltages, decide stutter/strobe/EDC modes and the DLL state,
 * then fill in the MPLL register values via tonga_calculate_mclk_params().
 *
 * @param    hwmgr        the address of the hardware manager
 * @param    memory_clock the memory clock for this level
 * @param    memory_level the SMC memory level structure to be populated
 * @return   0 on success, non-zero error code otherwise
 */
static int tonga_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU72_Discrete_MemoryLevel *memory_level
		)
{
	uint32_t mvdd = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info =
			  (struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	bool dll_state_on;
	struct cgs_display_info info = {0};
	/* Mode-decision thresholds (same 10 kHz clock units as memory_clock). */
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_stutter_mode_threshold = 30000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	if (NULL != pptable_info->vdd_dep_on_mclk) {
		result = tonga_get_dependency_volt_by_clk(hwmgr,
				pptable_info->vdd_dep_on_mclk,
				memory_clock,
				&memory_level->MinVoltage, &mvdd);
		PP_ASSERT_WITH_CODE(
			!result,
			"can not find MinVddc voltage value from memory VDDC "
			"voltage dependency table",
			return result);
	}

	/* Without SMIO MVDD control, fall back to the VBIOS bootup value. */
	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
	else
		memory_level->MinMvdd = mvdd;

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 0;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* Enable memory self-refresh stutter only for low clocks, when UVD is
	 * idle, stutter is enabled in the display pipe, and 1-2 displays are
	 * active. */
	if ((mclk_stutter_mode_threshold != 0) &&
	    (memory_clock <= mclk_stutter_mode_threshold) &&
	    (!data->is_uvd_enabled)
	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
	    && (data->display_timing.num_existing_displays <= 2)
	    && (data->display_timing.num_existing_displays != 0))
		memory_level->StutterEnable = 1;

	/* decide strobe mode*/
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		/* Choose the DLL state from the MC_SEQ_MISC registers.
		 * NOTE(review): bit meanings inferred from register usage
		 * only - confirm against the MC register spec. */
		if (memory_level->StrobeEnable) {
			if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			} else {
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
			}

		} else {
			dll_state_on = data->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio =
			tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = tonga_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (!result) {
		/* Convert multi-byte fields to SMC byte order before upload. */
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
-
-int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct tonga_smumgr *smu_data =
-                       (struct tonga_smumgr *)(hwmgr->smu_backend);
-       struct smu7_dpm_table *dpm_table = &data->dpm_table;
-       int result;
-
-       /* populate MCLK dpm table to SMU7 */
-       uint32_t level_array_address =
-                               smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
-       uint32_t level_array_size =
-                               sizeof(SMU72_Discrete_MemoryLevel) *
-                               SMU72_MAX_LEVELS_MEMORY;
-       SMU72_Discrete_MemoryLevel *levels =
-                               smu_data->smc_state_table.MemoryLevel;
-       uint32_t i;
-
-       memset(levels, 0x00, level_array_size);
-
-       for (i = 0; i < dpm_table->mclk_table.count; i++) {
-               PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
-                       "can not populate memory level as memory clock is zero",
-                       return -EINVAL);
-               result = tonga_populate_single_memory_level(
-                               hwmgr,
-                               dpm_table->mclk_table.dpm_levels[i].value,
-                               &(smu_data->smc_state_table.MemoryLevel[i]));
-               if (result)
-                       return result;
-       }
-
-       /* Only enable level 0 for now.*/
-       smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
-       /*
-       * in order to prevent MC activity from stutter mode to push DPM up.
-       * the UVD change complements this by putting the MCLK in a higher state
-       * by default such that we are not effected by up threshold or and MCLK DPM latency.
-       */
-       smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
-       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
-       smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
-       data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-       /* set highest level watermark to high*/
-       smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
-       /* level count will send to smc once at init smc table and never change*/
-       result = smu7_copy_bytes_to_smc(hwmgr,
-               level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
-               SMC_RAM_END);
-
-       return result;
-}
-
-static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
-                               uint32_t mclk, SMIO_Pattern *smio_pattern)
-{
-       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i = 0;
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-               /* find mvdd value which clock is more than request */
-               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
-                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
-                               /* Always round to higher voltage. */
-                               smio_pattern->Voltage =
-                                     data->mvdd_voltage_table.entries[i].value;
-                               break;
-                       }
-               }
-
-               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
-                       "MVDD Voltage is outside the supported range.",
-                       return -EINVAL);
-       } else {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-
/**
 * Populate the SMC ACPI (lowest-power) level for both SCLK and MCLK:
 * run the SCLK side at the reference clock with the SPLL powered off and
 * held in reset, and disable/force-reset the memory DLLs.  DPM on DC is
 * disallowed for this level.
 *
 * @param    hwmgr  the address of the hardware manager
 * @param    table  the SMC DPM table whose ACPILevel/MemoryACPILevel
 *                  members are filled in
 * @return   0 on success, non-zero if the VBIOS divider lookup fails
 */
static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
	SMU72_Discrete_DpmTable *table)
{
	int result = 0;
	struct tonga_smumgr *smu_data =
				(struct tonga_smumgr *)(hwmgr->smu_backend);
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;

	SMIO_Pattern voltage_level;
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;

	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	/* Reuse the minimum voltage of the lowest graphics level. */
	table->ACPILevel.MinVoltage =
			smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;

	/* Run the ACPI level at the reference clock. */
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.",
		return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset; mux SCLK away from it. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
					SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
						SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
						SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;


	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVoltage =
			    smu_data->smc_state_table.MemoryLevel[0].MinVoltage;

	/*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/

	/* MVDD falls back to 0 when SMIO control is unavailable. */
	if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	/* Store register values in SMC byte order. */
	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
-
-static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
-                                       SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                                               pptable_info->mm_dep_table;
-
-       table->UvdLevelCount = (uint8_t) (mm_table->count);
-       table->UvdBootLevel = 0;
-
-       for (count = 0; count < table->UvdLevelCount; count++) {
-               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
-               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
-               table->UvdLevel[count].MinVoltage.Vddc =
-                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
-                                               mm_table->entries[count].vddc);
-               table->UvdLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
-                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                                               mm_table->entries[count].vddgfx) : 0;
-               table->UvdLevel[count].MinVoltage.Vddci =
-                       phm_get_voltage_id(&data->vddci_voltage_table,
-                                            mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               table->UvdLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(
-                                       hwmgr,
-                                       table->UvdLevel[count].VclkFrequency,
-                                       &dividers);
-
-               PP_ASSERT_WITH_CODE((!result),
-                                   "can not find divide id for Vclk clock",
-                                       return result);
-
-               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                                         table->UvdLevel[count].DclkFrequency, &dividers);
-               PP_ASSERT_WITH_CODE((!result),
-                                   "can not find divide id for Dclk clock",
-                                       return result);
-
-               table->UvdLevel[count].DclkDivider =
-                                       (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
-       }
-
-       return result;
-
-}
-
-static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                             (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                                                    pptable_info->mm_dep_table;
-
-       table->VceLevelCount = (uint8_t) (mm_table->count);
-       table->VceBootLevel = 0;
-
-       for (count = 0; count < table->VceLevelCount; count++) {
-               table->VceLevel[count].Frequency =
-                       mm_table->entries[count].eclk;
-               table->VceLevel[count].MinVoltage.Vddc =
-                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
-                               mm_table->entries[count].vddc);
-               table->VceLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
-                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                               mm_table->entries[count].vddgfx) : 0;
-               table->VceLevel[count].MinVoltage.Vddci =
-                       phm_get_voltage_id(&data->vddci_voltage_table,
-                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               table->VceLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                       table->VceLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((!result),
-                               "can not find divide id for VCE engine clock",
-                               return result);
-
-               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
-       }
-
-       return result;
-}
-
-static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                            (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                                                   pptable_info->mm_dep_table;
-
-       table->AcpLevelCount = (uint8_t) (mm_table->count);
-       table->AcpBootLevel = 0;
-
-       for (count = 0; count < table->AcpLevelCount; count++) {
-               table->AcpLevel[count].Frequency =
-                       pptable_info->mm_dep_table->entries[count].aclk;
-               table->AcpLevel[count].MinVoltage.Vddc =
-                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
-                       mm_table->entries[count].vddc);
-               table->AcpLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
-                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                               mm_table->entries[count].vddgfx) : 0;
-               table->AcpLevel[count].MinVoltage.Vddci =
-                       phm_get_voltage_id(&data->vddci_voltage_table,
-                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               table->AcpLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                       table->AcpLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((!result),
-                       "can not find divide id for engine clock", return result);
-
-               table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
-       }
-
-       return result;
-}
-
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                            (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                                                   pptable_info->mm_dep_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t) (mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].Frequency =
-                       pptable_info->mm_dep_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage.Vddc =
-                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
-                               mm_table->entries[count].vddc);
-               table->SamuLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
-                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                               mm_table->entries[count].vddgfx) : 0;
-               table->SamuLevel[count].MinVoltage.Vddci =
-                       phm_get_voltage_id(&data->vddci_voltage_table,
-                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               table->SamuLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                       table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((!result),
-                       "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-       }
-
-       return result;
-}
-
-static int tonga_populate_memory_timing_parameters(
-               struct pp_hwmgr *hwmgr,
-               uint32_t engine_clock,
-               uint32_t memory_clock,
-               struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
-               )
-{
-       uint32_t dramTiming;
-       uint32_t dramTiming2;
-       uint32_t burstTime;
-       int result;
-
-       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
-                               engine_clock, memory_clock);
-
-       PP_ASSERT_WITH_CODE(result == 0,
-               "Error calling VBIOS to set DRAM_TIMING.", return result);
-
-       dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
-       dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
-       burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
-       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
-       arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
-       return 0;
-}
-
-/**
- * Setup parameters for the MC ARB.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- * This function is to be called from the SetPowerState table.
- */
-static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       int result = 0;
-       SMU72_Discrete_MCArbDramTimingTable  arb_regs;
-       uint32_t i, j;
-
-       memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
-
-       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
-               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
-                       result = tonga_populate_memory_timing_parameters
-                               (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
-                                data->dpm_table.mclk_table.dpm_levels[j].value,
-                                &arb_regs.entries[i][j]);
-
-                       if (result)
-                               break;
-               }
-       }
-
-       if (!result) {
-               result = smu7_copy_bytes_to_smc(
-                               hwmgr,
-                               smu_data->smu7_data.arb_table_start,
-                               (uint8_t *)&arb_regs,
-                               sizeof(SMU72_Discrete_MCArbDramTimingTable),
-                               SMC_RAM_END
-                               );
-       }
-
-       return result;
-}
-
-static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       table->GraphicsBootLevel = 0;
-       table->MemoryBootLevel = 0;
-
-       /* find boot level from dpm table*/
-       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
-       data->vbios_boot_state.sclk_bootup_value,
-       (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
-
-       if (result != 0) {
-               smu_data->smc_state_table.GraphicsBootLevel = 0;
-               pr_err("[powerplay] VBIOS did not find boot engine "
-                               "clock value in dependency table. "
-                               "Using Graphics DPM level 0 !");
-               result = 0;
-       }
-
-       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
-               data->vbios_boot_state.mclk_bootup_value,
-               (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
-
-       if (result != 0) {
-               smu_data->smc_state_table.MemoryBootLevel = 0;
-               pr_err("[powerplay] VBIOS did not find boot "
-                               "engine clock value in dependency table."
-                               "Using Memory DPM level 0 !");
-               result = 0;
-       }
-
-       table->BootVoltage.Vddc =
-               phm_get_voltage_id(&(data->vddc_voltage_table),
-                       data->vbios_boot_state.vddc_bootup_value);
-       table->BootVoltage.VddGfx =
-               phm_get_voltage_id(&(data->vddgfx_voltage_table),
-                       data->vbios_boot_state.vddgfx_bootup_value);
-       table->BootVoltage.Vddci =
-               phm_get_voltage_id(&(data->vddci_voltage_table),
-                       data->vbios_boot_state.vddci_bootup_value);
-       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
-       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
-       return result;
-}
-
-static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
-       uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
-                       volt_with_cks, value;
-       uint16_t clock_freq_u16;
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
-                       volt_offset = 0;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
-                       table_info->vdd_dep_on_sclk;
-       uint32_t hw_revision, dev_id;
-       struct cgs_system_info sys_info = {0};
-
-       stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-       sys_info.size = sizeof(struct cgs_system_info);
-
-       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
-       cgs_query_system_info(hwmgr->device, &sys_info);
-       hw_revision = (uint32_t)sys_info.value;
-
-       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
-       cgs_query_system_info(hwmgr->device, &sys_info);
-       dev_id = (uint32_t)sys_info.value;
-
-       /* Read SMU_Eefuse to read and calculate RO and determine
-        * if the part is SS or FF. if RO >= 1660MHz, part is FF.
-        */
-       efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (146 * 4));
-       efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (148 * 4));
-       efuse &= 0xFF000000;
-       efuse = efuse >> 24;
-       efuse2 &= 0xF;
-
-       if (efuse2 == 1)
-               ro = (2300 - 1350) * efuse / 255 + 1350;
-       else
-               ro = (2500 - 1000) * efuse / 255 + 1000;
-
-       if (ro >= 1660)
-               type = 0;
-       else
-               type = 1;
-
-       /* Populate Stretch amount */
-       smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
-
-       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
-       for (i = 0; i < sclk_table->count; i++) {
-               smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
-                               sclk_table->entries[i].cks_enable << i;
-               if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
-                       volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
-                               (sclk_table->entries[i].clk/100) / 10000) * 1000 /
-                               (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
-                       volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
-                               (sclk_table->entries[i].clk/100) / 100000) * 1000 /
-                               (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
-               } else {
-                       volt_without_cks = (uint32_t)((14041 *
-                               (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
-                               (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
-                       volt_with_cks = (uint32_t)((13946 *
-                               (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
-                               (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
-               }
-               if (volt_without_cks >= volt_with_cks)
-                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-                                       sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
-               smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
-       }
-
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       STRETCH_ENABLE, 0x0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       staticEnable, 0x1);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x0);
-
-       /* Populate CKS Lookup Table */
-       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-               stretch_amount2 = 0;
-       else if (stretch_amount == 3 || stretch_amount == 4)
-               stretch_amount2 = 1;
-       else {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_ClockStretcher);
-               PP_ASSERT_WITH_CODE(false,
-                               "Stretch Amount in PPTable not supported\n",
-                               return -EINVAL);
-       }
-
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL);
-       value &= 0xFFC2FF87;
-       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
-                       tonga_clock_stretcher_lookup_table[stretch_amount2][0];
-       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
-                       tonga_clock_stretcher_lookup_table[stretch_amount2][1];
-       clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
-                       GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
-                       SclkFrequency) / 100);
-       if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
-                       clock_freq_u16 &&
-           tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
-                       clock_freq_u16) {
-               /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
-               value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
-               /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
-               value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
-               /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
-               value |= (tonga_clock_stretch_amount_conversion
-                               [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
-                                [stretch_amount]) << 3;
-       }
-       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
-                       CKS_LOOKUPTableEntry[0].minFreq);
-       CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
-                       CKS_LOOKUPTableEntry[0].maxFreq);
-       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
-                       tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
-       smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
-                       (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL, value);
-
-       /* Populate DDT Lookup Table */
-       for (i = 0; i < 4; i++) {
-               /* Assign the minimum and maximum VID stored
-                * in the last row of Clock Stretcher Voltage Table.
-                */
-               smu_data->smc_state_table.ClockStretcherDataTable.
-               ClockStretcherDataTableEntry[i].minVID =
-                               (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
-               smu_data->smc_state_table.ClockStretcherDataTable.
-               ClockStretcherDataTableEntry[i].maxVID =
-                               (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
-               /* Loop through each SCLK and check the frequency
-                * to see if it lies within the frequency for clock stretcher.
-                */
-               for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
-                       cks_setting = 0;
-                       clock_freq = PP_SMC_TO_HOST_UL(
-                                       smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
-                       /* Check the allowed frequency against the sclk level[j].
-                        *  Sclk's endianness has already been converted,
-                        *  and it's in 10Khz unit,
-                        *  as opposed to Data table, which is in Mhz unit.
-                        */
-                       if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
-                               cks_setting |= 0x2;
-                               if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
-                                       cks_setting |= 0x1;
-                       }
-                       smu_data->smc_state_table.ClockStretcherDataTable.
-                       ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
-               }
-               CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
-                               ClockStretcherDataTable.
-                               ClockStretcherDataTableEntry[i].setting);
-       }
-
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixPWR_CKS_CNTL);
-       value &= 0xFFFFFFFE;
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                                       ixPWR_CKS_CNTL, value);
-
-       return 0;
-}
-
-/**
- * Populates the SMC VRConfig field in DPM table.
- *
- * @param    hwmgr      the address of the hardware manager
- * @param    table     the SMC DPM table structure to be populated
- * @return   always 0
- */
-static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
-                       SMU72_Discrete_DpmTable *table)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint16_t config;
-
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
-               /*  Splitted mode */
-               config = VR_SVI2_PLANE_1;
-               table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
-               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-                       config = VR_SVI2_PLANE_2;
-                       table->VRConfig |= config;
-               } else {
-                       pr_err("VDDC and VDDGFX should "
-                               "be both on SVI2 control in splitted mode !\n");
-               }
-       } else {
-               /* Merged mode  */
-               config = VR_MERGED_WITH_VDDC;
-               table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
-               /* Set Vddc Voltage Controller  */
-               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
-                       config = VR_SVI2_PLANE_1;
-                       table->VRConfig |= config;
-               } else {
-                       pr_err("VDDC should be on "
-                                       "SVI2 control in merged mode !\n");
-               }
-       }
-
-       /* Set Vddci Voltage Controller  */
-       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
-               config = VR_SVI2_PLANE_2;  /* only in merged mode */
-               table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
-       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
-               config = VR_SMIO_PATTERN_1;
-               table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
-       }
-
-       /* Set Mvdd Voltage Controller */
-       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               config = VR_SMIO_PATTERN_2;
-               table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
-       }
-
-       return 0;
-}
-
-
-/**
- * Initialize the ARB DRAM timing table's index field.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t tmp;
-       int result;
-
-       /*
-       * This is a read-modify-write on the first byte of the ARB table.
-       * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
-       * is the field 'current'.
-       * This solution is ugly, but we never write the whole table only
-       * individual fields in it.
-       * In reality this field should not be in that structure
-       * but in a soft register.
-       */
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
-       if (result != 0)
-               return result;
-
-       tmp &= 0x00FFFFFF;
-       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
-       return smu7_write_smc_sram_dword(hwmgr,
-                       smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-
-static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
-       SMU72_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
-       int  i, j, k;
-       const uint16_t *pdef1, *pdef2;
-
-       dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
-                       (uint16_t)(cac_dtp_table->usTDP * 256));
-       dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
-                       (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
-       PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
-                       "Target Operating Temp is out of Range !",
-                       );
-
-       dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
-       dpm_table->GpuTjHyst = 8;
-
-       dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
-       dpm_table->BAPM_TEMP_GRADIENT =
-                               PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
-       pdef1 = defaults->bapmti_r;
-       pdef2 = defaults->bapmti_rc;
-
-       for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
-               for (j = 0; j < SMU72_DTE_SOURCES; j++) {
-                       for (k = 0; k < SMU72_DTE_SINKS; k++) {
-                               dpm_table->BAPMTI_R[i][j][k] =
-                                               PP_HOST_TO_SMC_US(*pdef1);
-                               dpm_table->BAPMTI_RC[i][j][k] =
-                                               PP_HOST_TO_SMC_US(*pdef2);
-                               pdef1++;
-                               pdef2++;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
-
-       smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
-       smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
-       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
-       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
-       return 0;
-}
-
-static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
-       uint16_t tdc_limit;
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       /* TDC number of fraction bits are changed from 8 to 7
-        * for Fiji as requested by SMC team
-        */
-       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
-       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
-                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
-       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
-                       defaults->tdc_vddc_throttle_release_limit_perc;
-       smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
-
-       return 0;
-}
-
-static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
-       struct tonga_smumgr *smu_data =
-                       (struct tonga_smumgr *)(hwmgr->smu_backend);
-       const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
-       uint32_t temp;
-
-       if (smu7_read_smc_sram_dword(hwmgr,
-                       fuse_table_offset +
-                       offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
-                       (uint32_t *)&temp, SMC_RAM_END))
-               PP_ASSERT_WITH_CODE(false,
-                               "Attempt to read PmFuses.DW6 "
-                               "(SviLoadLineEn) from SMC Failed !",
-                               return -EINVAL);
-       else
-               smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
-
-       return 0;
-}
-
-static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
-       return 0;
-}
-
-static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-
-       if ((hwmgr->thermal_controller.advanceFanControlParameters.
-                       usFanOutputSensitivity & (1 << 15)) ||
-               (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
-               hwmgr->thermal_controller.advanceFanControlParameters.
-               usFanOutputSensitivity = hwmgr->thermal_controller.
-                       advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
-       smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
-                       PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
-                                       advanceFanControlParameters.usFanOutputSensitivity);
-       return 0;
-}
-
-static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
-       int i;
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-
-       /* Currently not used. Set all to zero. */
-       for (i = 0; i < 16; i++)
-               smu_data->power_tune_table.GnbLPML[i] = 0;
-
-       return 0;
-}
-
-static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
-       uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
-       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
-       hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
-       lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
-       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
-       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
-                       CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
-       return 0;
-}
-
-static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t pm_fuse_table_offset;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment)) {
-               if (smu7_read_smc_sram_dword(hwmgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, PmFuseTable),
-                               &pm_fuse_table_offset, SMC_RAM_END))
-                       PP_ASSERT_WITH_CODE(false,
-                               "Attempt to get pm_fuse_table_offset Failed !",
-                               return -EINVAL);
-
-               /* DW6 */
-               if (tonga_populate_svi_load_line(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                               "Attempt to populate SviLoadLine Failed !",
-                               return -EINVAL);
-               /* DW7 */
-               if (tonga_populate_tdc_limit(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to populate TDCLimit Failed !",
-                                       return -EINVAL);
-               /* DW8 */
-               if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
-                       PP_ASSERT_WITH_CODE(false,
-                               "Attempt to populate TdcWaterfallCtl Failed !",
-                               return -EINVAL);
-
-               /* DW9-DW12 */
-               if (tonga_populate_temperature_scaler(hwmgr) != 0)
-                       PP_ASSERT_WITH_CODE(false,
-                               "Attempt to populate LPMLTemperatureScaler Failed !",
-                               return -EINVAL);
-
-               /* DW13-DW14 */
-               if (tonga_populate_fuzzy_fan(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                               "Attempt to populate Fuzzy Fan "
-                               "Control parameters Failed !",
-                               return -EINVAL);
-
-               /* DW15-DW18 */
-               if (tonga_populate_gnb_lpml(hwmgr))
-                       PP_ASSERT_WITH_CODE(false,
-                               "Attempt to populate GnbLPML Failed !",
-                               return -EINVAL);
-
-               /* DW20 */
-               if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
-                       PP_ASSERT_WITH_CODE(
-                               false,
-                               "Attempt to populate BapmVddCBaseLeakage "
-                               "Hi and Lo Sidd Failed !",
-                               return -EINVAL);
-
-               if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
-                               (uint8_t *)&smu_data->power_tune_table,
-                               sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
-                       PP_ASSERT_WITH_CODE(false,
-                                       "Attempt to download PmFuseTable Failed !",
-                                       return -EINVAL);
-       }
-       return 0;
-}
-
-static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
-                                SMU72_Discrete_MCRegisters *mc_reg_table)
-{
-       const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
-
-       uint32_t i, j;
-
-       for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
-               if (smu_data->mc_reg_table.validflag & 1<<j) {
-                       PP_ASSERT_WITH_CODE(
-                               i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
-                               "Index of mc_reg_table->address[] array "
-                               "out of boundary",
-                               return -EINVAL);
-                       mc_reg_table->address[i].s0 =
-                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
-                       mc_reg_table->address[i].s1 =
-                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
-                       i++;
-               }
-       }
-
-       mc_reg_table->last = (uint8_t)i;
-
-       return 0;
-}
-
-/*convert register values from driver to SMC format */
-static void tonga_convert_mc_registers(
-       const struct tonga_mc_reg_entry *entry,
-       SMU72_Discrete_MCRegisterSet *data,
-       uint32_t num_entries, uint32_t valid_flag)
-{
-       uint32_t i, j;
-
-       for (i = 0, j = 0; j < num_entries; j++) {
-               if (valid_flag & 1<<j) {
-                       data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
-                       i++;
-               }
-       }
-}
-
-static int tonga_convert_mc_reg_table_entry_to_smc(
-               struct pp_hwmgr *hwmgr,
-               const uint32_t memory_clock,
-               SMU72_Discrete_MCRegisterSet *mc_reg_table_data
-               )
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t i = 0;
-
-       for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
-               if (memory_clock <=
-                       smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
-                       break;
-               }
-       }
-
-       if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
-               --i;
-
-       tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
-                               mc_reg_table_data, smu_data->mc_reg_table.last,
-                               smu_data->mc_reg_table.validflag);
-
-       return 0;
-}
-
-static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_MCRegisters *mc_regs)
-{
-       int result = 0;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       int res;
-       uint32_t i;
-
-       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
-               res = tonga_convert_mc_reg_table_entry_to_smc(
-                               hwmgr,
-                               data->dpm_table.mclk_table.dpm_levels[i].value,
-                               &mc_regs->data[i]
-                               );
-
-               if (0 != res)
-                       result = res;
-       }
-
-       return result;
-}
-
-static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t address;
-       int32_t result;
-
-       if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
-               return 0;
-
-
-       memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
-
-       result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
-
-       if (result != 0)
-               return result;
-
-
-       address = smu_data->smu7_data.mc_reg_table_start +
-                       (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
-
-       return  smu7_copy_bytes_to_smc(
-                       hwmgr, address,
-                       (uint8_t *)&smu_data->mc_regs.data[0],
-                       sizeof(SMU72_Discrete_MCRegisterSet) *
-                       data->dpm_table.mclk_table.count,
-                       SMC_RAM_END);
-}
-
-static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-
-       memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
-       result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize MCRegTable for the MC register addresses !",
-               return result;);
-
-       result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize MCRegTable for driver state !",
-               return result;);
-
-       return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
-                       (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
-}
-
-static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       struct  phm_ppt_v1_information *table_info =
-                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (table_info &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
-                       table_info->cac_dtp_table->usPowerTuneDataSetID)
-               smu_data->power_tune_defaults =
-                               &tonga_power_tune_data_set_array
-                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
-       else
-               smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
-}
-
-static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       struct SMU72_Discrete_GraphicsLevel *levels =
-                               data->smc_state_table.GraphicsLevel;
-       unsigned min_level = 1;
-
-       hwmgr->default_gfx_power_profile.activity_threshold =
-                       be16_to_cpu(levels[0].ActivityLevel);
-       hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
-       hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
-       hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
-       hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
-       hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
-       /* Workaround compute SDMA instability: disable lowest SCLK
-        * DPM level. Optimize compute power profile: Use only highest
-        * 2 power levels (if more than 2 are available), Hysteresis:
-        * 0ms up, 5ms down
-        */
-       if (data->smc_state_table.GraphicsDpmLevelCount > 2)
-               min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
-       else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
-               min_level = 1;
-       else
-               min_level = 0;
-       hwmgr->default_compute_power_profile.min_sclk =
-                       be32_to_cpu(levels[min_level].SclkFrequency);
-       hwmgr->default_compute_power_profile.up_hyst = 0;
-       hwmgr->default_compute_power_profile.down_hyst = 5;
-
-       hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
-       hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
-/**
- * Initializes the SMC table and uploads it
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    pInput  the pointer to input data (PowerState)
- * @return   always 0
- */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct tonga_smumgr *smu_data =
-                       (struct tonga_smumgr *)(hwmgr->smu_backend);
-       SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       uint8_t i;
-       pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-
-
-       memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
-
-       tonga_initialize_power_tune_defaults(hwmgr);
-
-       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
-               tonga_populate_smc_voltage_tables(hwmgr, table);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StepVddc))
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-       if (data->is_memory_gddr5)
-               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-       i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
-
-       if (i == 1 || i == 0)
-               table->SystemFlags |= 0x40;
-
-       if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
-               result = tonga_populate_ulv_state(hwmgr, table);
-               PP_ASSERT_WITH_CODE(!result,
-                       "Failed to initialize ULV state !",
-                       return result;);
-
-               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixCG_ULV_PARAMETER, 0x40035);
-       }
-
-       result = tonga_populate_smc_link_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize Link Level !", return result);
-
-       result = tonga_populate_all_graphic_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize Graphics Level !", return result);
-
-       result = tonga_populate_all_memory_levels(hwmgr);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize Memory Level !", return result);
-
-       result = tonga_populate_smc_acpi_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize ACPI Level !", return result);
-
-       result = tonga_populate_smc_vce_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize VCE Level !", return result);
-
-       result = tonga_populate_smc_acp_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize ACP Level !", return result);
-
-       result = tonga_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize SAMU Level !", return result);
-
-       /* Since only the initial state is completely set up at this
-       * point (the other states are just copies of the boot state) we only
-       * need to populate the  ARB settings for the initial state.
-       */
-       result = tonga_program_memory_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to Write ARB settings for the initial state.",
-               return result;);
-
-       result = tonga_populate_smc_uvd_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize UVD Level !", return result);
-
-       result = tonga_populate_smc_boot_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize Boot Level !", return result);
-
-       tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to populate BAPM Parameters !", return result);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ClockStretcher)) {
-               result = tonga_populate_clock_stretcher_data_table(hwmgr);
-               PP_ASSERT_WITH_CODE(!result,
-                       "Failed to populate Clock Stretcher Data Table !",
-                       return result;);
-       }
-       table->GraphicsVoltageChangeEnable  = 1;
-       table->GraphicsThermThrottleEnable  = 1;
-       table->GraphicsInterval = 1;
-       table->VoltageInterval  = 1;
-       table->ThermalInterval  = 1;
-       table->TemperatureLimitHigh =
-               table_info->cac_dtp_table->usTargetOperatingTemp *
-               SMU7_Q88_FORMAT_CONVERSION_UNIT;
-       table->TemperatureLimitLow =
-               (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
-               SMU7_Q88_FORMAT_CONVERSION_UNIT;
-       table->MemoryVoltageChangeEnable  = 1;
-       table->MemoryInterval  = 1;
-       table->VoltageResponseTime  = 0;
-       table->PhaseResponseTime  = 0;
-       table->MemoryThermThrottleEnable  = 1;
-
-       /*
-       * Cail reads current link status and reports it as cap (we cannot
-       * change this due to some previous issues we had)
-       * SMC drops the link status to lowest level after enabling
-       * DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
-       * but this time Cail reads current link status which was set to low by
-       * SMC and reports it as cap to powerplay
-       * To avoid it, we set PCIeBootLinkLevel to highest dpm level
-       */
-       PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
-                       "There must be 1 or more PCIE levels defined in PPTable.",
-                       return -EINVAL);
-
-       table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
-
-       table->PCIeGenInterval  = 1;
-
-       result = tonga_populate_vr_config(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to populate VRConfig setting !", return result);
-
-       table->ThermGpio  = 17;
-       table->SclkStepSize = 0x4000;
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
-                                               &gpio_pin_assignment)) {
-               table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_RegulatorHot);
-       } else {
-               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_RegulatorHot);
-       }
-
-       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
-                                               &gpio_pin_assignment)) {
-               table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition);
-       } else {
-               table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition);
-       }
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_Falcon_QuickTransition);
-
-       if (0) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_AutomaticDCTransition);
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_Falcon_QuickTransition);
-       }
-
-       if (atomctrl_get_pp_assign_pin(hwmgr,
-                       THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ThermalOutGPIO);
-
-               table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-
-               table->ThermOutPolarity =
-                       (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
-                       (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
-
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
-               /* if required, combine VRHot/PCC with thermal out GPIO*/
-               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_RegulatorHot) &&
-                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_CombinePCCWithThermalSignal)){
-                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
-               }
-       } else {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ThermalOutGPIO);
-
-               table->ThermOutGpio = 17;
-               table->ThermOutPolarity = 1;
-               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
-       }
-
-       for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
-               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
-       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
-       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
-       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
-       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
-       result = smu7_copy_bytes_to_smc(
-                       hwmgr,
-                       smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
-                       (uint8_t *)&(table->SystemFlags),
-                       sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
-                       SMC_RAM_END);
-
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to upload dpm data to SMC memory !", return result;);
-
-       result = tonga_init_arb_table_index(hwmgr);
-       PP_ASSERT_WITH_CODE(!result,
-                       "Failed to upload arb data to SMC memory !", return result);
-
-       tonga_populate_pm_fuses(hwmgr);
-       PP_ASSERT_WITH_CODE((!result),
-               "Failed to populate initialize pm fuses !", return result);
-
-       result = tonga_populate_initial_mc_reg_table(hwmgr);
-       PP_ASSERT_WITH_CODE((!result),
-               "Failed to populate initialize MC Reg table !", return result);
-
-       tonga_save_default_power_profile(hwmgr);
-
-       return 0;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param    hwmgr  the address of the powerplay hardware manager.
-* @param    pInput the pointer to input data
-* @param    pOutput the pointer to output data
-* @param    pStorage the pointer to temporary storage
-* @param    Result the last failure code
-* @return   result from set temperature range routine
-*/
-int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data =
-                       (struct tonga_smumgr *)(hwmgr->smu_backend);
-       SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
-       uint32_t duty100;
-       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-       uint16_t fdo_min, slope1, slope2;
-       uint32_t reference_clock;
-       int res;
-       uint64_t tmp64;
-
-       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_MicrocodeFanControl))
-               return 0;
-
-       if (hwmgr->thermal_controller.fanInfo.bNoFan) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       if (0 == smu_data->smu7_data.fan_table_start) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
-                                               CGS_IND_REG__SMC,
-                                               CG_FDO_CTRL1, FMAX_DUTY100);
-
-       if (0 == duty100) {
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_MicrocodeFanControl);
-               return 0;
-       }
-
-       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
-       do_div(tmp64, 10000);
-       fdo_min = (uint16_t)tmp64;
-
-       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
-                  hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
-       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
-                 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
-       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
-                   hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
-       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
-                   hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
-       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
-       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
-       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
-       fan_table.Slope1 = cpu_to_be16(slope1);
-       fan_table.Slope2 = cpu_to_be16(slope2);
-
-       fan_table.FdoMin = cpu_to_be16(fdo_min);
-
-       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
-       fan_table.HystUp = cpu_to_be16(1);
-
-       fan_table.HystSlope = cpu_to_be16(1);
-
-       fan_table.TempRespLim = cpu_to_be16(5);
-
-       reference_clock = smu7_get_xclk(hwmgr);
-
-       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
-       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
-       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
-       fan_table.FanControl_GL_Flag = 1;
-
-       res = smu7_copy_bytes_to_smc(hwmgr,
-                                       smu_data->smu7_data.fan_table_start,
-                                       (uint8_t *)&fan_table,
-                                       (uint32_t)sizeof(fan_table),
-                                       SMC_RAM_END);
-
-       return 0;
-}
-
-
-static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (data->need_update_smu7_dpm_table &
-               (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
-               return tonga_program_memory_timing_parameters(hwmgr);
-
-       return 0;
-}
-
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct tonga_smumgr *smu_data =
-                       (struct tonga_smumgr *)(hwmgr->smu_backend);
-
-       int result = 0;
-       uint32_t low_sclk_interrupt_threshold = 0;
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkThrottleLowNotification)
-               && (hwmgr->gfx_arbiter.sclk_threshold !=
-                               data->low_sclk_interrupt_threshold)) {
-               data->low_sclk_interrupt_threshold =
-                               hwmgr->gfx_arbiter.sclk_threshold;
-               low_sclk_interrupt_threshold =
-                               data->low_sclk_interrupt_threshold;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
-               result = smu7_copy_bytes_to_smc(
-                               hwmgr,
-                               smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU72_Discrete_DpmTable,
-                                       LowSclkInterruptThreshold),
-                               (uint8_t *)&low_sclk_interrupt_threshold,
-                               sizeof(uint32_t),
-                               SMC_RAM_END);
-       }
-
-       result = tonga_update_and_upload_mc_reg_table(hwmgr);
-
-       PP_ASSERT_WITH_CODE((!result),
-                               "Failed to upload MC reg table !",
-                               return result);
-
-       result = tonga_program_mem_timing_parameters(hwmgr);
-       PP_ASSERT_WITH_CODE((result == 0),
-                       "Failed to program memory timing parameters !",
-                       );
-
-       return result;
-}
-
-uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
-{
-       switch (type) {
-       case SMU_SoftRegisters:
-               switch (member) {
-               case HandshakeDisables:
-                       return offsetof(SMU72_SoftRegisters, HandshakeDisables);
-               case VoltageChangeTimeout:
-                       return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
-               case AverageGraphicsActivity:
-                       return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
-               case PreVBlankGap:
-                       return offsetof(SMU72_SoftRegisters, PreVBlankGap);
-               case VBlankTimeout:
-                       return offsetof(SMU72_SoftRegisters, VBlankTimeout);
-               case UcodeLoadStatus:
-                       return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
-               }
-       case SMU_Discrete_DpmTable:
-               switch (member) {
-               case UvdBootLevel:
-                       return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
-               case VceBootLevel:
-                       return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-               case LowSclkInterruptThreshold:
-                       return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
-               }
-       }
-       pr_warn("can't get the offset of type %x member %x\n", type, member);
-       return 0;
-}
-
-uint32_t tonga_get_mac_definition(uint32_t value)
-{
-       switch (value) {
-       case SMU_MAX_LEVELS_GRAPHICS:
-               return SMU72_MAX_LEVELS_GRAPHICS;
-       case SMU_MAX_LEVELS_MEMORY:
-               return SMU72_MAX_LEVELS_MEMORY;
-       case SMU_MAX_LEVELS_LINK:
-               return SMU72_MAX_LEVELS_LINK;
-       case SMU_MAX_ENTRIES_SMIO:
-               return SMU72_MAX_ENTRIES_SMIO;
-       case SMU_MAX_LEVELS_VDDC:
-               return SMU72_MAX_LEVELS_VDDC;
-       case SMU_MAX_LEVELS_VDDGFX:
-               return SMU72_MAX_LEVELS_VDDGFX;
-       case SMU_MAX_LEVELS_VDDCI:
-               return SMU72_MAX_LEVELS_VDDCI;
-       case SMU_MAX_LEVELS_MVDD:
-               return SMU72_MAX_LEVELS_MVDD;
-       }
-       pr_warn("can't get the mac value %x\n", value);
-
-       return 0;
-}
-
-
-static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       smu_data->smc_state_table.UvdBootLevel = 0;
-       if (table_info->mm_dep_table->count > 0)
-               smu_data->smc_state_table.UvdBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0x00FFFFFF;
-       mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
-       cgs_write_ind_register(hwmgr->device,
-                               CGS_IND_REG__SMC,
-                               mm_boot_level_offset, mm_boot_level_value);
-
-       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_UVDDPM) ||
-               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_UVDDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
-       return 0;
-}
-
-static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data =
-                               (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-
-       smu_data->smc_state_table.VceBootLevel =
-               (uint8_t) (table_info->mm_dep_table->count - 1);
-
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                                       offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFF00FFFF;
-       mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_VCEDPM_SetEnabledMask,
-                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
-       return 0;
-}
-
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
-int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
-       switch (type) {
-       case SMU_UVD_TABLE:
-               tonga_update_uvd_smc_table(hwmgr);
-               break;
-       case SMU_VCE_TABLE:
-               tonga_update_vce_smc_table(hwmgr);
-               break;
-       case SMU_SAMU_TABLE:
-               tonga_update_samu_smc_table(hwmgr);
-               break;
-       default:
-               break;
-       }
-       return 0;
-}
-
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @return   always 0
- */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-
-       uint32_t tmp;
-       int result;
-       bool error = false;
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, DpmTable),
-                               &tmp, SMC_RAM_END);
-
-       if (!result)
-               smu_data->smu7_data.dpm_table_start = tmp;
-
-       error |= (result != 0);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, SoftRegisters),
-                               &tmp, SMC_RAM_END);
-
-       if (!result) {
-               data->soft_regs_start = tmp;
-               smu_data->smu7_data.soft_regs_start = tmp;
-       }
-
-       error |= (result != 0);
-
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, mcRegisterTable),
-                               &tmp, SMC_RAM_END);
-
-       if (!result)
-               smu_data->smu7_data.mc_reg_table_start = tmp;
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, FanTable),
-                               &tmp, SMC_RAM_END);
-
-       if (!result)
-               smu_data->smu7_data.fan_table_start = tmp;
-
-       error |= (result != 0);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
-                               &tmp, SMC_RAM_END);
-
-       if (!result)
-               smu_data->smu7_data.arb_table_start = tmp;
-
-       error |= (result != 0);
-
-       result = smu7_read_smc_sram_dword(hwmgr,
-                               SMU72_FIRMWARE_HEADER_LOCATION +
-                               offsetof(SMU72_Firmware_Header, Version),
-                               &tmp, SMC_RAM_END);
-
-       if (!result)
-               hwmgr->microcode_version_info.SMC = tmp;
-
-       error |= (result != 0);
-
-       return error ? 1 : 0;
-}
-
-/*---------------------------MC----------------------------*/
-
-static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
-       return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
-{
-       bool result = true;
-
-       switch (in_reg) {
-       case  mmMC_SEQ_RAS_TIMING:
-               *out_reg = mmMC_SEQ_RAS_TIMING_LP;
-               break;
-
-       case  mmMC_SEQ_DLL_STBY:
-               *out_reg = mmMC_SEQ_DLL_STBY_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CMD0:
-               *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CMD1:
-               *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
-               break;
-
-       case  mmMC_SEQ_G5PDX_CTRL:
-               *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
-               break;
-
-       case mmMC_SEQ_CAS_TIMING:
-               *out_reg = mmMC_SEQ_CAS_TIMING_LP;
-               break;
-
-       case mmMC_SEQ_MISC_TIMING:
-               *out_reg = mmMC_SEQ_MISC_TIMING_LP;
-               break;
-
-       case mmMC_SEQ_MISC_TIMING2:
-               *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
-               break;
-
-       case mmMC_SEQ_PMG_DVS_CMD:
-               *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
-               break;
-
-       case mmMC_SEQ_PMG_DVS_CTL:
-               *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
-               break;
-
-       case mmMC_SEQ_RD_CTL_D0:
-               *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
-               break;
-
-       case mmMC_SEQ_RD_CTL_D1:
-               *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_D0:
-               *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_D1:
-               *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
-               break;
-
-       case mmMC_PMG_CMD_EMRS:
-               *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS1:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
-               break;
-
-       case mmMC_SEQ_PMG_TIMING:
-               *out_reg = mmMC_SEQ_PMG_TIMING_LP;
-               break;
-
-       case mmMC_PMG_CMD_MRS2:
-               *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
-               break;
-
-       case mmMC_SEQ_WR_CTL_2:
-               *out_reg = mmMC_SEQ_WR_CTL_2_LP;
-               break;
-
-       default:
-               result = false;
-               break;
-       }
-
-       return result;
-}
-
-static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
-{
-       uint32_t i;
-       uint16_t address;
-
-       for (i = 0; i < table->last; i++) {
-               table->mc_reg_address[i].s0 =
-                       tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
-                                                       &address) ?
-                                                       address :
-                                                table->mc_reg_address[i].s1;
-       }
-       return 0;
-}
-
-static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
-                                       struct tonga_mc_reg_table *ni_table)
-{
-       uint8_t i, j;
-
-       PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-               "Invalid VramInfo table.", return -EINVAL);
-       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
-               "Invalid VramInfo table.", return -EINVAL);
-
-       for (i = 0; i < table->last; i++)
-               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-
-       ni_table->last = table->last;
-
-       for (i = 0; i < table->num_entries; i++) {
-               ni_table->mc_reg_table_entry[i].mclk_max =
-                       table->mc_reg_table_entry[i].mclk_max;
-               for (j = 0; j < table->last; j++) {
-                       ni_table->mc_reg_table_entry[i].mc_data[j] =
-                               table->mc_reg_table_entry[i].mc_data[j];
-               }
-       }
-
-       ni_table->num_entries = table->num_entries;
-
-       return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size, we need to recover them here.
- * 1.   when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to
- *      mmMC_PMG_CMD_EMRS /_LP[15:0]. Bit[15:0] MRS, need to be update
- *      mmMC_PMG_CMD_MRS/_LP[15:0]
- * 2.   when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to
- *      mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3.   need to set these data for each clock range
- * @param    hwmgr the address of the powerplay hardware manager.
- * @param    table the address of MCRegTable
- * @return   always 0
- */
-static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
-                                       struct tonga_mc_reg_table *table)
-{
-       uint8_t i, j, k;
-       uint32_t temp_reg;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       for (i = 0, j = table->last; i < table->last; i++) {
-               PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                       "Invalid VramInfo table.", return -EINVAL);
-
-               switch (table->mc_reg_address[i].s1) {
-
-               case mmMC_SEQ_MISC1:
-                       temp_reg = cgs_read_register(hwmgr->device,
-                                                       mmMC_PMG_CMD_EMRS);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       ((temp_reg & 0xffff0000)) |
-                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       (temp_reg & 0xffff0000) |
-                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
-                               if (!data->is_memory_gddr5)
-                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-
-                       if (!data->is_memory_gddr5) {
-                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
-                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
-                               for (k = 0; k < table->num_entries; k++)
-                                       table->mc_reg_table_entry[k].mc_data[j] =
-                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
-                               j++;
-                               PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                                       "Invalid VramInfo table.", return -EINVAL);
-                       }
-
-                       break;
-
-               case mmMC_SEQ_RESERVE_M:
-                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
-                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
-                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
-                       for (k = 0; k < table->num_entries; k++) {
-                               table->mc_reg_table_entry[k].mc_data[j] =
-                                       (temp_reg & 0xffff0000) |
-                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-                       }
-                       j++;
-                       PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
-                               "Invalid VramInfo table.", return -EINVAL);
-                       break;
-
-               default:
-                       break;
-               }
-
-       }
-
-       table->last = j;
-
-       return 0;
-}
-
-static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
-{
-       uint8_t i, j;
-
-       for (i = 0; i < table->last; i++) {
-               for (j = 1; j < table->num_entries; j++) {
-                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
-                               table->mc_reg_table_entry[j].mc_data[i]) {
-                               table->validflag |= (1<<i);
-                               break;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-       int result;
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       pp_atomctrl_mc_reg_table *table;
-       struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
-       uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
-
-       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
-       if (table == NULL)
-               return -ENOMEM;
-
-       /* Program additional LP registers that are no longer programmed by VBIOS */
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
-                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
-                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
-                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
-                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
-       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
-                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
-       memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
-       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
-       if (!result)
-               result = tonga_copy_vbios_smc_reg_table(table, ni_table);
-
-       if (!result) {
-               tonga_set_s0_mc_reg_index(ni_table);
-               result = tonga_set_mc_special_registers(hwmgr, ni_table);
-       }
-
-       if (!result)
-               tonga_set_valid_flag(ni_table);
-
-       kfree(table);
-
-       return result;
-}
-
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
-       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
-                       ? true : false;
-}
-
-int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
-               struct amd_pp_profile *request)
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
-                       (hwmgr->smu_backend);
-       struct SMU72_Discrete_GraphicsLevel *levels =
-                       smu_data->smc_state_table.GraphicsLevel;
-       uint32_t array = smu_data->smu7_data.dpm_table_start +
-                       offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
-       uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
-                       SMU72_MAX_LEVELS_GRAPHICS;
-       uint32_t i;
-
-       for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
-               levels[i].ActivityLevel =
-                               cpu_to_be16(request->activity_threshold);
-               levels[i].EnabledForActivity = 1;
-               levels[i].UpHyst = request->up_hyst;
-               levels[i].DownHyst = request->down_hyst;
-       }
-
-       return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
-                               array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
deleted file mode 100644 (file)
index 9d6a78a..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _TONGA_SMC_H
-#define _TONGA_SMC_H
-
-#include "smumgr.h"
-#include "smu72.h"
-
-
-#define ASICID_IS_TONGA_P(wDID, bRID)   \
-       (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
-       || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
-
-
-struct tonga_pt_defaults {
-       uint8_t   svi_load_line_en;
-       uint8_t   svi_load_line_vddC;
-       uint8_t   tdc_vddc_throttle_release_limit_perc;
-       uint8_t   tdc_mawt;
-       uint8_t   tdc_waterfall_ctl;
-       uint8_t   dte_ambient_temp_base;
-       uint32_t  display_cac;
-       uint32_t  bapm_temp_gradient;
-       uint16_t  bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
-       uint16_t  bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
-};
-
-int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
-int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
-uint32_t tonga_get_mac_definition(uint32_t value);
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
-int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
-               struct amd_pp_profile *request);
-#endif
-
index d22cf218cf187c25ff10842eee0ad99c6e361c15..0a8e48bff219e2f3187e1958fb0c78b217d32d36 100644 (file)
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
 #include "cgs_common.h"
-#include "tonga_smc.h"
 #include "smu7_smumgr.h"
 
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+
+#include "atombios.h"
+
+#include "pppcielanes.h"
+#include "pp_endian.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX    1
+#define VOLTAGE_VID_OFFSET_SCALE1   625
+#define VOLTAGE_VID_OFFSET_SCALE2   100
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define VDDC_VDDCI_DELTA            200
+
+
+static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,  TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,        BAPM_TEMP_GRADIENT
+ */
+       {1,               0xF,             0xFD,                0x19,
+        5,               45,                 0,              0xB0000,
+        {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
+               0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+        {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
+               0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
+       },
+};
+
+/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
+static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
+       {600, 1050, 3, 0},
+       {600, 1050, 6, 1}
+};
+
+/* [FF, SS] type, [] 4 voltage ranges,
+ * and [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
+       { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+       { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
+};
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
+static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
+       {0, 1, 3, 2, 4, 5},
+       {0, 2, 4, 5, 6, 5}
+};
 
 static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
 {
@@ -95,7 +155,6 @@ static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-
 static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
 {
        int result = 0;
@@ -160,13 +219,6 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
        return result;
 }
 
-/**
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    smcAddress the address in the SMC RAM to access.
- * @param    value to write to the SMC SRAM.
- */
 static int tonga_smu_init(struct pp_hwmgr *hwmgr)
 {
        struct tonga_smumgr *tonga_priv = NULL;
@@ -187,6 +239,3053 @@ static int tonga_smu_init(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+
+static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+       phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+       uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+       uint32_t i = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                          (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       /* clock - voltage dependency table is empty table */
+       if (allowed_clock_voltage_table->count == 0)
+               return -EINVAL;
+
+       for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+               /* find first sclk bigger than request */
+               if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+                       voltage->VddGfx = phm_get_voltage_index(
+                                       pptable_info->vddgfx_lookup_table,
+                               allowed_clock_voltage_table->entries[i].vddgfx);
+                       voltage->Vddc = phm_get_voltage_index(
+                                               pptable_info->vddc_lookup_table,
+                                 allowed_clock_voltage_table->entries[i].vddc);
+
+                       if (allowed_clock_voltage_table->entries[i].vddci)
+                               voltage->Vddci =
+                                       phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
+                       else
+                               voltage->Vddci =
+                                       phm_get_voltage_id(&data->vddci_voltage_table,
+                                               allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
+
+
+                       if (allowed_clock_voltage_table->entries[i].mvdd)
+                               *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+
+                       voltage->Phases = 1;
+                       return 0;
+               }
+       }
+
+       /* sclk is bigger than max sclk in the dependence table */
+       voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+               allowed_clock_voltage_table->entries[i-1].vddgfx);
+       voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
+               allowed_clock_voltage_table->entries[i-1].vddc);
+
+       if (allowed_clock_voltage_table->entries[i-1].vddci)
+               voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
+                       allowed_clock_voltage_table->entries[i-1].vddci);
+
+       if (allowed_clock_voltage_table->entries[i-1].mvdd)
+               *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+
+       return 0;
+}
+
+static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       unsigned int count;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+               table->VddcLevelCount = data->vddc_voltage_table.count;
+               for (count = 0; count < table->VddcLevelCount; count++) {
+                       table->VddcTable[count] =
+                               PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
+               }
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+       }
+       return 0;
+}
+
+static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       unsigned int count;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+               table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
+               for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
+                       table->VddGfxTable[count] =
+                               PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
+               }
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
+       }
+       return 0;
+}
+
+static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count;
+
+       table->VddciLevelCount = data->vddci_voltage_table.count;
+       for (count = 0; count < table->VddciLevelCount; count++) {
+               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+                       table->VddciTable[count] =
+                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+               } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+                       table->SmioTable1.Pattern[count].Voltage =
+                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+                       table->SmioTable1.Pattern[count].Smio =
+                               (uint8_t) count;
+                       table->Smio[count] |=
+                               data->vddci_voltage_table.entries[count].smio_low;
+                       table->VddciTable[count] =
+                               PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+               }
+       }
+
+       table->SmioMask1 = data->vddci_voltage_table.mask_low;
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+       return 0;
+}
+
+static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t count;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               table->MvddLevelCount = data->mvdd_voltage_table.count;
+               for (count = 0; count < table->MvddLevelCount; count++) {
+                       table->SmioTable2.Pattern[count].Voltage =
+                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                       /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+                       table->SmioTable2.Pattern[count].Smio =
+                               (uint8_t) count;
+                       table->Smio[count] |=
+                               data->mvdd_voltage_table.entries[count].smio_low;
+               }
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+       }
+
+       return 0;
+}
+
+static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       uint32_t count;
+       uint8_t index = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
+                                          pptable_info->vddgfx_lookup_table;
+       struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
+                                               pptable_info->vddc_lookup_table;
+
+       /* table is already swapped, so in order to use the value from it
+        * we need to swap it back.
+        */
+       uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
+       uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
+
+       for (count = 0; count < vddc_level_count; count++) {
+               /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
+               index = phm_get_voltage_index(vddc_lookup_table,
+                       data->vddc_voltage_table.entries[count].value);
+               table->BapmVddcVidLoSidd[count] =
+                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+               table->BapmVddcVidHiSidd[count] =
+                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+               table->BapmVddcVidHiSidd2[count] =
+                       convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+       }
+
+       if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
+               /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
+               for (count = 0; count < vddgfx_level_count; count++) {
+                       index = phm_get_voltage_index(vddgfx_lookup_table,
+                               convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
+                       table->BapmVddGfxVidHiSidd2[count] =
+                               convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
+               }
+       } else {
+               for (count = 0; count < vddc_level_count; count++) {
+                       index = phm_get_voltage_index(vddc_lookup_table,
+                               data->vddc_voltage_table.entries[count].value);
+                       table->BapmVddGfxVidLoSidd[count] =
+                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+                       table->BapmVddGfxVidHiSidd[count] =
+                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+                       table->BapmVddGfxVidHiSidd2[count] =
+                               convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+               }
+       }
+
+       return 0;
+}
+
+static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+       SMU72_Discrete_DpmTable *table)
+{
+       int result;
+
+       result = tonga_populate_smc_vddc_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate VDDC voltage table to SMC",
+                       return -EINVAL);
+
+       result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate VDDCI voltage table to SMC",
+                       return -EINVAL);
+
+       result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate VDDGFX voltage table to SMC",
+                       return -EINVAL);
+
+       result = tonga_populate_smc_mvdd_table(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate MVDD voltage table to SMC",
+                       return -EINVAL);
+
+       result = tonga_populate_cac_tables(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+                       "can not populate CAC voltage tables to SMC",
+                       return -EINVAL);
+
+       return 0;
+}
+
+static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
+               struct SMU72_Discrete_Ulv *state)
+{
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       state->CcPwrDynRm = 0;
+       state->CcPwrDynRm1 = 0;
+
+       state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+       state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+                       VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+       state->VddcPhase = 1;
+
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+       CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+       CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+       return 0;
+}
+
+static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
+               struct SMU72_Discrete_DpmTable *table)
+{
+       return tonga_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       uint32_t i;
+
+       /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+       for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+               table->LinkLevel[i].PcieGenSpeed  =
+                       (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+               table->LinkLevel[i].PcieLaneCount =
+                       (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+               table->LinkLevel[i].EnabledForActivity =
+                       1;
+               table->LinkLevel[i].SPC =
+                       (uint8_t)(data->pcie_spc_cap & 0xff);
+               table->LinkLevel[i].DownThreshold =
+                       PP_HOST_TO_SMC_UL(5);
+               table->LinkLevel[i].UpThreshold =
+                       PP_HOST_TO_SMC_UL(30);
+       }
+
+       smu_data->smc_state_table.LinkLevelCount =
+               (uint8_t)dpm_table->pcie_speed_table.count;
+       data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+               phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+       return 0;
+}
+
+static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+               uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       pp_atomctrl_clock_dividers_vi dividers;
+       uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+       uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+       uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+       uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+       uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+       uint32_t    reference_clock;
+       uint32_t reference_divider;
+       uint32_t fbdiv;
+       int result;
+
+       /* get the engine clock dividers for this clock value*/
+       result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock,  &dividers);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+       /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+       reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+       reference_divider = 1 + dividers.uc_pll_ref_div;
+
+       /* low 14 bits is fraction and high 12 bits is divider*/
+       fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+       /* SPLL_FUNC_CNTL setup*/
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+               CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+       spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+               CG_SPLL_FUNC_CNTL, SPLL_PDIV_A,  dividers.uc_pll_post_div);
+
+       /* SPLL_FUNC_CNTL_3 setup*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+               CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+       /* set to use fractional accumulation*/
+       spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+               CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+               pp_atomctrl_internal_ss_info ss_info;
+
+               uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+               if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+                       /*
+                       * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+                       * ss_info.speed_spectrum_rate -- in unit of khz
+                       */
+                       /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+                       uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+                       /* clkv = 2 * D * fbdiv / NS */
+                       uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+                       cg_spll_spread_spectrum =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+                       cg_spll_spread_spectrum =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+                       cg_spll_spread_spectrum_2 =
+                               PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+               }
+       }
+
+       sclk->SclkFrequency        = engine_clock;
+       sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
+       sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
+       sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
+       sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+       sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;
+
+       return 0;
+}
+
+static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+                                               uint32_t engine_clock,
+                               uint16_t sclk_activity_level_threshold,
+                               SMU72_Discrete_GraphicsLevel *graphic_level)
+{
+       int result;
+       uint32_t mvdd;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                           (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+       /* populate graphics levels*/
+       result = tonga_get_dependency_volt_by_clk(hwmgr,
+               pptable_info->vdd_dep_on_sclk, engine_clock,
+               &graphic_level->MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((!result),
+               "can not find VDDC voltage value for VDDC "
+               "engine clock dependency table", return result);
+
+       /* SCLK frequency in units of 10KHz*/
+       graphic_level->SclkFrequency = engine_clock;
+       /* Indicates maximum activity level for this performance level. 50% for now*/
+       graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+       graphic_level->CcPwrDynRm = 0;
+       graphic_level->CcPwrDynRm1 = 0;
+       /* this level can be used if activity is high enough.*/
+       graphic_level->EnabledForActivity = 0;
+       /* this level can be used for throttling.*/
+       graphic_level->EnabledForThrottle = 1;
+       graphic_level->UpHyst = 0;
+       graphic_level->DownHyst = 0;
+       graphic_level->VoltageDownHyst = 0;
+       graphic_level->PowerThrottle = 0;
+
+       data->display_timing.min_clock_in_sr =
+                       hwmgr->display_config.min_core_set_clock_in_sr;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep))
+               graphic_level->DeepSleepDivId =
+                               smu7_get_sleep_divider_id_from_clock(engine_clock,
+                                               data->display_timing.min_clock_in_sr);
+
+       /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+       graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+       if (!result) {
+               /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+               /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+               CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+       }
+
+       return result;
+}
+
+static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct smu7_dpm_table *dpm_table = &data->dpm_table;
+       struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
+       uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
+       uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+
+       uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
+                                               SMU72_MAX_LEVELS_GRAPHICS;
+
+       SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+       uint32_t i, max_entry;
+       uint8_t highest_pcie_level_enabled = 0;
+       uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+       uint8_t count = 0;
+       int result = 0;
+
+       memset(levels, 0x00, level_array_size);
+
+       for (i = 0; i < dpm_table->sclk_table.count; i++) {
+               result = tonga_populate_single_graphic_level(hwmgr,
+                                       dpm_table->sclk_table.dpm_levels[i].value,
+                                       (uint16_t)smu_data->activity_target[i],
+                                       &(smu_data->smc_state_table.GraphicsLevel[i]));
+               if (result != 0)
+                       return result;
+
+               /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+               if (i > 1)
+                       smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+       }
+
+       /* Only enable level 0 for now. */
+       smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+       /* set highest level watermark to high */
+       if (dpm_table->sclk_table.count > 1)
+               smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+                       PPSMC_DISPLAY_WATERMARK_HIGH;
+
+       smu_data->smc_state_table.GraphicsDpmLevelCount =
+               (uint8_t)dpm_table->sclk_table.count;
+       data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+               phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+       if (pcie_table != NULL) {
+               PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+                       "There must be 1 or more PCIE levels defined in PPTable.",
+                       return -EINVAL);
+               max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+               for (i = 0; i < dpm_table->sclk_table.count; i++) {
+                       smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+                               (uint8_t) ((i < max_entry) ? i : max_entry);
+               }
+       } else {
+               if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+                       pr_err("Pcie Dpm Enablemask is 0 !");
+
+               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                       (1<<(highest_pcie_level_enabled+1))) != 0)) {
+                       highest_pcie_level_enabled++;
+               }
+
+               while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                       (1<<lowest_pcie_level_enabled)) == 0)) {
+                       lowest_pcie_level_enabled++;
+               }
+
+               while ((count < highest_pcie_level_enabled) &&
+                               ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+                                       (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+                       count++;
+               }
+               mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+                       (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+               /* set pcieDpmLevel to highest_pcie_level_enabled*/
+               for (i = 2; i < dpm_table->sclk_table.count; i++)
+                       smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+               smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+               /* set pcieDpmLevel to mid_pcie_level_enabled*/
+               smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+       }
+       /* level count will send to smc once at init smc table and never change*/
+       result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
+                               (uint8_t *)levels, (uint32_t)level_array_size,
+                                                               SMC_RAM_END);
+
+       return result;
+}
+
+static int tonga_calculate_mclk_params(
+               struct pp_hwmgr *hwmgr,
+               uint32_t memory_clock,
+               SMU72_Discrete_MemoryLevel *mclk,
+               bool strobe_mode,
+               bool dllStateOn
+               )
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+       uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+       uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+       uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+       uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+       uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+       uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+       uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+       uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+       pp_atomctrl_memory_clock_param mpll_param;
+       int result;
+
+       result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+                               memory_clock, &mpll_param, strobe_mode);
+       PP_ASSERT_WITH_CODE(
+                       !result,
+                       "Error retrieving Memory Clock Parameters from VBIOS.",
+                       return result);
+
+       /* MPLL_FUNC_CNTL setup*/
+       mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
+                                       mpll_param.bw_ctrl);
+
+       /* MPLL_FUNC_CNTL_1 setup*/
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                       MPLL_FUNC_CNTL_1, CLKF,
+                                       mpll_param.mpll_fb_divider.cl_kf);
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                       MPLL_FUNC_CNTL_1, CLKFRAC,
+                                       mpll_param.mpll_fb_divider.clk_frac);
+       mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
+                                               MPLL_FUNC_CNTL_1, VCO_MODE,
+                                               mpll_param.vco_mode);
+
+       /* MPLL_AD_FUNC_CNTL setup*/
+       mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+                                       MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
+                                       mpll_param.mpll_post_divider);
+
+       if (data->is_memory_gddr5) {
+               /* MPLL_DQ_FUNC_CNTL setup*/
+               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+                                               MPLL_DQ_FUNC_CNTL, YCLK_SEL,
+                                               mpll_param.yclk_sel);
+               mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
+                                               MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
+                                               mpll_param.mpll_post_divider);
+       }
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+               /*
+                ************************************
+                Fref = Reference Frequency
+                NF = Feedback divider ratio
+                NR = Reference divider ratio
+                Fnom = Nominal VCO output frequency = Fref * NF / NR
+                Fs = Spreading Rate
+                D = Percentage down-spread / 2
+                Fint = Reference input frequency to PFD = Fref / NR
+                NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+                CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+                NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+                CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+                *************************************
+                */
+               pp_atomctrl_internal_ss_info ss_info;
+               uint32_t freq_nom;
+               uint32_t tmp;
+               uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+               /* for GDDR5 for all modes and DDR3 */
+               if (1 == mpll_param.qdr)
+                       freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+               else
+                       freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+               /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1*/
+               tmp = (freq_nom / reference_clock);
+               tmp = tmp * tmp;
+
+               if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+                       /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+                       /* ss.Info.speed_spectrum_rate -- in unit of khz */
+                       /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+                       /*     = reference_clock * 5 / speed_spectrum_rate */
+                       uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+                       /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+                       /*     = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+                       uint32_t clkv =
+                               (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+                                                       ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+                       mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+                       mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+               }
+       }
+
+       /* MCLK_PWRMGT_CNTL setup */
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+       mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+               MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+       /* Save the result data to outpupt memory level structure */
+       mclk->MclkFrequency   = memory_clock;
+       mclk->MpllFuncCntl    = mpll_func_cntl;
+       mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
+       mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
+       mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
+       mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
+       mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
+       mclk->DllCntl         = dll_cntl;
+       mclk->MpllSs1         = mpll_ss1;
+       mclk->MpllSs2         = mpll_ss2;
+
+       return 0;
+}
+
/*
 * tonga_get_mclk_frequency_ratio - map a GDDR5 memory clock (10 kHz units)
 * to the 4-bit frequency-ratio index used by the MC sequencer.
 *
 * The index is clamped to [0x0, 0xf]; the band edges and step size differ
 * between strobe and non-strobe mode.
 */
static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	/* Per-mode clamp bounds, ratio base and step (all in 10 kHz units). */
	const uint32_t low  = strobe_mode ? 12500 : 65000;
	const uint32_t high = strobe_mode ? 47500 : 135000;
	const uint32_t base = strobe_mode ? 10000 : 60000;
	const uint32_t step = strobe_mode ? 2500 : 5000;

	if (memory_clock < low)
		return 0x00;
	if (memory_clock > high)
		return 0x0f;

	return (uint8_t)((memory_clock - base) / step);
}
+
/*
 * tonga_get_ddr3_mclk_frequency_ratio - map a DDR3 memory clock
 * (10 kHz units) to the 4-bit frequency-ratio index.
 *
 * Clocks below 10000 map to 0, clocks at or above 80000 saturate at 0xf,
 * and the band in between advances one step every 5000 units starting
 * from index 1.
 */
static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;

	return (uint8_t)((memory_clock - 10000) / 5000 + 1);
}
+
+
/*
 * tonga_populate_single_memory_level - fill one SMU72 memory (MCLK) DPM
 * level for the given memory clock.
 *
 * Looks up the minimum voltage / MVDD for @memory_clock, decides stutter,
 * strobe and EDC modes from fixed thresholds, derives the MPLL register
 * values via tonga_calculate_mclk_params(), and finally byte-swaps the
 * multi-byte fields into the SMC's expected layout.
 *
 * Returns 0 on success or a negative error code from the voltage lookup
 * or MPLL parameter calculation.
 */
static int tonga_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU72_Discrete_MemoryLevel *memory_level
		)
{
	uint32_t mvdd = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info =
			  (struct phm_ppt_v1_information *)(hwmgr->pptable);
	int result = 0;
	bool dll_state_on;
	struct cgs_display_info info = {0};
	/* Mode-decision thresholds, in the same 10 kHz units as memory_clock. */
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_stutter_mode_threshold = 30000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	/* Minimum VDDC/MVDD for this clock, from the MCLK dependency table. */
	if (NULL != pptable_info->vdd_dep_on_mclk) {
		result = tonga_get_dependency_volt_by_clk(hwmgr,
				pptable_info->vdd_dep_on_mclk,
				memory_clock,
				&memory_level->MinVoltage, &mvdd);
		PP_ASSERT_WITH_CODE(
			!result,
			"can not find MinVddc voltage value from memory VDDC "
			"voltage dependency table",
			return result);
	}

	/* Fall back to the VBIOS boot-up MVDD when MVDD is not driver-controlled. */
	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
	else
		memory_level->MinMvdd = mvdd;

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 0;
	memory_level->UpHyst = 0;
	memory_level->DownHyst = 100;
	memory_level->VoltageDownHyst = 0;

	/* Indicates maximum activity level for this performance level.*/
	memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later.*/
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	cgs_get_active_displays_info(hwmgr->device, &info);
	data->display_timing.num_existing_displays = info.display_count;

	/* Stutter mode: only for low clocks, UVD idle, the display pipe
	 * already allowing stutter, and 1-2 active displays. */
	if ((mclk_stutter_mode_threshold != 0) &&
	    (memory_clock <= mclk_stutter_mode_threshold) &&
	    (!data->is_uvd_enabled)
	    && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
	    && (data->display_timing.num_existing_displays <= 2)
	    && (data->display_timing.num_existing_displays != 0))
		memory_level->StutterEnable = 1;

	/* decide strobe mode*/
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio*/
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		/* DLL state comes from the MC sequencer fuses; which fuse
		 * word applies depends on the strobe ratio comparison. */
		if (memory_level->StrobeEnable) {
			if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			} else {
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
			}

		} else {
			dll_state_on = data->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio =
			tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = tonga_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	/* On success, convert all multi-byte fields to the SMC byte order. */
	if (!result) {
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
		/* MCLK frequency in units of 10KHz*/
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level.*/
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
+
/*
 * tonga_populate_all_memory_levels - build every memory (MCLK) DPM level
 * from the driver's dpm_table and upload the level array to SMC RAM.
 *
 * Also applies table-wide policy: only level 0 is enabled for activity,
 * level 0 gets a low activity target (0x1F) so stutter-mode MC activity
 * does not push DPM up, and the highest level gets the high display
 * watermark.
 *
 * Returns 0 on success, -EINVAL for a zero memory clock, or an error from
 * level population / the SMC copy.
 */
int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct tonga_smumgr *smu_data =
			(struct tonga_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;

	/* populate MCLK dpm table to SMU7 */
	uint32_t level_array_address =
				smu_data->smu7_data.dpm_table_start +
				offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size =
				sizeof(SMU72_Discrete_MemoryLevel) *
				SMU72_MAX_LEVELS_MEMORY;
	SMU72_Discrete_MemoryLevel *levels =
				smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero",
			return -EINVAL);
		result = tonga_populate_single_memory_level(
				hwmgr,
				dpm_table->mclk_table.dpm_levels[i].value,
				&(smu_data->smc_state_table.MemoryLevel[i]));
		if (result)
			return result;
	}

	/* Only enable level 0 for now.*/
	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	/*
	* in order to prevent MC activity from stutter mode to push DPM up.
	* the UVD change complements this by putting the MCLK in a higher state
	* by default such that we are not effected by up threshold or and MCLK DPM latency.
	*/
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	/* set highest level watermark to high*/
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	/* level count will send to smc once at init smc table and never change*/
	result = smu7_copy_bytes_to_smc(hwmgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}
+
+static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+                               uint32_t mclk, SMIO_Pattern *smio_pattern)
+{
+       const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t i = 0;
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+               /* find mvdd value which clock is more than request */
+               for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+                       if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+                               /* Always round to higher voltage. */
+                               smio_pattern->Voltage =
+                                     data->mvdd_voltage_table.entries[i].value;
+                               break;
+                       }
+               }
+
+               PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+                       "MVDD Voltage is outside the supported range.",
+                       return -EINVAL);
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+
/*
 * tonga_populate_smc_acpi_level - fill the SCLK and MCLK ACPI (lowest
 * power) levels of the SMU72 DPM table.
 *
 * The ACPI SCLK level is programmed from the reference clock with the SPLL
 * powered down and in reset; the ACPI MCLK level forces a DLL reset, keeps
 * the memory DLLs powered down and routes the DLL bypass clocks from the
 * saved boot-time clock registers.
 *
 * Returns 0 on success or an error from the VBIOS divider lookup.
 */
static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
	SMU72_Discrete_DpmTable *table)
{
	int result = 0;
	struct tonga_smumgr *smu_data =
				(struct tonga_smumgr *)(hwmgr->smu_backend);
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;

	SMIO_Pattern voltage_level;
	/* Clock register values captured at boot; used as the programming base. */
	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;

	/* The ACPI state should not do DPM on DC (or ever).*/
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	table->ACPILevel.MinVoltage =
			smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;

	/* assign zero for now*/
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value*/
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency,  &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.",
		return result);

	/* divider ID for required SCLK*/
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	/* Power the SPLL down and hold it in reset for the ACPI state. */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
					SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
						SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
						SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;


	/* For various features to be enabled/disabled while this level is active.*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz*/
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);

	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
	table->MemoryACPILevel.MinVoltage =
			    smu_data->smc_state_table.MemoryLevel[0].MinVoltage;

	/*  CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/

	/* Use the ACPI MVDD pattern when available, otherwise 0. */
	if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState*/
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal*/
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl            = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl            =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl     =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl       =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2     =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2            =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpHyst = 0;
	table->MemoryACPILevel.DownHyst = 100;
	table->MemoryACPILevel.VoltageDownHyst = 0;
	/* Indicates maximum activity level for this performance level.*/
	table->MemoryACPILevel.ActivityLevel =
			PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
+
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+                                       SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+
+       uint8_t count;
+       pp_atomctrl_clock_dividers_vi dividers;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                                               pptable_info->mm_dep_table;
+
+       table->UvdLevelCount = (uint8_t) (mm_table->count);
+       table->UvdBootLevel = 0;
+
+       for (count = 0; count < table->UvdLevelCount; count++) {
+               table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+               table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+               table->UvdLevel[count].MinVoltage.Vddc =
+                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
+                                               mm_table->entries[count].vddc);
+               table->UvdLevel[count].MinVoltage.VddGfx =
+                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+                                               mm_table->entries[count].vddgfx) : 0;
+               table->UvdLevel[count].MinVoltage.Vddci =
+                       phm_get_voltage_id(&data->vddci_voltage_table,
+                                            mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               table->UvdLevel[count].MinVoltage.Phases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(
+                                       hwmgr,
+                                       table->UvdLevel[count].VclkFrequency,
+                                       &dividers);
+
+               PP_ASSERT_WITH_CODE((!result),
+                                   "can not find divide id for Vclk clock",
+                                       return result);
+
+               table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                                                         table->UvdLevel[count].DclkFrequency, &dividers);
+               PP_ASSERT_WITH_CODE((!result),
+                                   "can not find divide id for Dclk clock",
+                                       return result);
+
+               table->UvdLevel[count].DclkDivider =
+                                       (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+               CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+       }
+
+       return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+               SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+
+       uint8_t count;
+       pp_atomctrl_clock_dividers_vi dividers;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                             (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                                                    pptable_info->mm_dep_table;
+
+       table->VceLevelCount = (uint8_t) (mm_table->count);
+       table->VceBootLevel = 0;
+
+       for (count = 0; count < table->VceLevelCount; count++) {
+               table->VceLevel[count].Frequency =
+                       mm_table->entries[count].eclk;
+               table->VceLevel[count].MinVoltage.Vddc =
+                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
+                               mm_table->entries[count].vddc);
+               table->VceLevel[count].MinVoltage.VddGfx =
+                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+                               mm_table->entries[count].vddgfx) : 0;
+               table->VceLevel[count].MinVoltage.Vddci =
+                       phm_get_voltage_id(&data->vddci_voltage_table,
+                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               table->VceLevel[count].MinVoltage.Phases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                                       table->VceLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((!result),
+                               "can not find divide id for VCE engine clock",
+                               return result);
+
+               table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+       }
+
+       return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+               SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+       uint8_t count;
+       pp_atomctrl_clock_dividers_vi dividers;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                            (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                                                   pptable_info->mm_dep_table;
+
+       table->AcpLevelCount = (uint8_t) (mm_table->count);
+       table->AcpBootLevel = 0;
+
+       for (count = 0; count < table->AcpLevelCount; count++) {
+               table->AcpLevel[count].Frequency =
+                       pptable_info->mm_dep_table->entries[count].aclk;
+               table->AcpLevel[count].MinVoltage.Vddc =
+                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
+                       mm_table->entries[count].vddc);
+               table->AcpLevel[count].MinVoltage.VddGfx =
+                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+                               mm_table->entries[count].vddgfx) : 0;
+               table->AcpLevel[count].MinVoltage.Vddci =
+                       phm_get_voltage_id(&data->vddci_voltage_table,
+                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               table->AcpLevel[count].MinVoltage.Phases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                       table->AcpLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((!result),
+                       "can not find divide id for engine clock", return result);
+
+               table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+       }
+
+       return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+               SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+       uint8_t count;
+       pp_atomctrl_clock_dividers_vi dividers;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct phm_ppt_v1_information *pptable_info =
+                            (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+                                                   pptable_info->mm_dep_table;
+
+       table->SamuBootLevel = 0;
+       table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+       for (count = 0; count < table->SamuLevelCount; count++) {
+               /* not sure whether we need evclk or not */
+               table->SamuLevel[count].Frequency =
+                       pptable_info->mm_dep_table->entries[count].samclock;
+               table->SamuLevel[count].MinVoltage.Vddc =
+                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
+                               mm_table->entries[count].vddc);
+               table->SamuLevel[count].MinVoltage.VddGfx =
+                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+                               mm_table->entries[count].vddgfx) : 0;
+               table->SamuLevel[count].MinVoltage.Vddci =
+                       phm_get_voltage_id(&data->vddci_voltage_table,
+                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               table->SamuLevel[count].MinVoltage.Phases = 1;
+
+               /* retrieve divider value for VBIOS */
+               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+                                       table->SamuLevel[count].Frequency, &dividers);
+               PP_ASSERT_WITH_CODE((!result),
+                       "can not find divide id for samu clock", return result);
+
+               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+       }
+
+       return result;
+}
+
+static int tonga_populate_memory_timing_parameters(
+               struct pp_hwmgr *hwmgr,
+               uint32_t engine_clock,
+               uint32_t memory_clock,
+               struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
+               )
+{
+       uint32_t dramTiming;
+       uint32_t dramTiming2;
+       uint32_t burstTime;
+       int result;
+
+       result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+                               engine_clock, memory_clock);
+
+       PP_ASSERT_WITH_CODE(result == 0,
+               "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+       dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+       dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+       burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+       arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
+       arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+       arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+       return 0;
+}
+
+static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+       int result = 0;
+       SMU72_Discrete_MCArbDramTimingTable  arb_regs;
+       uint32_t i, j;
+
+       memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
+
+       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+               for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+                       result = tonga_populate_memory_timing_parameters
+                               (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+                                data->dpm_table.mclk_table.dpm_levels[j].value,
+                                &arb_regs.entries[i][j]);
+
+                       if (result)
+                               break;
+               }
+       }
+
+       if (!result) {
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr,
+                               smu_data->smu7_data.arb_table_start,
+                               (uint8_t *)&arb_regs,
+                               sizeof(SMU72_Discrete_MCArbDramTimingTable),
+                               SMC_RAM_END
+                               );
+       }
+
+       return result;
+}
+
+static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+       table->GraphicsBootLevel = 0;
+       table->MemoryBootLevel = 0;
+
+       /* find boot level from dpm table*/
+       result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+       data->vbios_boot_state.sclk_bootup_value,
+       (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+       if (result != 0) {
+               smu_data->smc_state_table.GraphicsBootLevel = 0;
+               pr_err("[powerplay] VBIOS did not find boot engine "
+                               "clock value in dependency table. "
+                               "Using Graphics DPM level 0 !");
+               result = 0;
+       }
+
+       result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+               data->vbios_boot_state.mclk_bootup_value,
+               (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+       if (result != 0) {
+               smu_data->smc_state_table.MemoryBootLevel = 0;
+               pr_err("[powerplay] VBIOS did not find boot "
+                               "engine clock value in dependency table."
+                               "Using Memory DPM level 0 !");
+               result = 0;
+       }
+
+       table->BootVoltage.Vddc =
+               phm_get_voltage_id(&(data->vddc_voltage_table),
+                       data->vbios_boot_state.vddc_bootup_value);
+       table->BootVoltage.VddGfx =
+               phm_get_voltage_id(&(data->vddgfx_voltage_table),
+                       data->vbios_boot_state.vddgfx_bootup_value);
+       table->BootVoltage.Vddci =
+               phm_get_voltage_id(&(data->vddci_voltage_table),
+                       data->vbios_boot_state.vddci_bootup_value);
+       table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+       CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+       return result;
+}
+
/*
 * Populate the clock-stretcher (CKS) lookup and data tables in the SMC
 * state table, and program the PWR_CKS_ENABLE / PWR_CKS_CNTL registers.
 *
 * The part's speed grade is derived from efuse reads (RO value); RO and
 * the per-SCLK-level voltages with/without CKS determine the voltage
 * offsets.  The stretch amount from the PP table selects a row of
 * tonga_clock_stretcher_lookup_table; unsupported amounts disable the
 * ClockStretcher platform cap and fail with -EINVAL.
 *
 * NOTE(review): the constants below (efuse words 146/148, the RO and
 * voltage polynomials, register masks like 0xFFC2FF87) are hardware
 * calibration values taken as-is — not independently verifiable here.
 */
static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
			volt_with_cks, value;
	uint16_t clock_freq_u16;
	struct tonga_smumgr *smu_data =
				(struct tonga_smumgr *)(hwmgr->smu_backend);
	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
			volt_offset = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;
	uint32_t hw_revision, dev_id;
	struct cgs_system_info sys_info = {0};

	stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;

	sys_info.size = sizeof(struct cgs_system_info);

	/* PCI revision and device id select the Tonga-P voltage formulas. */
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	hw_revision = (uint32_t)sys_info.value;

	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
	cgs_query_system_info(hwmgr->device, &sys_info);
	dev_id = (uint32_t)sys_info.value;

	/* Read SMU_Eefuse to read and calculate RO and determine
	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
	 */
	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (146 * 4));
	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixSMU_EFUSE_0 + (148 * 4));
	/* RO is encoded in bits 31:24 of efuse word 146. */
	efuse &= 0xFF000000;
	efuse = efuse >> 24;
	efuse2 &= 0xF;

	/* Linear RO scaling; the range depends on the efuse2 selector. */
	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	/* type selects the DDT table row set: 0 = FF part, 1 = SS part. */
	if (ro >= 1660)
		type = 0;
	else
		type = 1;

	/* Populate Stretch amount */
	smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;


	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
	for (i = 0; i < sclk_table->count; i++) {
		smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
				sclk_table->entries[i].cks_enable << i;
		/* Two sets of fitted polynomials: Tonga-P vs. other SKUs. */
		if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
			volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
				(sclk_table->entries[i].clk/100) / 10000) * 1000 /
				(8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
			volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
				(sclk_table->entries[i].clk/100) / 100000) * 1000 /
				(6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
		} else {
			volt_without_cks = (uint32_t)((14041 *
				(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
				(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
			volt_with_cks = (uint32_t)((13946 *
				(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
				(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
		}
		/* NOTE(review): volt_offset carries over from the previous
		 * iteration when volt_without_cks < volt_with_cks — looks
		 * intentional (last valid offset reused), confirm.
		 */
		if (volt_without_cks >= volt_with_cks)
			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
		smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
	}

	/* Reset the stretcher state machine: disable, pulse masterReset,
	 * leave staticEnable set.
	 */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			STRETCH_ENABLE, 0x0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			staticEnable, 0x1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
			masterReset, 0x0);

	/* Populate CKS Lookup Table */
	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
		stretch_amount2 = 0;
	else if (stretch_amount == 3 || stretch_amount == 4)
		stretch_amount2 = 1;
	else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
		PP_ASSERT_WITH_CODE(false,
				"Stretch Amount in PPTable not supported\n",
				return -EINVAL);
	}

	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL);
	/* Clear the CKS control fields that are (conditionally) set below. */
	value &= 0xFFC2FF87;
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
			tonga_clock_stretcher_lookup_table[stretch_amount2][0];
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
			tonga_clock_stretcher_lookup_table[stretch_amount2][1];
	/* Highest graphics DPM level SCLK, converted back to host order
	 * and from 10 kHz to MHz for comparison with the lookup table.
	 */
	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
			GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
			SclkFrequency) / 100);
	if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
			clock_freq_u16 &&
	    tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
			clock_freq_u16) {
		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
		value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
		value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
		value |= (tonga_clock_stretch_amount_conversion
				[tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
				 [stretch_amount]) << 3;
	}
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].minFreq);
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
			CKS_LOOKUPTableEntry[0].maxFreq);
	/* setting packs column[2] (7 bits) and column[3] (1 bit) together. */
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
			tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
	smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
			(tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixPWR_CKS_CNTL, value);

	/* Populate DDT Lookup Table */
	for (i = 0; i < 4; i++) {
		/* Assign the minimum and maximum VID stored
		 * in the last row of Clock Stretcher Voltage Table.
		 */
		smu_data->smc_state_table.ClockStretcherDataTable.
		ClockStretcherDataTableEntry[i].minVID =
				(uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
		smu_data->smc_state_table.ClockStretcherDataTable.
		ClockStretcherDataTableEntry[i].maxVID =
				(uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
		/* Loop through each SCLK and check the frequency
		 * to see if it lies within the frequency for clock stretcher.
		 */
		for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
			cks_setting = 0;
			clock_freq = PP_SMC_TO_HOST_UL(
					smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
			/* Check the allowed frequency against the sclk level[j].
			 *  Sclk's endianness has already been converted,
			 *  and it's in 10Khz unit,
			 *  as opposed to Data table, which is in Mhz unit.
			 */
			if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
				cks_setting |= 0x2;
				if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
					cks_setting |= 0x1;
			}
			/* Two bits per DPM level, packed into 'setting'. */
			smu_data->smc_state_table.ClockStretcherDataTable.
			ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
		}
		CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
				ClockStretcherDataTable.
				ClockStretcherDataTableEntry[i].setting);
	}

	/* Finally clear bit 0 of PWR_CKS_CNTL. */
	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixPWR_CKS_CNTL);
	value &= 0xFFFFFFFE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixPWR_CKS_CNTL, value);

	return 0;
}
+
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+                       SMU72_Discrete_DpmTable *table)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint16_t config;
+
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+               /*  Splitted mode */
+               config = VR_SVI2_PLANE_1;
+               table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+                       config = VR_SVI2_PLANE_2;
+                       table->VRConfig |= config;
+               } else {
+                       pr_err("VDDC and VDDGFX should "
+                               "be both on SVI2 control in splitted mode !\n");
+               }
+       } else {
+               /* Merged mode  */
+               config = VR_MERGED_WITH_VDDC;
+               table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+               /* Set Vddc Voltage Controller  */
+               if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+                       config = VR_SVI2_PLANE_1;
+                       table->VRConfig |= config;
+               } else {
+                       pr_err("VDDC should be on "
+                                       "SVI2 control in merged mode !\n");
+               }
+       }
+
+       /* Set Vddci Voltage Controller  */
+       if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+               config = VR_SVI2_PLANE_2;  /* only in merged mode */
+               table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+       } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+               config = VR_SMIO_PATTERN_1;
+               table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+       }
+
+       /* Set Mvdd Voltage Controller */
+       if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+               config = VR_SMIO_PATTERN_2;
+               table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+       }
+
+       return 0;
+}
+
+static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       uint32_t tmp;
+       int result;
+
+       /*
+       * This is a read-modify-write on the first byte of the ARB table.
+       * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
+       * is the field 'current'.
+       * This solution is ugly, but we never write the whole table only
+       * individual fields in it.
+       * In reality this field should not be in that structure
+       * but in a soft register.
+       */
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+       if (result != 0)
+               return result;
+
+       tmp &= 0x00FFFFFF;
+       tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+       return smu7_write_smc_sram_dword(hwmgr,
+                       smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+
/*
 * Populate the BAPM (power-management thermal model) parameters of the
 * SMC DPM table: TDP targets from the PP table's CAC/DTP data, GPU
 * temperature limits, and the per-iteration/source/sink BAPMTI_R/RC
 * resistance tables from the power-tune defaults.
 *
 * Always returns 0; an out-of-range target temperature is reported via
 * PP_ASSERT_WITH_CODE but deliberately not treated as an error (empty
 * action argument).
 */
static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct tonga_smumgr *smu_data =
				(struct tonga_smumgr *)(hwmgr->smu_backend);
	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU72_Discrete_DpmTable  *dpm_table = &(smu_data->smc_state_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
	int  i, j, k;
	const uint16_t *pdef1, *pdef2;

	/* TDP values are scaled by 256 before conversion to SMC byte order
	 * (presumably 8.8 fixed point — TODO confirm against SMC spec).
	 */
	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
			(uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
			(uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	/* GpuTjMax is a uint8_t, so the temperature must fit in a byte. */
	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
			"Target Operating Temp is out of Range !",
			);

	dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	dpm_table->BAPM_TEMP_GRADIENT =
				PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
	pdef1 = defaults->bapmti_r;
	pdef2 = defaults->bapmti_rc;

	/* Copy the flat default arrays into the 3-D SMC tables, converting
	 * each entry to SMC byte order.
	 */
	for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU72_DTE_SOURCES; j++) {
			for (k = 0; k < SMU72_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] =
						PP_HOST_TO_SMC_US(*pdef1);
				dpm_table->BAPMTI_RC[i][j][k] =
						PP_HOST_TO_SMC_US(*pdef2);
				pdef1++;
				pdef2++;
			}
		}
	}

	return 0;
}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+       const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+       smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+       smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+       smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+       smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+       return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+       uint16_t tdc_limit;
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+       const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       /* TDC number of fraction bits are changed from 8 to 7
+        * for Fiji as requested by SMC team
+        */
+       tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+       smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+                       CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+       smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+                       defaults->tdc_vddc_throttle_release_limit_perc;
+       smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+       return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smu_backend);
+       const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+       uint32_t temp;
+
+       if (smu7_read_smc_sram_dword(hwmgr,
+                       fuse_table_offset +
+                       offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+                       (uint32_t *)&temp, SMC_RAM_END))
+               PP_ASSERT_WITH_CODE(false,
+                               "Attempt to read PmFuses.DW6 "
+                               "(SviLoadLineEn) from SMC Failed !",
+                               return -EINVAL);
+       else
+               smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+       return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+       return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+       if ((hwmgr->thermal_controller.advanceFanControlParameters.
+                       usFanOutputSensitivity & (1 << 15)) ||
+               (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+               hwmgr->thermal_controller.advanceFanControlParameters.
+               usFanOutputSensitivity = hwmgr->thermal_controller.
+                       advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+       smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+                       PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+                                       advanceFanControlParameters.usFanOutputSensitivity);
+       return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+       int i;
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+       /* Currently not used. Set all to zero. */
+       for (i = 0; i < 16; i++)
+               smu_data->power_tune_table.GnbLPML[i] = 0;
+
+       return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+       uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+       struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+       hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+       lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+       smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+       smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+                       CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+       return 0;
+}
+
/*
 * Populate the full SMC PM fuse table and upload it to SMC SRAM.
 *
 * Only active when the PowerContainment platform cap is enabled; each
 * DW range of the fuse table is filled by its helper, and any failure
 * aborts with -EINVAL.  With the cap disabled this is a no-op returning 0.
 */
static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct tonga_smumgr *smu_data =
				(struct tonga_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		/* Locate the PM fuse table via the SMC firmware header. */
		if (smu7_read_smc_sram_dword(hwmgr,
				SMU72_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU72_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to get pm_fuse_table_offset Failed !",
				return -EINVAL);

		/* DW6 */
		if (tonga_populate_svi_load_line(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate SviLoadLine Failed !",
				return -EINVAL);
		/* DW7 */
		if (tonga_populate_tdc_limit(hwmgr))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to populate TDCLimit Failed !",
					return -EINVAL);
		/* DW8 */
		if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate TdcWaterfallCtl Failed !",
				return -EINVAL);

		/* DW9-DW12 */
		if (tonga_populate_temperature_scaler(hwmgr) != 0)
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate LPMLTemperatureScaler Failed !",
				return -EINVAL);

		/* DW13-DW14 */
		if (tonga_populate_fuzzy_fan(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate Fuzzy Fan "
				"Control parameters Failed !",
				return -EINVAL);

		/* DW15-DW18 */
		if (tonga_populate_gnb_lpml(hwmgr))
			PP_ASSERT_WITH_CODE(false,
				"Attempt to populate GnbLPML Failed !",
				return -EINVAL);

		/* DW20 */
		if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
			PP_ASSERT_WITH_CODE(
				false,
				"Attempt to populate BapmVddCBaseLeakage "
				"Hi and Lo Sidd Failed !",
				return -EINVAL);

		/* Upload the assembled table to SMC SRAM in one shot. */
		if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to download PmFuseTable Failed !",
					return -EINVAL);
	}
	return 0;
}
+
+static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+                                SMU72_Discrete_MCRegisters *mc_reg_table)
+{
+       const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
+
+       uint32_t i, j;
+
+       for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+               if (smu_data->mc_reg_table.validflag & 1<<j) {
+                       PP_ASSERT_WITH_CODE(
+                               i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+                               "Index of mc_reg_table->address[] array "
+                               "out of boundary",
+                               return -EINVAL);
+                       mc_reg_table->address[i].s0 =
+                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+                       mc_reg_table->address[i].s1 =
+                               PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+                       i++;
+               }
+       }
+
+       mc_reg_table->last = (uint8_t)i;
+
+       return 0;
+}
+
+/*convert register values from driver to SMC format */
+static void tonga_convert_mc_registers(
+       const struct tonga_mc_reg_entry *entry,
+       SMU72_Discrete_MCRegisterSet *data,
+       uint32_t num_entries, uint32_t valid_flag)
+{
+       uint32_t i, j;
+
+       for (i = 0, j = 0; j < num_entries; j++) {
+               if (valid_flag & 1<<j) {
+                       data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+                       i++;
+               }
+       }
+}
+
+static int tonga_convert_mc_reg_table_entry_to_smc(
+               struct pp_hwmgr *hwmgr,
+               const uint32_t memory_clock,
+               SMU72_Discrete_MCRegisterSet *mc_reg_table_data
+               )
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       uint32_t i = 0;
+
+       for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+               if (memory_clock <=
+                       smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+                       break;
+               }
+       }
+
+       if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+               --i;
+
+       tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+                               mc_reg_table_data, smu_data->mc_reg_table.last,
+                               smu_data->mc_reg_table.validflag);
+
+       return 0;
+}
+
+static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+               SMU72_Discrete_MCRegisters *mc_regs)
+{
+       int result = 0;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       int res;
+       uint32_t i;
+
+       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+               res = tonga_convert_mc_reg_table_entry_to_smc(
+                               hwmgr,
+                               data->dpm_table.mclk_table.dpm_levels[i].value,
+                               &mc_regs->data[i]
+                               );
+
+               if (0 != res)
+                       result = res;
+       }
+
+       return result;
+}
+
+static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       uint32_t address;
+       int32_t result;
+
+       if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+               return 0;
+
+
+       memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
+
+       result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+       if (result != 0)
+               return result;
+
+
+       address = smu_data->smu7_data.mc_reg_table_start +
+                       (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
+
+       return  smu7_copy_bytes_to_smc(
+                       hwmgr, address,
+                       (uint8_t *)&smu_data->mc_regs.data[0],
+                       sizeof(SMU72_Discrete_MCRegisterSet) *
+                       data->dpm_table.mclk_table.count,
+                       SMC_RAM_END);
+}
+
+static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+       memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
+       result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize MCRegTable for the MC register addresses !",
+               return result;);
+
+       result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize MCRegTable for driver state !",
+               return result;);
+
+       return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
+                       (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       struct  phm_ppt_v1_information *table_info =
+                       (struct  phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (table_info &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+                       table_info->cac_dtp_table->usPowerTuneDataSetID)
+               smu_data->power_tune_defaults =
+                               &tonga_power_tune_data_set_array
+                               [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+       else
+               smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+}
+
+static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       struct SMU72_Discrete_GraphicsLevel *levels =
+                               data->smc_state_table.GraphicsLevel;
+       unsigned min_level = 1;
+
+       hwmgr->default_gfx_power_profile.activity_threshold =
+                       be16_to_cpu(levels[0].ActivityLevel);
+       hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+       hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+       hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+       hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+       hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+       /* Workaround compute SDMA instability: disable lowest SCLK
+        * DPM level. Optimize compute power profile: Use only highest
+        * 2 power levels (if more than 2 are available), Hysteresis:
+        * 0ms up, 5ms down
+        */
+       if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+               min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+       else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+               min_level = 1;
+       else
+               min_level = 0;
+       hwmgr->default_compute_power_profile.min_sclk =
+                       be32_to_cpu(levels[min_level].SclkFrequency);
+       hwmgr->default_compute_power_profile.up_hyst = 0;
+       hwmgr->default_compute_power_profile.down_hyst = 5;
+
+       hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+       hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+}
+
+static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smu_backend);
+       SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       uint8_t i;
+       pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+
+
+       memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+       tonga_initialize_power_tune_defaults(hwmgr);
+
+       if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+               tonga_populate_smc_voltage_tables(hwmgr, table);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StepVddc))
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (data->is_memory_gddr5)
+               table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+       i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
+
+       if (i == 1 || i == 0)
+               table->SystemFlags |= 0x40;
+
+       if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+               result = tonga_populate_ulv_state(hwmgr, table);
+               PP_ASSERT_WITH_CODE(!result,
+                       "Failed to initialize ULV state !",
+                       return result;);
+
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_ULV_PARAMETER, 0x40035);
+       }
+
+       result = tonga_populate_smc_link_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize Link Level !", return result);
+
+       result = tonga_populate_all_graphic_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize Graphics Level !", return result);
+
+       result = tonga_populate_all_memory_levels(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize Memory Level !", return result);
+
+       result = tonga_populate_smc_acpi_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize ACPI Level !", return result);
+
+       result = tonga_populate_smc_vce_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize VCE Level !", return result);
+
+       result = tonga_populate_smc_acp_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize ACP Level !", return result);
+
+       result = tonga_populate_smc_samu_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize SAMU Level !", return result);
+
+       /* Since only the initial state is completely set up at this
+       * point (the other states are just copies of the boot state) we only
+       * need to populate the  ARB settings for the initial state.
+       */
+       result = tonga_program_memory_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to Write ARB settings for the initial state.",
+               return result;);
+
+       result = tonga_populate_smc_uvd_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize UVD Level !", return result);
+
+       result = tonga_populate_smc_boot_level(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to initialize Boot Level !", return result);
+
+       tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to populate BAPM Parameters !", return result);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ClockStretcher)) {
+               result = tonga_populate_clock_stretcher_data_table(hwmgr);
+               PP_ASSERT_WITH_CODE(!result,
+                       "Failed to populate Clock Stretcher Data Table !",
+                       return result;);
+       }
+       table->GraphicsVoltageChangeEnable  = 1;
+       table->GraphicsThermThrottleEnable  = 1;
+       table->GraphicsInterval = 1;
+       table->VoltageInterval  = 1;
+       table->ThermalInterval  = 1;
+       table->TemperatureLimitHigh =
+               table_info->cac_dtp_table->usTargetOperatingTemp *
+               SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->TemperatureLimitLow =
+               (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+               SMU7_Q88_FORMAT_CONVERSION_UNIT;
+       table->MemoryVoltageChangeEnable  = 1;
+       table->MemoryInterval  = 1;
+       table->VoltageResponseTime  = 0;
+       table->PhaseResponseTime  = 0;
+       table->MemoryThermThrottleEnable  = 1;
+
+       /*
+       * Cail reads current link status and reports it as cap (we cannot
+       * change this due to some previous issues we had)
+       * SMC drops the link status to lowest level after enabling
+       * DPM by PowerPlay. After pnp or toggling CF, driver gets reloaded again
+       * but this time Cail reads current link status which was set to low by
+       * SMC and reports it as cap to powerplay
+       * To avoid it, we set PCIeBootLinkLevel to highest dpm level
+       */
+       PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+                       "There must be 1 or more PCIE levels defined in PPTable.",
+                       return -EINVAL);
+
+       table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
+
+       table->PCIeGenInterval  = 1;
+
+       result = tonga_populate_vr_config(hwmgr, table);
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to populate VRConfig setting !", return result);
+
+       table->ThermGpio  = 17;
+       table->SclkStepSize = 0x4000;
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+                                               &gpio_pin_assignment)) {
+               table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_RegulatorHot);
+       } else {
+               table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_RegulatorHot);
+       }
+
+       if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+                                               &gpio_pin_assignment)) {
+               table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition);
+       } else {
+               table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition);
+       }
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_Falcon_QuickTransition);
+
+       if (0) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_AutomaticDCTransition);
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_Falcon_QuickTransition);
+       }
+
+       if (atomctrl_get_pp_assign_pin(hwmgr,
+                       THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalOutGPIO);
+
+               table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+
+               table->ThermOutPolarity =
+                       (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+                       (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
+
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+               /* if required, combine VRHot/PCC with thermal out GPIO*/
+               if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_RegulatorHot) &&
+                       phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CombinePCCWithThermalSignal)){
+                       table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+               }
+       } else {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalOutGPIO);
+
+               table->ThermOutGpio = 17;
+               table->ThermOutPolarity = 1;
+               table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+       }
+
+       for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
+               table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+       CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+       CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+       CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+       CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+       /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+       result = smu7_copy_bytes_to_smc(
+                       hwmgr,
+                       smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
+                       (uint8_t *)&(table->SystemFlags),
+                       sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
+                       SMC_RAM_END);
+
+       PP_ASSERT_WITH_CODE(!result,
+               "Failed to upload dpm data to SMC memory !", return result;);
+
+       result = tonga_init_arb_table_index(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "Failed to upload arb data to SMC memory !", return result);
+
+       tonga_populate_pm_fuses(hwmgr);
+       PP_ASSERT_WITH_CODE((!result),
+               "Failed to populate initialize pm fuses !", return result);
+
+       result = tonga_populate_initial_mc_reg_table(hwmgr);
+       PP_ASSERT_WITH_CODE((!result),
+               "Failed to populate initialize MC Reg table !", return result);
+
+       tonga_save_default_power_profile(hwmgr);
+
+       return 0;
+}
+
+static int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smu_backend);
+       SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+       uint32_t duty100;
+       uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       uint16_t fdo_min, slope1, slope2;
+       uint32_t reference_clock;
+       int res;
+       uint64_t tmp64;
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_MicrocodeFanControl))
+               return 0;
+
+       if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       if (0 == smu_data->smu7_data.fan_table_start) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+                                               CGS_IND_REG__SMC,
+                                               CG_FDO_CTRL1, FMAX_DUTY100);
+
+       if (0 == duty100) {
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                               PHM_PlatformCaps_MicrocodeFanControl);
+               return 0;
+       }
+
+       tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (uint16_t)tmp64;
+
+       t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+                  hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+       t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+                 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+       pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+                   hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+       pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+                   hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+       slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+       fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+       fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+       fan_table.Slope1 = cpu_to_be16(slope1);
+       fan_table.Slope2 = cpu_to_be16(slope2);
+
+       fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+       fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+       fan_table.HystUp = cpu_to_be16(1);
+
+       fan_table.HystSlope = cpu_to_be16(1);
+
+       fan_table.TempRespLim = cpu_to_be16(5);
+
+       reference_clock = smu7_get_xclk(hwmgr);
+
+       fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+       fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+       fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+       fan_table.FanControl_GL_Flag = 1;
+
+       res = smu7_copy_bytes_to_smc(hwmgr,
+                                       smu_data->smu7_data.fan_table_start,
+                                       (uint8_t *)&fan_table,
+                                       (uint32_t)sizeof(fan_table),
+                                       SMC_RAM_END);
+
+       return 0;
+}
+
+
+static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       if (data->need_update_smu7_dpm_table &
+               (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+               return tonga_program_memory_timing_parameters(hwmgr);
+
+       return 0;
+}
+
+static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data =
+                       (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+       int result = 0;
+       uint32_t low_sclk_interrupt_threshold = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkThrottleLowNotification)
+               && (hwmgr->gfx_arbiter.sclk_threshold !=
+                               data->low_sclk_interrupt_threshold)) {
+               data->low_sclk_interrupt_threshold =
+                               hwmgr->gfx_arbiter.sclk_threshold;
+               low_sclk_interrupt_threshold =
+                               data->low_sclk_interrupt_threshold;
+
+               CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+               result = smu7_copy_bytes_to_smc(
+                               hwmgr,
+                               smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable,
+                                       LowSclkInterruptThreshold),
+                               (uint8_t *)&low_sclk_interrupt_threshold,
+                               sizeof(uint32_t),
+                               SMC_RAM_END);
+       }
+
+       result = tonga_update_and_upload_mc_reg_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((!result),
+                               "Failed to upload MC reg table !",
+                               return result);
+
+       result = tonga_program_mem_timing_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE((result == 0),
+                       "Failed to program memory timing parameters !",
+                       );
+
+       return result;
+}
+
+static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+{
+       switch (type) {
+       case SMU_SoftRegisters:
+               switch (member) {
+               case HandshakeDisables:
+                       return offsetof(SMU72_SoftRegisters, HandshakeDisables);
+               case VoltageChangeTimeout:
+                       return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
+               case AverageGraphicsActivity:
+                       return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+               case PreVBlankGap:
+                       return offsetof(SMU72_SoftRegisters, PreVBlankGap);
+               case VBlankTimeout:
+                       return offsetof(SMU72_SoftRegisters, VBlankTimeout);
+               case UcodeLoadStatus:
+                       return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
+               case DRAM_LOG_ADDR_H:
+                       return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_H);
+               case DRAM_LOG_ADDR_L:
+                       return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_L);
+               case DRAM_LOG_PHY_ADDR_H:
+                       return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+               case DRAM_LOG_PHY_ADDR_L:
+                       return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+               case DRAM_LOG_BUFF_SIZE:
+                       return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+               }
+       case SMU_Discrete_DpmTable:
+               switch (member) {
+               case UvdBootLevel:
+                       return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+               case VceBootLevel:
+                       return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+               case SamuBootLevel:
+                       return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+               case LowSclkInterruptThreshold:
+                       return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+               }
+       }
+       pr_warn("can't get the offset of type %x member %x\n", type, member);
+       return 0;
+}
+
+static uint32_t tonga_get_mac_definition(uint32_t value)
+{
+       switch (value) {
+       case SMU_MAX_LEVELS_GRAPHICS:
+               return SMU72_MAX_LEVELS_GRAPHICS;
+       case SMU_MAX_LEVELS_MEMORY:
+               return SMU72_MAX_LEVELS_MEMORY;
+       case SMU_MAX_LEVELS_LINK:
+               return SMU72_MAX_LEVELS_LINK;
+       case SMU_MAX_ENTRIES_SMIO:
+               return SMU72_MAX_ENTRIES_SMIO;
+       case SMU_MAX_LEVELS_VDDC:
+               return SMU72_MAX_LEVELS_VDDC;
+       case SMU_MAX_LEVELS_VDDGFX:
+               return SMU72_MAX_LEVELS_VDDGFX;
+       case SMU_MAX_LEVELS_VDDCI:
+               return SMU72_MAX_LEVELS_VDDCI;
+       case SMU_MAX_LEVELS_MVDD:
+               return SMU72_MAX_LEVELS_MVDD;
+       }
+       pr_warn("can't get the mac value %x\n", value);
+
+       return 0;
+}
+
+static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       smu_data->smc_state_table.UvdBootLevel = 0;
+       if (table_info->mm_dep_table->count > 0)
+               smu_data->smc_state_table.UvdBootLevel =
+                               (uint8_t) (table_info->mm_dep_table->count - 1);
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0x00FFFFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+       cgs_write_ind_register(hwmgr->device,
+                               CGS_IND_REG__SMC,
+                               mm_boot_level_offset, mm_boot_level_value);
+
+       if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_UVDDPM) ||
+               phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+       return 0;
+}
+
+static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data =
+                               (struct tonga_smumgr *)(hwmgr->smu_backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+
+       smu_data->smc_state_table.VceBootLevel =
+               (uint8_t) (table_info->mm_dep_table->count - 1);
+
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                                       offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0xFF00FFFF;
+       mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+       return 0;
+}
+
+static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+       smu_data->smc_state_table.SamuBootLevel = 0;
+       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+                               offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+
+       mm_boot_level_offset /= 4;
+       mm_boot_level_offset *= 4;
+       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset);
+       mm_boot_level_value &= 0xFFFFFF00;
+       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+       cgs_write_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_StablePState))
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
+                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+       return 0;
+}
+
+static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+       switch (type) {
+       case SMU_UVD_TABLE:
+               tonga_update_uvd_smc_table(hwmgr);
+               break;
+       case SMU_VCE_TABLE:
+               tonga_update_vce_smc_table(hwmgr);
+               break;
+       case SMU_SAMU_TABLE:
+               tonga_update_samu_smc_table(hwmgr);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+       uint32_t tmp;
+       int result;
+       bool error = false;
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, DpmTable),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.dpm_table_start = tmp;
+
+       error |= (result != 0);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, SoftRegisters),
+                               &tmp, SMC_RAM_END);
+
+       if (!result) {
+               data->soft_regs_start = tmp;
+               smu_data->smu7_data.soft_regs_start = tmp;
+       }
+
+       error |= (result != 0);
+
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, mcRegisterTable),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.mc_reg_table_start = tmp;
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, FanTable),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.fan_table_start = tmp;
+
+       error |= (result != 0);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               smu_data->smu7_data.arb_table_start = tmp;
+
+       error |= (result != 0);
+
+       result = smu7_read_smc_sram_dword(hwmgr,
+                               SMU72_FIRMWARE_HEADER_LOCATION +
+                               offsetof(SMU72_Firmware_Header, Version),
+                               &tmp, SMC_RAM_END);
+
+       if (!result)
+               hwmgr->microcode_version_info.SMC = tmp;
+
+       error |= (result != 0);
+
+       return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+       return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+       bool result = true;
+
+       switch (in_reg) {
+       case  mmMC_SEQ_RAS_TIMING:
+               *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+               break;
+
+       case  mmMC_SEQ_DLL_STBY:
+               *out_reg = mmMC_SEQ_DLL_STBY_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CMD0:
+               *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CMD1:
+               *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+               break;
+
+       case  mmMC_SEQ_G5PDX_CTRL:
+               *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+               break;
+
+       case mmMC_SEQ_CAS_TIMING:
+               *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+               break;
+
+       case mmMC_SEQ_MISC_TIMING:
+               *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+               break;
+
+       case mmMC_SEQ_MISC_TIMING2:
+               *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+               break;
+
+       case mmMC_SEQ_PMG_DVS_CMD:
+               *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+               break;
+
+       case mmMC_SEQ_PMG_DVS_CTL:
+               *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+               break;
+
+       case mmMC_SEQ_RD_CTL_D0:
+               *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+               break;
+
+       case mmMC_SEQ_RD_CTL_D1:
+               *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_D0:
+               *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_D1:
+               *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+               break;
+
+       case mmMC_PMG_CMD_EMRS:
+               *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS1:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+               break;
+
+       case mmMC_SEQ_PMG_TIMING:
+               *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+               break;
+
+       case mmMC_PMG_CMD_MRS2:
+               *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+               break;
+
+       case mmMC_SEQ_WR_CTL_2:
+               *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+               break;
+
+       default:
+               result = false;
+               break;
+       }
+
+       return result;
+}
+
+static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
+{
+       uint32_t i;
+       uint16_t address;
+
+       for (i = 0; i < table->last; i++) {
+               table->mc_reg_address[i].s0 =
+                       tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
+                                                       &address) ?
+                                                       address :
+                                                table->mc_reg_address[i].s1;
+       }
+       return 0;
+}
+
+static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+                                       struct tonga_mc_reg_table *ni_table)
+{
+       uint8_t i, j;
+
+       PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+               "Invalid VramInfo table.", return -EINVAL);
+       PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+               "Invalid VramInfo table.", return -EINVAL);
+
+       for (i = 0; i < table->last; i++)
+               ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+       ni_table->last = table->last;
+
+       for (i = 0; i < table->num_entries; i++) {
+               ni_table->mc_reg_table_entry[i].mclk_max =
+                       table->mc_reg_table_entry[i].mclk_max;
+               for (j = 0; j < table->last; j++) {
+                       ni_table->mc_reg_table_entry[i].mc_data[j] =
+                               table->mc_reg_table_entry[i].mc_data[j];
+               }
+       }
+
+       ni_table->num_entries = table->num_entries;
+
+       return 0;
+}
+
+/*
+ * tonga_set_mc_special_registers - append derived MC register entries.
+ *
+ * Certain MC sequencer registers (EMRS/MRS/MRS1 and, for non-GDDR5 memory,
+ * PMG_AUTO_CMD) are not listed directly in the VBIOS table; their values are
+ * synthesized here from MC_SEQ_MISC1 / MC_SEQ_RESERVE_M data combined with
+ * the current hardware register contents read through CGS.  The synthesized
+ * entries are appended after index table->last, and table->last is advanced
+ * accordingly.  Returns 0 on success, -EINVAL if the table would overflow
+ * the SMU72 array size.
+ */
+static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+                                       struct tonga_mc_reg_table *table)
+{
+       uint8_t i, j, k;
+       uint32_t temp_reg;
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+       /* i walks the existing entries, j is the append cursor. */
+       for (i = 0, j = table->last; i < table->last; i++) {
+               /* NOTE(review): this check uses 'j <' while later checks in
+                * the same function use 'j <='; confirm which bound the SMU72
+                * firmware actually permits - the inconsistency looks
+                * unintentional. */
+               PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                       "Invalid VramInfo table.", return -EINVAL);
+
+               switch (table->mc_reg_address[i].s1) {
+
+               case mmMC_SEQ_MISC1:
+                       /* Derive MC_PMG_CMD_EMRS: keep the live register's
+                        * high half, take the MISC1 data's high half shifted
+                        * into the low half. */
+                       temp_reg = cgs_read_register(hwmgr->device,
+                                                       mmMC_PMG_CMD_EMRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       ((temp_reg & 0xffff0000)) |
+                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       /* Derive MC_PMG_CMD_MRS from the low half of MISC1;
+                        * non-GDDR5 parts additionally need bit 8 set. */
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+                               if (!data->is_memory_gddr5)
+                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+
+                       /* Non-GDDR5 memory also needs MC_PMG_AUTO_CMD, built
+                        * from the high half of the MISC1 data. */
+                       if (!data->is_memory_gddr5) {
+                               table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+                               table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+                               for (k = 0; k < table->num_entries; k++)
+                                       table->mc_reg_table_entry[k].mc_data[j] =
+                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+                               j++;
+                               PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                                       "Invalid VramInfo table.", return -EINVAL);
+                       }
+
+                       break;
+
+               case mmMC_SEQ_RESERVE_M:
+                       /* Derive MC_PMG_CMD_MRS1 from the low half of the
+                        * RESERVE_M data, keeping the live register's high half. */
+                       temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+                       table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+                       table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                       }
+                       j++;
+                       PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+                               "Invalid VramInfo table.", return -EINVAL);
+                       break;
+
+               default:
+                       break;
+               }
+
+       }
+
+       /* Publish the new table length including appended entries. */
+       table->last = j;
+
+       return 0;
+}
+
+/*
+ * tonga_set_valid_flag - mark MC registers whose value varies across entries.
+ *
+ * A register column only needs to be reprogrammed on mclk changes if any two
+ * consecutive entries carry different data for it; such columns get their bit
+ * set in table->validflag.  Always returns 0.
+ */
+static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
+{
+       uint8_t i, j;
+
+       for (i = 0; i < table->last; i++) {
+               for (j = 1; j < table->num_entries; j++) {
+                       if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+                               table->mc_reg_table_entry[j].mc_data[i]) {
+                               table->validflag |= (1<<i);
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * tonga_initialize_mc_reg_table - build the SMC-side MC register table.
+ *
+ * Mirrors a set of MC sequencer registers into their *_LP shadow copies
+ * (the VBIOS no longer programs these), reads the VBIOS MC register table
+ * for the installed memory module, converts it into the driver's
+ * tonga_mc_reg_table, appends the derived "special" registers and computes
+ * the valid-flag mask.  Returns 0 on success or a negative errno.
+ */
+static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+       int result;
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+       pp_atomctrl_mc_reg_table *table;
+       struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+       uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
+
+       table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+       if (table == NULL)
+               return -ENOMEM;
+
+       /* Program additional LP registers that are no longer programmed by VBIOS */
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
+                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
+                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
+                       cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+       cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
+                       cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+       /* NOTE(review): table came from kzalloc() above, so this memset is
+        * redundant; harmless but could be dropped. */
+       memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+       result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+       if (!result)
+               result = tonga_copy_vbios_smc_reg_table(table, ni_table);
+
+       if (!result) {
+               tonga_set_s0_mc_reg_index(ni_table);
+               result = tonga_set_mc_special_registers(hwmgr, ni_table);
+       }
+
+       if (!result)
+               tonga_set_valid_flag(ni_table);
+
+       kfree(table);
+
+       return result;
+}
+
+/*
+ * tonga_is_dpm_running - report whether the SMC voltage controller is active.
+ *
+ * Reads the FEATURE_STATUS.VOLTAGE_CONTROLLER_ON field via the indirect SMC
+ * register interface; DPM is considered running when it reads 1.
+ * NOTE(review): the '? true : false' is redundant - the comparison already
+ * yields a bool.
+ */
+static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                       CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+                       ? true : false;
+}
+
+/*
+ * tonga_populate_requested_graphic_levels - apply a power profile request.
+ *
+ * Overwrites the activity threshold and up/down hysteresis of every populated
+ * graphics DPM level with the values from @request, then uploads the whole
+ * GraphicsLevel array into SMU RAM.  Returns the smu7_copy_bytes_to_smc()
+ * result (0 on success).
+ */
+static int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+               struct amd_pp_profile *request)
+{
+       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
+                       (hwmgr->smu_backend);
+       struct SMU72_Discrete_GraphicsLevel *levels =
+                       smu_data->smc_state_table.GraphicsLevel;
+       uint32_t array = smu_data->smu7_data.dpm_table_start +
+                       offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+       uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
+                       SMU72_MAX_LEVELS_GRAPHICS;
+       uint32_t i;
+
+       for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+               /* SMU firmware expects big-endian 16-bit activity level. */
+               levels[i].ActivityLevel =
+                               cpu_to_be16(request->activity_threshold);
+               levels[i].EnabledForActivity = 1;
+               levels[i].UpHyst = request->up_hyst;
+               levels[i].DownHyst = request->down_hyst;
+       }
+
+       return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+                               array_size, SMC_RAM_END);
+}
+
 const struct pp_smumgr_func tonga_smu_funcs = {
        .smu_init = &tonga_smu_init,
        .smu_fini = &smu7_smu_fini,
index 8c4f761d5bc8e210c205e6c98b7a0d9592531d7e..5d70a00348e2721061d04f55e482207181b4ed8e 100644 (file)
 #define _TONGA_SMUMGR_H_
 
 #include "smu72_discrete.h"
-
 #include "smu7_smumgr.h"
+#include "smu72.h"
+
+
+/* Identify Tonga PRO parts by PCI device ID (wDID) and revision ID (bRID). */
+#define ASICID_IS_TONGA_P(wDID, bRID)   \
+       (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
+       || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
+
+/*
+ * Default power-tune parameters for Tonga, consumed by the SMU72 power
+ * containment / digital temperature estimation (DTE) setup code.
+ */
+struct tonga_pt_defaults {
+       uint8_t   svi_load_line_en;
+       uint8_t   svi_load_line_vddC;
+       uint8_t   tdc_vddc_throttle_release_limit_perc;
+       uint8_t   tdc_mawt;
+       uint8_t   tdc_waterfall_ctl;
+       uint8_t   dte_ambient_temp_base;
+       uint32_t  display_cac;
+       uint32_t  bapm_temp_gradient;
+       /* BAPM thermal-interaction tables, one coefficient per
+        * iteration x source x sink combination. */
+       uint16_t  bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+       uint16_t  bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+};
 
 struct tonga_mc_reg_entry {
        uint32_t mclk_max;
index 08e1332d814a2f090885f46c433f8446a3b87f0a..e4d3b4ec4e92c23891e728fb6cbd3ea9ec7639f2 100644 (file)
@@ -133,6 +133,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
        entity->rq = rq;
        entity->sched = sched;
 
+       spin_lock_init(&entity->rq_lock);
        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
@@ -204,7 +205,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
 {
-       struct amd_sched_rq *rq = entity->rq;
        int r;
 
        if (!amd_sched_entity_is_initialized(sched, entity))
@@ -218,7 +218,7 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
        else
                r = wait_event_killable(sched->job_scheduled,
                                        amd_sched_entity_is_idle(entity));
-       amd_sched_rq_remove_entity(rq, entity);
+       amd_sched_entity_set_rq(entity, NULL);
        if (r) {
                struct amd_sched_job *job;
 
@@ -257,6 +257,24 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
        dma_fence_put(f);
 }
 
+/*
+ * amd_sched_entity_set_rq - move a scheduler entity to a different run queue.
+ *
+ * Removes the entity from its current run queue (if any), records the new
+ * queue, and adds the entity to it.  Passing rq == NULL detaches the entity,
+ * as done from amd_sched_entity_fini().
+ *
+ * NOTE(review): the entity->rq == rq fast-path check is performed without
+ * holding rq_lock; presumably concurrent callers of this function are
+ * serialized elsewhere - confirm.
+ */
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+                            struct amd_sched_rq *rq)
+{
+       if (entity->rq == rq)
+               return;
+
+       spin_lock(&entity->rq_lock);
+
+       if (entity->rq)
+               amd_sched_rq_remove_entity(entity->rq, entity);
+
+       entity->rq = rq;
+       if (rq)
+               amd_sched_rq_add_entity(rq, entity);
+
+       spin_unlock(&entity->rq_lock);
+}
+
 bool amd_sched_dependency_optimized(struct dma_fence* fence,
                                    struct amd_sched_entity *entity)
 {
@@ -354,7 +372,9 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
+               spin_lock(&entity->rq_lock);
                amd_sched_rq_add_entity(entity->rq, entity);
+               spin_unlock(&entity->rq_lock);
                amd_sched_wakeup(sched);
        }
        return added;
@@ -386,6 +406,7 @@ static void amd_sched_job_finish(struct work_struct *work)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
+       dma_fence_put(&s_job->s_fence->finished);
        sched->ops->free_job(s_job);
 }
 
@@ -566,6 +587,7 @@ static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;
 
+       dma_fence_get(&s_fence->finished);
        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_finished(s_fence);
 
@@ -618,9 +640,6 @@ static int amd_sched_main(void *param)
                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
 
-               /* amd_sched_process_job drops the job's reference of the fence. */
-               sched_job->s_fence = NULL;
-
                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
index f9d8f28efd1619e9e8d8a5042982ca9d90818582..52c8e544762445888b36168775687a0d666ab638 100644 (file)
@@ -39,6 +39,7 @@ struct amd_sched_rq;
 struct amd_sched_entity {
        struct list_head                list;
        struct amd_sched_rq             *rq;
+       spinlock_t                      rq_lock;
        struct amd_gpu_scheduler        *sched;
 
        spinlock_t                      queue_lock;
@@ -115,9 +116,14 @@ struct amd_sched_backend_ops {
 
 enum amd_sched_priority {
        AMD_SCHED_PRIORITY_MIN,
-       AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN,
+       AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
+       AMD_SCHED_PRIORITY_NORMAL,
+       AMD_SCHED_PRIORITY_HIGH_SW,
+       AMD_SCHED_PRIORITY_HIGH_HW,
        AMD_SCHED_PRIORITY_KERNEL,
-       AMD_SCHED_PRIORITY_MAX
+       AMD_SCHED_PRIORITY_MAX,
+       AMD_SCHED_PRIORITY_INVALID = -1,
+       AMD_SCHED_PRIORITY_UNSET = -2
 };
 
 /**
@@ -150,6 +156,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity);
 void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+                            struct amd_sched_rq *rq);
 
 int amd_sched_fence_slab_init(void);
 void amd_sched_fence_slab_fini(void);
@@ -167,4 +175,11 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
 bool amd_sched_dependency_optimized(struct dma_fence* fence,
                                    struct amd_sched_entity *entity);
 void amd_sched_job_kickout(struct amd_sched_job *s_job);
+
+/*
+ * amd_sched_get_job_priority - derive a job's priority from its run queue.
+ *
+ * The scheduler keeps one run queue per priority level in the sched_rq
+ * array, so the pointer difference between the entity's rq and the array
+ * base is exactly the amd_sched_priority index.
+ */
+static inline enum amd_sched_priority
+amd_sched_get_job_priority(struct amd_sched_job *job)
+{
+       return (job->s_entity->rq - job->sched->sched_rq);
+}
+
 #endif
index 5182e3d5557d27304822baa7241dda7c5433bc50..66d23b619db19f09da9d2777e6c142eb38b2ca0b 100644 (file)
@@ -47,6 +47,7 @@ i915-y += i915_cmd_parser.o \
          i915_gem_tiling.o \
          i915_gem_timeline.o \
          i915_gem_userptr.o \
+         i915_gemfs.o \
          i915_trace_points.o \
          i915_vma.o \
          intel_breadcrumbs.o \
@@ -59,6 +60,8 @@ i915-y += i915_cmd_parser.o \
 
 # general-purpose microcontroller (GuC) support
 i915-y += intel_uc.o \
+         intel_uc_fw.o \
+         intel_guc.o \
          intel_guc_ct.o \
          intel_guc_log.o \
          intel_guc_loader.o \
index d5892d24f0b6ad2c9330f5dff3aedeae820050a1..f6ded475bb2cc4dec19697b01e1e37c2c015f7dd 100644 (file)
@@ -174,6 +174,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
+       case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:
index b4a6ac60e7c611faa93a25baaba9767bcf23e9e4..0bb6e01121fcf9a8156314149e93a1ecdc6183c1 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/sort.h>
 #include <linux/sched/mm.h>
 #include "intel_drv.h"
+#include "i915_guc_submission.h"
 
 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
 {
@@ -97,7 +98,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-       return !list_empty(&obj->userfault_link) ? 'g' : ' ';
+       return obj->userfault_count ? 'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -118,6 +119,36 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
        return size;
 }
 
+/*
+ * stringify_page_sizes - render a GTT page-size bitmask as readable text.
+ *
+ * Single known sizes return a static string ("4K"/"64K"/"2M"); combinations
+ * are formatted into @buf as a comma-separated list.  Callers that pass
+ * buf == NULL (e.g. per-vma printing) get a generic "M" for combinations.
+ *
+ * NOTE(review): in the default branch, if page_sizes contains none of the
+ * three known bits then x stays 0 and buf[x-2] indexes out of bounds;
+ * also snprintf truncation would leave x past len.  Both appear to be
+ * relied upon not to happen - confirm against callers.
+ */
+static const char *
+stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
+{
+       size_t x = 0;
+
+       switch (page_sizes) {
+       case 0:
+               return "";
+       case I915_GTT_PAGE_SIZE_4K:
+               return "4K";
+       case I915_GTT_PAGE_SIZE_64K:
+               return "64K";
+       case I915_GTT_PAGE_SIZE_2M:
+               return "2M";
+       default:
+               if (!buf)
+                       return "M";
+
+               if (page_sizes & I915_GTT_PAGE_SIZE_2M)
+                       x += snprintf(buf + x, len - x, "2M, ");
+               if (page_sizes & I915_GTT_PAGE_SIZE_64K)
+                       x += snprintf(buf + x, len - x, "64K, ");
+               if (page_sizes & I915_GTT_PAGE_SIZE_4K)
+                       x += snprintf(buf + x, len - x, "4K, ");
+               /* Chop the trailing ", " off the last item. */
+               buf[x-2] = '\0';
+
+               return buf;
+       }
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -155,9 +186,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
-               seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+               seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
-                          vma->node.start, vma->node.size);
+                          vma->node.start, vma->node.size,
+                          stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
                if (i915_vma_is_ggtt(vma)) {
                        switch (vma->ggtt_view.type) {
                        case I915_GGTT_VIEW_NORMAL:
@@ -402,10 +434,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       u32 count, mapped_count, purgeable_count, dpy_count;
-       u64 size, mapped_size, purgeable_size, dpy_size;
+       u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
+       u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
        struct drm_i915_gem_object *obj;
+       unsigned int page_sizes = 0;
        struct drm_file *file;
+       char buf[80];
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -419,6 +453,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
        size = count = 0;
        mapped_size = mapped_count = 0;
        purgeable_size = purgeable_count = 0;
+       huge_size = huge_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
                size += obj->base.size;
                ++count;
@@ -432,6 +467,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                        mapped_count++;
                        mapped_size += obj->base.size;
                }
+
+               if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+                       huge_count++;
+                       huge_size += obj->base.size;
+                       page_sizes |= obj->mm.page_sizes.sg;
+               }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
@@ -454,6 +495,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                        mapped_count++;
                        mapped_size += obj->base.size;
                }
+
+               if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+                       huge_count++;
+                       huge_size += obj->base.size;
+                       page_sizes |= obj->mm.page_sizes.sg;
+               }
        }
        seq_printf(m, "%u bound objects, %llu bytes\n",
                   count, size);
@@ -461,11 +508,18 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                   purgeable_count, purgeable_size);
        seq_printf(m, "%u mapped objects, %llu bytes\n",
                   mapped_count, mapped_size);
+       seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
+                  huge_count,
+                  stringify_page_sizes(page_sizes, buf, sizeof(buf)),
+                  huge_size);
        seq_printf(m, "%u display objects (pinned), %llu bytes\n",
                   dpy_count, dpy_size);
 
        seq_printf(m, "%llu [%llu] gtt total\n",
                   ggtt->base.total, ggtt->mappable_end);
+       seq_printf(m, "Supported page sizes: %s\n",
+                  stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
+                                       buf, sizeof(buf)));
 
        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
@@ -1026,6 +1080,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;
 
        intel_runtime_pm_get(dev_priv);
@@ -1041,9 +1096,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               u32 freq_sts;
+               u32 rpmodectl, freq_sts;
+
+               mutex_lock(&dev_priv->pcu_lock);
+
+               rpmodectl = I915_READ(GEN6_RP_CONTROL);
+               seq_printf(m, "Video Turbo Mode: %s\n",
+                          yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+               seq_printf(m, "HW control enabled: %s\n",
+                          yesno(rpmodectl & GEN6_RP_ENABLE));
+               seq_printf(m, "SW control enabled: %s\n",
+                          yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+                                 GEN6_RP_MEDIA_SW_MODE));
 
-               mutex_lock(&dev_priv->rps.hw_lock);
                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
@@ -1052,21 +1117,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
 
                seq_printf(m, "current GPU freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+                          intel_gpu_freq(dev_priv, rps->cur_freq));
 
                seq_printf(m, "max GPU freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                          intel_gpu_freq(dev_priv, rps->max_freq));
 
                seq_printf(m, "min GPU freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+                          intel_gpu_freq(dev_priv, rps->min_freq));
 
                seq_printf(m, "idle GPU freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+                          intel_gpu_freq(dev_priv, rps->idle_freq));
 
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
-               mutex_unlock(&dev_priv->rps.hw_lock);
+                          intel_gpu_freq(dev_priv, rps->efficient_freq));
+               mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
@@ -1136,10 +1201,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                        pm_mask = I915_READ(GEN6_PMINTRMSK);
                }
+               seq_printf(m, "Video Turbo Mode: %s\n",
+                          yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+               seq_printf(m, "HW control enabled: %s\n",
+                          yesno(rpmodectl & GEN6_RP_ENABLE));
+               seq_printf(m, "SW control enabled: %s\n",
+                          yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+                                 GEN6_RP_MEDIA_SW_MODE));
                seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
-                          dev_priv->rps.pm_intrmsk_mbz);
+                          rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
@@ -1159,8 +1231,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
-               seq_printf(m, "Up threshold: %d%%\n",
-                          dev_priv->rps.up_threshold);
+               seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
 
                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1168,8 +1239,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
-               seq_printf(m, "Down threshold: %d%%\n",
-                          dev_priv->rps.down_threshold);
+               seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
 
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
@@ -1191,22 +1261,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                          intel_gpu_freq(dev_priv, rps->max_freq));
 
                seq_printf(m, "Current freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+                          intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+                          intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+                          intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+                          intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                          intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
-                          intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+                          intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }
@@ -1447,21 +1517,11 @@ static void print_rc6_res(struct seq_file *m,
 static int vlv_drpc_info(struct seq_file *m)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       u32 rpmodectl1, rcctl1, pw_status;
+       u32 rcctl1, pw_status;
 
        pw_status = I915_READ(VLV_GTLC_PW_STATUS);
-       rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
 
-       seq_printf(m, "Video Turbo Mode: %s\n",
-                  yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
-       seq_printf(m, "Turbo enabled: %s\n",
-                  yesno(rpmodectl1 & GEN6_RP_ENABLE));
-       seq_printf(m, "HW control enabled: %s\n",
-                  yesno(rpmodectl1 & GEN6_RP_ENABLE));
-       seq_printf(m, "SW control enabled: %s\n",
-                  yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
-                         GEN6_RP_MEDIA_SW_MODE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
                                        GEN6_RC_CTL_EI_MODE(1))));
@@ -1479,7 +1539,7 @@ static int vlv_drpc_info(struct seq_file *m)
 static int gen6_drpc_info(struct seq_file *m)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+       u32 gt_core_status, rcctl1, rc6vids = 0;
        u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
        unsigned forcewake_count;
        int count = 0;
@@ -1498,24 +1558,16 @@ static int gen6_drpc_info(struct seq_file *m)
        gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
        trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
 
-       rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        if (INTEL_GEN(dev_priv) >= 9) {
                gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
                gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
        }
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
-       seq_printf(m, "Video Turbo Mode: %s\n",
-                  yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
-       seq_printf(m, "HW control enabled: %s\n",
-                  yesno(rpmodectl1 & GEN6_RP_ENABLE));
-       seq_printf(m, "SW control enabled: %s\n",
-                  yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
-                         GEN6_RP_MEDIA_SW_MODE));
        seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
@@ -1778,6 +1830,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;
        int gpu_freq, ia_freq;
        unsigned int max_gpu_freq, min_gpu_freq;
@@ -1789,19 +1842,17 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
        intel_runtime_pm_get(dev_priv);
 
-       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+       ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
        if (ret)
                goto out;
 
        if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                /* Convert GT frequency to 50 HZ units */
-               min_gpu_freq =
-                       dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
-               max_gpu_freq =
-                       dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+               min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
+               max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
        } else {
-               min_gpu_freq = dev_priv->rps.min_freq_softlimit;
-               max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+               min_gpu_freq = rps->min_freq_softlimit;
+               max_gpu_freq = rps->max_freq_softlimit;
        }
 
        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -1820,7 +1871,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                           ((ia_freq >> 8) & 0xff) * 100);
        }
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
 out:
        intel_runtime_pm_put(dev_priv);
@@ -2254,25 +2305,26 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        struct drm_file *file;
 
-       seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+       seq_printf(m, "RPS enabled? %d\n", rps->enabled);
        seq_printf(m, "GPU busy? %s [%d requests]\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Boosts outstanding? %d\n",
-                  atomic_read(&dev_priv->rps.num_waiters));
+                  atomic_read(&rps->num_waiters));
        seq_printf(m, "Frequency requested %d\n",
-                  intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+                  intel_gpu_freq(dev_priv, rps->cur_freq));
        seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-                  intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+                  intel_gpu_freq(dev_priv, rps->min_freq),
+                  intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
+                  intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
+                  intel_gpu_freq(dev_priv, rps->max_freq));
        seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
-                  intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                  intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+                  intel_gpu_freq(dev_priv, rps->idle_freq),
+                  intel_gpu_freq(dev_priv, rps->efficient_freq),
+                  intel_gpu_freq(dev_priv, rps->boost_freq));
 
        mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
@@ -2284,15 +2336,15 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
                seq_printf(m, "%s [%d]: %d boosts\n",
                           task ? task->comm : "<unknown>",
                           task ? task->pid : -1,
-                          atomic_read(&file_priv->rps.boosts));
+                          atomic_read(&file_priv->rps_client.boosts));
                rcu_read_unlock();
        }
        seq_printf(m, "Kernel (anonymous) boosts: %d\n",
-                  atomic_read(&dev_priv->rps.boosts));
+                  atomic_read(&rps->boosts));
        mutex_unlock(&dev->filelist_mutex);
 
        if (INTEL_GEN(dev_priv) >= 6 &&
-           dev_priv->rps.enabled &&
+           rps->enabled &&
            dev_priv->gt.active_requests) {
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;
@@ -2305,13 +2357,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
                seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
-                          rps_power_to_str(dev_priv->rps.power));
+                          rps_power_to_str(rps->power));
                seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
                           rpup && rpupei ? 100 * rpup / rpupei : 0,
-                          dev_priv->rps.up_threshold);
+                          rps->up_threshold);
                seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
                           rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
-                          dev_priv->rps.down_threshold);
+                          rps->down_threshold);
        } else {
                seq_puts(m, "\nRPS Autotuning inactive\n");
        }
@@ -3238,9 +3290,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
 static int i915_engine_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct i915_gpu_error *error = &dev_priv->gpu_error;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       struct drm_printer p;
 
        intel_runtime_pm_get(dev_priv);
 
@@ -3249,149 +3301,9 @@ static int i915_engine_info(struct seq_file *m, void *unused)
        seq_printf(m, "Global active requests: %d\n",
                   dev_priv->gt.active_requests);
 
-       for_each_engine(engine, dev_priv, id) {
-               struct intel_breadcrumbs *b = &engine->breadcrumbs;
-               struct drm_i915_gem_request *rq;
-               struct rb_node *rb;
-               u64 addr;
-
-               seq_printf(m, "%s\n", engine->name);
-               seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
-                          intel_engine_get_seqno(engine),
-                          intel_engine_last_submit(engine),
-                          engine->hangcheck.seqno,
-                          jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
-                          engine->timeline->inflight_seqnos);
-               seq_printf(m, "\tReset count: %d\n",
-                          i915_reset_engine_count(error, engine));
-
-               rcu_read_lock();
-
-               seq_printf(m, "\tRequests:\n");
-
-               rq = list_first_entry(&engine->timeline->requests,
-                                     struct drm_i915_gem_request, link);
-               if (&rq->link != &engine->timeline->requests)
-                       print_request(m, rq, "\t\tfirst  ");
-
-               rq = list_last_entry(&engine->timeline->requests,
-                                    struct drm_i915_gem_request, link);
-               if (&rq->link != &engine->timeline->requests)
-                       print_request(m, rq, "\t\tlast   ");
-
-               rq = i915_gem_find_active_request(engine);
-               if (rq) {
-                       print_request(m, rq, "\t\tactive ");
-                       seq_printf(m,
-                                  "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
-                                  rq->head, rq->postfix, rq->tail,
-                                  rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
-                                  rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
-               }
-
-               seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
-                          I915_READ(RING_START(engine->mmio_base)),
-                          rq ? i915_ggtt_offset(rq->ring->vma) : 0);
-               seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
-                          I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
-                          rq ? rq->ring->head : 0);
-               seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
-                          I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
-                          rq ? rq->ring->tail : 0);
-               seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
-                          I915_READ(RING_CTL(engine->mmio_base)),
-                          I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
-
-               rcu_read_unlock();
-
-               addr = intel_engine_get_active_head(engine);
-               seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
-                          upper_32_bits(addr), lower_32_bits(addr));
-               addr = intel_engine_get_last_batch_head(engine);
-               seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
-                          upper_32_bits(addr), lower_32_bits(addr));
-
-               if (i915_modparams.enable_execlists) {
-                       const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-                       struct intel_engine_execlists * const execlists = &engine->execlists;
-                       u32 ptr, read, write;
-                       unsigned int idx;
-
-                       seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
-                                  I915_READ(RING_EXECLIST_STATUS_LO(engine)),
-                                  I915_READ(RING_EXECLIST_STATUS_HI(engine)));
-
-                       ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
-                       read = GEN8_CSB_READ_PTR(ptr);
-                       write = GEN8_CSB_WRITE_PTR(ptr);
-                       seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
-                                  read, execlists->csb_head,
-                                  write,
-                                  intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
-                                  yesno(test_bit(ENGINE_IRQ_EXECLIST,
-                                                 &engine->irq_posted)));
-                       if (read >= GEN8_CSB_ENTRIES)
-                               read = 0;
-                       if (write >= GEN8_CSB_ENTRIES)
-                               write = 0;
-                       if (read > write)
-                               write += GEN8_CSB_ENTRIES;
-                       while (read < write) {
-                               idx = ++read % GEN8_CSB_ENTRIES;
-                               seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
-                                          idx,
-                                          I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
-                                          hws[idx * 2],
-                                          I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
-                                          hws[idx * 2 + 1]);
-                       }
-
-                       rcu_read_lock();
-                       for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
-                               unsigned int count;
-
-                               rq = port_unpack(&execlists->port[idx], &count);
-                               if (rq) {
-                                       seq_printf(m, "\t\tELSP[%d] count=%d, ",
-                                                  idx, count);
-                                       print_request(m, rq, "rq: ");
-                               } else {
-                                       seq_printf(m, "\t\tELSP[%d] idle\n",
-                                                  idx);
-                               }
-                       }
-                       rcu_read_unlock();
-
-                       spin_lock_irq(&engine->timeline->lock);
-                       for (rb = execlists->first; rb; rb = rb_next(rb)) {
-                               struct i915_priolist *p =
-                                       rb_entry(rb, typeof(*p), node);
-
-                               list_for_each_entry(rq, &p->requests,
-                                                   priotree.link)
-                                       print_request(m, rq, "\t\tQ ");
-                       }
-                       spin_unlock_irq(&engine->timeline->lock);
-               } else if (INTEL_GEN(dev_priv) > 6) {
-                       seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
-                                  I915_READ(RING_PP_DIR_BASE(engine)));
-                       seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
-                                  I915_READ(RING_PP_DIR_BASE_READ(engine)));
-                       seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
-                                  I915_READ(RING_PP_DIR_DCLV(engine)));
-               }
-
-               spin_lock_irq(&b->rb_lock);
-               for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-                       struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-                       seq_printf(m, "\t%s [%d] waiting for %x\n",
-                                  w->tsk->comm, w->tsk->pid, w->seqno);
-               }
-               spin_unlock_irq(&b->rb_lock);
-
-               seq_puts(m, "\n");
-       }
+       p = drm_seq_file_printer(m);
+       for_each_engine(engine, dev_priv, id)
+               intel_engine_dump(engine, &p);
 
        intel_runtime_pm_put(dev_priv);
 
@@ -4258,8 +4170,7 @@ fault_irq_set(struct drm_i915_private *i915,
        mutex_unlock(&i915->drm.struct_mutex);
 
        /* Flush idle worker to disarm irq */
-       while (flush_delayed_work(&i915->gt.idle_work))
-               ;
+       drain_delayed_work(&i915->gt.idle_work);
 
        return 0;
 
@@ -4392,7 +4303,7 @@ i915_max_freq_get(void *data, u64 *val)
        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+       *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
        return 0;
 }
 
@@ -4400,6 +4311,7 @@ static int
 i915_max_freq_set(void *data, u64 val)
 {
        struct drm_i915_private *dev_priv = data;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 hw_max, hw_min;
        int ret;
 
@@ -4408,7 +4320,7 @@ i915_max_freq_set(void *data, u64 val)
 
        DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
-       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+       ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
        if (ret)
                return ret;
 
@@ -4417,20 +4329,20 @@ i915_max_freq_set(void *data, u64 val)
         */
        val = intel_freq_opcode(dev_priv, val);
 
-       hw_max = dev_priv->rps.max_freq;
-       hw_min = dev_priv->rps.min_freq;
+       hw_max = rps->max_freq;
+       hw_min = rps->min_freq;
 
-       if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
-               mutex_unlock(&dev_priv->rps.hw_lock);
+       if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
+               mutex_unlock(&dev_priv->pcu_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.max_freq_softlimit = val;
+       rps->max_freq_softlimit = val;
 
        if (intel_set_rps(dev_priv, val))
                DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        return 0;
 }
@@ -4447,7 +4359,7 @@ i915_min_freq_get(void *data, u64 *val)
        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
-       *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+       *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
        return 0;
 }
 
@@ -4455,6 +4367,7 @@ static int
 i915_min_freq_set(void *data, u64 val)
 {
        struct drm_i915_private *dev_priv = data;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 hw_max, hw_min;
        int ret;
 
@@ -4463,7 +4376,7 @@ i915_min_freq_set(void *data, u64 val)
 
        DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
-       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+       ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
        if (ret)
                return ret;
 
@@ -4472,21 +4385,21 @@ i915_min_freq_set(void *data, u64 val)
         */
        val = intel_freq_opcode(dev_priv, val);
 
-       hw_max = dev_priv->rps.max_freq;
-       hw_min = dev_priv->rps.min_freq;
+       hw_max = rps->max_freq;
+       hw_min = rps->min_freq;
 
        if (val < hw_min ||
-           val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
-               mutex_unlock(&dev_priv->rps.hw_lock);
+           val > hw_max || val > rps->max_freq_softlimit) {
+               mutex_unlock(&dev_priv->pcu_lock);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_freq_softlimit = val;
+       rps->min_freq_softlimit = val;
 
        if (intel_set_rps(dev_priv, val))
                DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        return 0;
 }
index 59ac9199b35dc6b93eb28edfb7d9f4794b904816..3db5851756f060fd23241aa4dd448f4d71530368 100644 (file)
@@ -367,9 +367,18 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = i915_gem_mmap_gtt_version();
                break;
        case I915_PARAM_HAS_SCHEDULER:
-               value = dev_priv->engine[RCS] &&
-                       dev_priv->engine[RCS]->schedule;
+               value = 0;
+               if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
+                       value |= I915_SCHEDULER_CAP_ENABLED;
+                       value |= I915_SCHEDULER_CAP_PRIORITY;
+
+                       if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
+                           i915_modparams.enable_execlists &&
+                           !i915_modparams.enable_guc_submission)
+                               value |= I915_SCHEDULER_CAP_PREEMPTION;
+               }
                break;
+
        case I915_PARAM_MMAP_VERSION:
                /* Remember to bump this if the version changes! */
        case I915_PARAM_HAS_GEM:
@@ -606,9 +615,10 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
        intel_uc_fini_hw(dev_priv);
        i915_gem_cleanup_engines(dev_priv);
        i915_gem_contexts_fini(dev_priv);
-       i915_gem_cleanup_userptr(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
+       i915_gem_cleanup_userptr(dev_priv);
+
        i915_gem_drain_freed_objects(dev_priv);
 
        WARN_ON(!list_empty(&dev_priv->contexts.list));
@@ -1007,6 +1017,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
        intel_uncore_init(dev_priv);
 
+       intel_uc_init_mmio(dev_priv);
+
        ret = intel_engines_init_mmio(dev_priv);
        if (ret)
                goto err_uncore;
@@ -1580,7 +1592,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
        intel_display_set_init_power(dev_priv, false);
 
-       fw_csr = !IS_GEN9_LP(dev_priv) &&
+       fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
                suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
        /*
         * In case of firmware assisted context save/restore don't manually
@@ -2070,11 +2082,14 @@ static int i915_pm_resume(struct device *kdev)
 /* freeze: before creating the hibernation_image */
 static int i915_pm_freeze(struct device *kdev)
 {
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
        int ret;
 
-       ret = i915_pm_suspend(kdev);
-       if (ret)
-               return ret;
+       if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
+               ret = i915_drm_suspend(dev);
+               if (ret)
+                       return ret;
+       }
 
        ret = i915_gem_freeze(kdev_to_i915(kdev));
        if (ret)
@@ -2085,11 +2100,14 @@ static int i915_pm_freeze(struct device *kdev)
 
 static int i915_pm_freeze_late(struct device *kdev)
 {
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
        int ret;
 
-       ret = i915_pm_suspend_late(kdev);
-       if (ret)
-               return ret;
+       if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
+               ret = i915_drm_suspend_late(dev, true);
+               if (ret)
+                       return ret;
+       }
 
        ret = i915_gem_freeze_late(kdev_to_i915(kdev));
        if (ret)
@@ -2485,7 +2503,7 @@ static int intel_runtime_suspend(struct device *kdev)
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
-       if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+       if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
                return -ENODEV;
 
        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2527,12 +2545,12 @@ static int intel_runtime_suspend(struct device *kdev)
        intel_uncore_suspend(dev_priv);
 
        enable_rpm_wakeref_asserts(dev_priv);
-       WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+       WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
 
        if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
                DRM_ERROR("Unclaimed access detected prior to suspending\n");
 
-       dev_priv->pm.suspended = true;
+       dev_priv->runtime_pm.suspended = true;
 
        /*
         * FIXME: We really should find a document that references the arguments
@@ -2578,11 +2596,11 @@ static int intel_runtime_resume(struct device *kdev)
 
        DRM_DEBUG_KMS("Resuming device\n");
 
-       WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+       WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
        disable_rpm_wakeref_asserts(dev_priv);
 
        intel_opregion_notify_adapter(dev_priv, PCI_D0);
-       dev_priv->pm.suspended = false;
+       dev_priv->runtime_pm.suspended = false;
        if (intel_uncore_unclaimed_mmio(dev_priv))
                DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
index 7ca11318ac6908b6a343a2aca4839ceaa8f4c934..c7b2ca6aff05e6a8876ffbffdd5fa77df1b19025 100644 (file)
@@ -80,8 +80,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20170929"
-#define DRIVER_TIMESTAMP       1506682238
+#define DRIVER_DATE            "20171012"
+#define DRIVER_TIMESTAMP       1507831511
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -609,7 +609,7 @@ struct drm_i915_file_private {
 
        struct intel_rps_client {
                atomic_t boosts;
-       } rps;
+       } rps_client;
 
        unsigned int bsd_engine;
 
@@ -783,6 +783,7 @@ struct intel_csr {
        func(has_l3_dpf); \
        func(has_llc); \
        func(has_logical_ring_contexts); \
+       func(has_logical_ring_preemption); \
        func(has_overlay); \
        func(has_pipe_cxsr); \
        func(has_pooled_eu); \
@@ -868,6 +869,8 @@ struct intel_device_info {
        u8 num_sprites[I915_MAX_PIPES];
        u8 num_scalers[I915_MAX_PIPES];
 
+       unsigned int page_sizes; /* page sizes supported by the HW */
+
 #define DEFINE_FLAG(name) u8 name:1
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 #undef DEFINE_FLAG
@@ -981,6 +984,7 @@ struct i915_gpu_state {
                        pid_t pid;
                        u32 handle;
                        u32 hw_id;
+                       int priority;
                        int ban_score;
                        int active;
                        int guilty;
@@ -1003,6 +1007,7 @@ struct i915_gpu_state {
                        long jiffies;
                        pid_t pid;
                        u32 context;
+                       int priority;
                        int ban_score;
                        u32 seqno;
                        u32 head;
@@ -1312,7 +1317,7 @@ struct intel_rps_ei {
        u32 media_c0;
 };
 
-struct intel_gen6_power_mgmt {
+struct intel_rps {
        /*
         * work, interrupts_enabled and pm_iir are protected by
         * dev_priv->irq_lock
@@ -1353,20 +1358,26 @@ struct intel_gen6_power_mgmt {
        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
        bool enabled;
-       struct delayed_work autoenable_work;
        atomic_t num_waiters;
        atomic_t boosts;
 
        /* manual wa residency calculations */
        struct intel_rps_ei ei;
+};
 
-       /*
-        * Protects RPS/RC6 register access and PCU communication.
-        * Must be taken after struct_mutex if nested. Note that
-        * this lock may be held for long periods of time when
-        * talking to hw - so only take it when talking to hw!
-        */
-       struct mutex hw_lock;
+struct intel_rc6 {
+       bool enabled;
+};
+
+struct intel_llc_pstate {
+       bool enabled;
+};
+
+struct intel_gen6_power_mgmt {
+       struct intel_rps rps;
+       struct intel_rc6 rc6;
+       struct intel_llc_pstate llc_pstate;
+       struct delayed_work autoenable_work;
 };
 
 /* defined intel_pm.c */
@@ -1508,6 +1519,11 @@ struct i915_gem_mm {
        /** Usable portion of the GTT for GEM */
        dma_addr_t stolen_base; /* limited to low memory (32-bit) */
 
+       /**
+        * tmpfs instance used for shmem backed objects
+        */
+       struct vfsmount *gemfs;
+
        /** PPGTT used for aliasing the PPGTT with the GTT */
        struct i915_hw_ppgtt *aliasing_ppgtt;
 
@@ -2251,8 +2267,11 @@ struct drm_i915_private {
        wait_queue_head_t gmbus_wait_queue;
 
        struct pci_dev *bridge_dev;
-       struct i915_gem_context *kernel_context;
        struct intel_engine_cs *engine[I915_NUM_ENGINES];
+       /* Context used internally to idle the GPU and setup initial state */
+       struct i915_gem_context *kernel_context;
+       /* Context only to be used for injecting preemption commands */
+       struct i915_gem_context *preempt_context;
        struct i915_vma *semaphore;
 
        struct drm_dma_handle *status_page_dmah;
@@ -2408,8 +2427,16 @@ struct drm_i915_private {
        /* Cannot be determined by PCIID. You must always read a register. */
        u32 edram_cap;
 
-       /* gen6+ rps state */
-       struct intel_gen6_power_mgmt rps;
+       /*
+        * Protects RPS/RC6 register access and PCU communication.
+        * Must be taken after struct_mutex if nested. Note that
+        * this lock may be held for long periods of time when
+        * talking to hw - so only take it when talking to hw!
+        */
+       struct mutex pcu_lock;
+
+       /* gen6+ GT PM state */
+       struct intel_gen6_power_mgmt gt_pm;
 
        /* ilk-only ips/rps state. Everything in here is protected by the global
         * mchdev_lock in intel_pm.c */
@@ -2520,7 +2547,7 @@ struct drm_i915_private {
                bool distrust_bios_wm;
        } wm;
 
-       struct i915_runtime_pm pm;
+       struct i915_runtime_pm runtime_pm;
 
        struct {
                bool initialized;
@@ -2859,6 +2886,21 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
             (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?           \
             (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
 
+static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
+{
+       unsigned int page_sizes;
+
+       page_sizes = 0;
+       while (sg) {
+               GEM_BUG_ON(sg->offset);
+               GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
+               page_sizes |= sg->length;
+               sg = __sg_next(sg);
+       }
+
+       return page_sizes;
+}
+
 static inline unsigned int i915_sg_segment_size(void)
 {
        unsigned int size = swiotlb_max_segment();
@@ -3088,6 +3130,10 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define USES_PPGTT(dev_priv)           (i915_modparams.enable_ppgtt)
 #define USES_FULL_PPGTT(dev_priv)      (i915_modparams.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev_priv)        (i915_modparams.enable_ppgtt == 3)
+#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
+       GEM_BUG_ON((sizes) == 0); \
+       ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+})
 
 #define HAS_OVERLAY(dev_priv)           ((dev_priv)->info.has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
@@ -3504,7 +3550,8 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-                                struct sg_table *pages);
+                                struct sg_table *pages,
+                                unsigned int sg_page_sizes);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
@@ -3726,8 +3773,6 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 }
 
 /* i915_gem_fence_reg.c */
-int __must_check i915_vma_get_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
 struct drm_i915_fence_reg *
 i915_reserve_fence(struct drm_i915_private *dev_priv);
 void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
index 73eeb6b1f1cd6682bab4e6eea6c5aca44128d259..20fcac37c85a0f2332a84c8d4922e26dc88b7466 100644 (file)
@@ -35,6 +35,7 @@
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include "intel_mocs.h"
+#include "i915_gemfs.h"
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
 #include <linux/reservation.h>
@@ -161,8 +162,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping = obj->base.filp->f_mapping;
        drm_dma_handle_t *phys;
@@ -170,9 +170,10 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        struct scatterlist *sg;
        char *vaddr;
        int i;
+       int err;
 
        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        /* Always aligning to the object size, allows a single allocation
         * to handle all possible callers, and given typical object sizes,
@@ -182,7 +183,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                             roundup_pow_of_two(obj->base.size),
                             roundup_pow_of_two(obj->base.size));
        if (!phys)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -191,7 +192,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page)) {
-                       st = ERR_CAST(page);
+                       err = PTR_ERR(page);
                        goto err_phys;
                }
 
@@ -208,13 +209,13 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
-               st = ERR_PTR(-ENOMEM);
+               err = -ENOMEM;
                goto err_phys;
        }
 
        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
-               st = ERR_PTR(-ENOMEM);
+               err = -ENOMEM;
                goto err_phys;
        }
 
@@ -226,11 +227,15 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        sg_dma_len(sg) = obj->base.size;
 
        obj->phys_handle = phys;
-       return st;
+
+       __i915_gem_object_set_pages(obj, st, sg->length);
+
+       return 0;
 
 err_phys:
        drm_pci_free(obj->base.dev, phys);
-       return st;
+
+       return err;
 }
 
 static void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -353,7 +358,7 @@ static long
 i915_gem_object_wait_fence(struct dma_fence *fence,
                           unsigned int flags,
                           long timeout,
-                          struct intel_rps_client *rps)
+                          struct intel_rps_client *rps_client)
 {
        struct drm_i915_gem_request *rq;
 
@@ -386,11 +391,11 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
-       if (rps) {
+       if (rps_client) {
                if (INTEL_GEN(rq->i915) >= 6)
-                       gen6_rps_boost(rq, rps);
+                       gen6_rps_boost(rq, rps_client);
                else
-                       rps = NULL;
+                       rps_client = NULL;
        }
 
        timeout = i915_wait_request(rq, flags, timeout);
@@ -406,7 +411,7 @@ static long
 i915_gem_object_wait_reservation(struct reservation_object *resv,
                                 unsigned int flags,
                                 long timeout,
-                                struct intel_rps_client *rps)
+                                struct intel_rps_client *rps_client)
 {
        unsigned int seq = __read_seqcount_begin(&resv->seq);
        struct dma_fence *excl;
@@ -425,7 +430,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
                for (i = 0; i < count; i++) {
                        timeout = i915_gem_object_wait_fence(shared[i],
                                                             flags, timeout,
-                                                            rps);
+                                                            rps_client);
                        if (timeout < 0)
                                break;
 
@@ -442,7 +447,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
        }
 
        if (excl && timeout >= 0) {
-               timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+               timeout = i915_gem_object_wait_fence(excl, flags, timeout,
+                                                    rps_client);
                prune_fences = timeout >= 0;
        }
 
@@ -538,7 +544,7 @@ int
 i915_gem_object_wait(struct drm_i915_gem_object *obj,
                     unsigned int flags,
                     long timeout,
-                    struct intel_rps_client *rps)
+                    struct intel_rps_client *rps_client)
 {
        might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
@@ -550,7 +556,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 
        timeout = i915_gem_object_wait_reservation(obj->resv,
                                                   flags, timeout,
-                                                  rps);
+                                                  rps_client);
        return timeout < 0 ? timeout : 0;
 }
 
@@ -558,7 +564,7 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
 {
        struct drm_i915_file_private *fpriv = file->driver_priv;
 
-       return &fpriv->rps;
+       return &fpriv->rps_client;
 }
 
 static int
@@ -1050,7 +1056,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 
        intel_runtime_pm_get(i915);
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-                                      PIN_MAPPABLE | PIN_NONBLOCK);
+                                      PIN_MAPPABLE |
+                                      PIN_NONFAULT |
+                                      PIN_NONBLOCK);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
@@ -1234,7 +1242,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 
        intel_runtime_pm_get(i915);
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-                                      PIN_MAPPABLE | PIN_NONBLOCK);
+                                      PIN_MAPPABLE |
+                                      PIN_NONFAULT |
+                                      PIN_NONBLOCK);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
@@ -1905,22 +1915,27 @@ int i915_gem_fault(struct vm_fault *vmf)
        if (ret)
                goto err_unpin;
 
-       ret = i915_vma_get_fence(vma);
+       ret = i915_vma_pin_fence(vma);
        if (ret)
                goto err_unpin;
 
-       /* Mark as being mmapped into userspace for later revocation */
-       assert_rpm_wakelock_held(dev_priv);
-       if (list_empty(&obj->userfault_link))
-               list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
-
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
                               (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
                               &ggtt->mappable);
+       if (ret)
+               goto err_fence;
 
+       /* Mark as being mmapped into userspace for later revocation */
+       assert_rpm_wakelock_held(dev_priv);
+       if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+               list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+       GEM_BUG_ON(!obj->userfault_count);
+
+err_fence:
+       i915_vma_unpin_fence(vma);
 err_unpin:
        __i915_vma_unpin(vma);
 err_unlock:
@@ -1972,6 +1987,25 @@ err:
        return ret;
 }
 
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma;
+
+       GEM_BUG_ON(!obj->userfault_count);
+
+       obj->userfault_count = 0;
+       list_del(&obj->userfault_link);
+       drm_vma_node_unmap(&obj->base.vma_node,
+                          obj->base.dev->anon_inode->i_mapping);
+
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!i915_vma_is_ggtt(vma))
+                       break;
+
+               i915_vma_unset_userfault(vma);
+       }
+}
+
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -2002,12 +2036,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        lockdep_assert_held(&i915->drm.struct_mutex);
        intel_runtime_pm_get(i915);
 
-       if (list_empty(&obj->userfault_link))
+       if (!obj->userfault_count)
                goto out;
 
-       list_del_init(&obj->userfault_link);
-       drm_vma_node_unmap(&obj->base.vma_node,
-                          obj->base.dev->anon_inode->i_mapping);
+       __i915_gem_object_release_mmap(obj);
 
        /* Ensure that the CPU's PTE are revoked and there are not outstanding
         * memory transactions from userspace before we return. The TLB
@@ -2035,11 +2067,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
         */
 
        list_for_each_entry_safe(obj, on,
-                                &dev_priv->mm.userfault_list, userfault_link) {
-               list_del_init(&obj->userfault_link);
-               drm_vma_node_unmap(&obj->base.vma_node,
-                                  obj->base.dev->anon_inode->i_mapping);
-       }
+                                &dev_priv->mm.userfault_list, userfault_link)
+               __i915_gem_object_release_mmap(obj);
 
        /* The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
@@ -2062,7 +2091,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
                if (!reg->vma)
                        continue;
 
-               GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+               GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
 }
@@ -2261,6 +2290,8 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        if (!IS_ERR(pages))
                obj->ops->put_pages(obj, pages);
 
+       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
 unlock:
        mutex_unlock(&obj->mm.lock);
 }
@@ -2291,8 +2322,7 @@ static bool i915_sg_trim(struct sg_table *orig_st)
        return true;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        const unsigned long page_count = obj->base.size / PAGE_SIZE;
@@ -2304,6 +2334,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        unsigned int max_segment = i915_sg_segment_size();
+       unsigned int sg_page_sizes;
        gfp_t noreclaim;
        int ret;
 
@@ -2316,12 +2347,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
 rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Get the list of pages out of our struct file.  They'll be pinned
@@ -2335,6 +2366,7 @@ rebuild_st:
 
        sg = st->sgl;
        st->nents = 0;
+       sg_page_sizes = 0;
        for (i = 0; i < page_count; i++) {
                const unsigned int shrink[] = {
                        I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
@@ -2387,8 +2419,10 @@ rebuild_st:
                if (!i ||
                    sg->length >= max_segment ||
                    page_to_pfn(page) != last_pfn + 1) {
-                       if (i)
+                       if (i) {
+                               sg_page_sizes |= sg->length;
                                sg = sg_next(sg);
+                       }
                        st->nents++;
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                } else {
@@ -2399,8 +2433,10 @@ rebuild_st:
                /* Check that the i965g/gm workaround works. */
                WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
        }
-       if (sg) /* loop terminated early; short sg table */
+       if (sg) { /* loop terminated early; short sg table */
+               sg_page_sizes |= sg->length;
                sg_mark_end(sg);
+       }
 
        /* Trim unused sg entries to avoid wasting memory. */
        i915_sg_trim(st);
@@ -2429,7 +2465,9 @@ rebuild_st:
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);
 
-       return st;
+       __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+       return 0;
 
 err_sg:
        sg_mark_end(sg);
@@ -2450,12 +2488,17 @@ err_pages:
        if (ret == -ENOSPC)
                ret = -ENOMEM;
 
-       return ERR_PTR(ret);
+       return ret;
 }
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-                                struct sg_table *pages)
+                                struct sg_table *pages,
+                                unsigned int sg_page_sizes)
 {
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       unsigned long supported = INTEL_INFO(i915)->page_sizes;
+       int i;
+
        lockdep_assert_held(&obj->mm.lock);
 
        obj->mm.get_page.sg_pos = pages->sgl;
@@ -2469,23 +2512,40 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                __i915_gem_object_pin_pages(obj);
                obj->mm.quirked = true;
        }
+
+       GEM_BUG_ON(!sg_page_sizes);
+       obj->mm.page_sizes.phys = sg_page_sizes;
+
+       /*
+        * Calculate the supported page-sizes which fit into the given
+        * sg_page_sizes. This will give us the page-sizes which we may be able
+        * to use opportunistically when later inserting into the GTT. For
+        * example if phys=2G, then in theory we should be able to use 1G, 2M,
+        * 64K or 4K pages, although in practice this will depend on a number of
+        * other factors.
+        */
+       obj->mm.page_sizes.sg = 0;
+       for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+               if (obj->mm.page_sizes.phys & ~0u << i)
+                       obj->mm.page_sizes.sg |= BIT(i);
+       }
+
+       GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-       struct sg_table *pages;
+       int err;
 
        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                DRM_DEBUG("Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }
 
-       pages = obj->ops->get_pages(obj);
-       if (unlikely(IS_ERR(pages)))
-               return PTR_ERR(pages);
+       err = obj->ops->get_pages(obj);
+       GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
 
-       __i915_gem_object_set_pages(obj, pages);
-       return 0;
+       return err;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -2796,7 +2856,17 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request = NULL;
 
-       /* Prevent the signaler thread from updating the request
+       /*
+        * During the reset sequence, we must prevent the engine from
+        * entering RC6. As the context state is undefined until we restart
+        * the engine, if it does enter RC6 during the reset, the state
+        * written to the powercontext is undefined and so we may lose
+        * GPU state upon resume, i.e. fail to restart after a reset.
+        */
+       intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+
+       /*
+        * Prevent the signaler thread from updating the request
         * state (by calling dma_fence_signal) as we are processing
         * the reset. The write from the GPU of the seqno is
         * asynchronous and the signaler thread may see a different
@@ -2807,7 +2877,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         */
        kthread_park(engine->breadcrumbs.signaler);
 
-       /* Prevent request submission to the hardware until we have
+       /*
+        * Prevent request submission to the hardware until we have
         * completed the reset in i915_gem_reset_finish(). If a request
         * is completed by one engine, it may then queue a request
         * to a second via its engine->irq_tasklet *just* as we are
@@ -2997,6 +3068,8 @@ void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
        tasklet_enable(&engine->execlists.irq_tasklet);
        kthread_unpark(engine->breadcrumbs.signaler);
+
+       intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
 }
 
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
@@ -3016,49 +3089,76 @@ static void nop_submit_request(struct drm_i915_gem_request *request)
 {
        GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
        dma_fence_set_error(&request->fence, -EIO);
+
        i915_gem_request_submit(request);
-       intel_engine_init_global_seqno(request->engine, request->global_seqno);
 }
 
-static void engine_set_wedged(struct intel_engine_cs *engine)
+static void nop_complete_submit_request(struct drm_i915_gem_request *request)
 {
-       /* We need to be sure that no thread is running the old callback as
-        * we install the nop handler (otherwise we would submit a request
-        * to hardware that will never complete). In order to prevent this
-        * race, we wait until the machine is idle before making the swap
-        * (using stop_machine()).
-        */
-       engine->submit_request = nop_submit_request;
+       unsigned long flags;
 
-       /* Mark all executing requests as skipped */
-       engine->cancel_requests(engine);
+       GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
+       dma_fence_set_error(&request->fence, -EIO);
 
-       /* Mark all pending requests as complete so that any concurrent
-        * (lockless) lookup doesn't try and wait upon the request as we
-        * reset it.
-        */
-       intel_engine_init_global_seqno(engine,
-                                      intel_engine_last_submit(engine));
+       spin_lock_irqsave(&request->engine->timeline->lock, flags);
+       __i915_gem_request_submit(request);
+       intel_engine_init_global_seqno(request->engine, request->global_seqno);
+       spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
 }
 
-static int __i915_gem_set_wedged_BKL(void *data)
+void i915_gem_set_wedged(struct drm_i915_private *i915)
 {
-       struct drm_i915_private *i915 = data;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
+       /*
+        * First, stop submission to hw, but do not yet complete requests by
+        * rolling the global seqno forward (since this would complete requests
+        * for which we haven't set the fence error to EIO yet).
+        */
        for_each_engine(engine, i915, id)
-               engine_set_wedged(engine);
+               engine->submit_request = nop_submit_request;
 
-       set_bit(I915_WEDGED, &i915->gpu_error.flags);
-       wake_up_all(&i915->gpu_error.reset_queue);
+       /*
+        * Make sure no one is running the old callback before we proceed with
+        * cancelling requests and resetting the completion tracking. Otherwise
+        * we might submit a request to the hardware which never completes.
+        */
+       synchronize_rcu();
 
-       return 0;
-}
+       for_each_engine(engine, i915, id) {
+               /* Mark all executing requests as skipped */
+               engine->cancel_requests(engine);
 
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
-{
-       stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
+               /*
+                * Only once we've force-cancelled all in-flight requests can we
+                * start to complete all requests.
+                */
+               engine->submit_request = nop_complete_submit_request;
+       }
+
+       /*
+        * Make sure no request can slip through without getting completed by
+        * either this call here to intel_engine_init_global_seqno, or the one
+        * in nop_complete_submit_request.
+        */
+       synchronize_rcu();
+
+       for_each_engine(engine, i915, id) {
+               unsigned long flags;
+
+               /* Mark all pending requests as complete so that any concurrent
+                * (lockless) lookup doesn't try and wait upon the request as we
+                * reset it.
+                */
+               spin_lock_irqsave(&engine->timeline->lock, flags);
+               intel_engine_init_global_seqno(engine,
+                                              intel_engine_last_submit(engine));
+               spin_unlock_irqrestore(&engine->timeline->lock, flags);
+       }
+
+       set_bit(I915_WEDGED, &i915->gpu_error.flags);
+       wake_up_all(&i915->gpu_error.reset_queue);
 }
 
 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
@@ -3959,42 +4059,47 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
+       if (!view && flags & PIN_MAPPABLE) {
+               /* If the required space is larger than the available
+                * aperture, we will not able to find a slot for the
+                * object and unbinding the object now will be in
+                * vain. Worse, doing so may cause us to ping-pong
+                * the object in and out of the Global GTT and
+                * waste a lot of cycles under the mutex.
+                */
+               if (obj->base.size > dev_priv->ggtt.mappable_end)
+                       return ERR_PTR(-E2BIG);
+
+               /* If NONBLOCK is set the caller is optimistically
+                * trying to cache the full object within the mappable
+                * aperture, and *must* have a fallback in place for
+                * situations where we cannot bind the object. We
+                * can be a little more lax here and use the fallback
+                * more often to avoid costly migrations of ourselves
+                * and other objects within the aperture.
+                *
+                * Half-the-aperture is used as a simple heuristic.
+                * More interesting would to do search for a free
+                * block prior to making the commitment to unbind.
+                * That caters for the self-harm case, and with a
+                * little more heuristics (e.g. NOFAULT, NOEVICT)
+                * we could try to minimise harm to others.
+                */
+               if (flags & PIN_NONBLOCK &&
+                   obj->base.size > dev_priv->ggtt.mappable_end / 2)
+                       return ERR_PTR(-ENOSPC);
+       }
+
        vma = i915_vma_instance(obj, vm, view);
        if (unlikely(IS_ERR(vma)))
                return vma;
 
        if (i915_vma_misplaced(vma, size, alignment, flags)) {
-               if (flags & PIN_NONBLOCK &&
-                   (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
-                       return ERR_PTR(-ENOSPC);
+               if (flags & PIN_NONBLOCK) {
+                       if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
+                               return ERR_PTR(-ENOSPC);
 
-               if (flags & PIN_MAPPABLE) {
-                       /* If the required space is larger than the available
-                        * aperture, we will not able to find a slot for the
-                        * object and unbinding the object now will be in
-                        * vain. Worse, doing so may cause us to ping-pong
-                        * the object in and out of the Global GTT and
-                        * waste a lot of cycles under the mutex.
-                        */
-                       if (vma->fence_size > dev_priv->ggtt.mappable_end)
-                               return ERR_PTR(-E2BIG);
-
-                       /* If NONBLOCK is set the caller is optimistically
-                        * trying to cache the full object within the mappable
-                        * aperture, and *must* have a fallback in place for
-                        * situations where we cannot bind the object. We
-                        * can be a little more lax here and use the fallback
-                        * more often to avoid costly migrations of ourselves
-                        * and other objects within the aperture.
-                        *
-                        * Half-the-aperture is used as a simple heuristic.
-                        * More interesting would to do search for a free
-                        * block prior to making the commitment to unbind.
-                        * That caters for the self-harm case, and with a
-                        * little more heuristics (e.g. NOFAULT, NOEVICT)
-                        * we could try to minimise harm to others.
-                        */
-                       if (flags & PIN_NONBLOCK &&
+                       if (flags & PIN_MAPPABLE &&
                            vma->fence_size > dev_priv->ggtt.mappable_end / 2)
                                return ERR_PTR(-ENOSPC);
                }
@@ -4221,7 +4326,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        mutex_init(&obj->mm.lock);
 
        INIT_LIST_HEAD(&obj->global_link);
-       INIT_LIST_HEAD(&obj->userfault_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->lut_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4251,6 +4355,30 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .pwrite = i915_gem_object_pwrite_gtt,
 };
 
+static int i915_gem_object_create_shmem(struct drm_device *dev,
+                                       struct drm_gem_object *obj,
+                                       size_t size)
+{
+       struct drm_i915_private *i915 = to_i915(dev);
+       unsigned long flags = VM_NORESERVE;
+       struct file *filp;
+
+       drm_gem_private_object_init(dev, obj, size);
+
+       if (i915->mm.gemfs)
+               filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
+                                                flags);
+       else
+               filp = shmem_file_setup("i915", size, flags);
+
+       if (IS_ERR(filp))
+               return PTR_ERR(filp);
+
+       obj->filp = filp;
+
+       return 0;
+}
+
 struct drm_i915_gem_object *
 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 {
@@ -4275,7 +4403,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
        if (obj == NULL)
                return ERR_PTR(-ENOMEM);
 
-       ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
+       ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
        if (ret)
                goto fail;
 
@@ -4378,6 +4506,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
        llist_for_each_entry_safe(obj, on, freed, freed) {
                GEM_BUG_ON(obj->bind_count);
+               GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
@@ -4547,8 +4676,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
        /* As the idle_work is rearming if it detects a race, play safe and
         * repeat the flush until it is definitely idle.
         */
-       while (flush_delayed_work(&dev_priv->gt.idle_work))
-               ;
+       drain_delayed_work(&dev_priv->gt.idle_work);
 
        /* Assert that we sucessfully flushed all the work and
         * reset the GPU back to its idle, low power state.
@@ -4595,6 +4723,7 @@ void i915_gem_resume(struct drm_i915_private *dev_priv)
 
        mutex_lock(&dev->struct_mutex);
        i915_gem_restore_gtt_mappings(dev_priv);
+       i915_gem_restore_fences(dev_priv);
 
        /* As we didn't flush the kernel context before suspend, we cannot
         * guarantee that the context image is complete. So let's just reset
@@ -4757,6 +4886,15 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       /*
+        * We need to fallback to 4K pages since gvt gtt handling doesn't
+        * support huge page entries - we will need to check either hypervisor
+        * mm can support huge guest page or just do emulation in gvt.
+        */
+       if (intel_vgpu_active(dev_priv))
+               mkwrite_device_info(dev_priv)->page_sizes =
+                       I915_GTT_PAGE_SIZE_4K;
+
        dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
 
        if (!i915_modparams.enable_execlists) {
@@ -4914,6 +5052,10 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 
        spin_lock_init(&dev_priv->fb_tracking.lock);
 
+       err = i915_gemfs_init(dev_priv);
+       if (err)
+               DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
+
        return 0;
 
 err_priorities:
@@ -4952,6 +5094,8 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
 
        /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
        rcu_barrier();
+
+       i915_gemfs_fini(dev_priv);
 }
 
 int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -5341,6 +5485,7 @@ err_unlock:
 #include "selftests/scatterlist.c"
 #include "selftests/mock_gem_device.c"
 #include "selftests/huge_gem_object.c"
+#include "selftests/huge_pages.c"
 #include "selftests/i915_gem_object.c"
 #include "selftests/i915_gem_coherency.c"
 #endif
index 921ee369c74dc61d0e90785dd56a8b3a0f209590..5bf96a258509989e26b1ea4397ad8f8e7ffd9d81 100644 (file)
@@ -416,14 +416,43 @@ out:
        return ctx;
 }
 
+static struct i915_gem_context *
+create_kernel_context(struct drm_i915_private *i915, int prio)
+{
+       struct i915_gem_context *ctx;
+
+       ctx = i915_gem_create_context(i915, NULL);
+       if (IS_ERR(ctx))
+               return ctx;
+
+       i915_gem_context_clear_bannable(ctx);
+       ctx->priority = prio;
+       ctx->ring_size = PAGE_SIZE;
+
+       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+       return ctx;
+}
+
+static void
+destroy_kernel_context(struct i915_gem_context **ctxp)
+{
+       struct i915_gem_context *ctx;
+
+       /* Keep the context ref so that we can free it immediately ourselves */
+       ctx = i915_gem_context_get(fetch_and_zero(ctxp));
+       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+       context_close(ctx);
+       i915_gem_context_free(ctx);
+}
+
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
        struct i915_gem_context *ctx;
+       int err;
 
-       /* Init should only be called once per module load. Eventually the
-        * restriction on the context_disabled check can be loosened. */
-       if (WARN_ON(dev_priv->kernel_context))
-               return 0;
+       GEM_BUG_ON(dev_priv->kernel_context);
 
        INIT_LIST_HEAD(&dev_priv->contexts.list);
        INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -441,28 +470,38 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
        ida_init(&dev_priv->contexts.hw_ida);
 
-       ctx = i915_gem_create_context(dev_priv, NULL);
+       /* lowest priority; idle task */
+       ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
        if (IS_ERR(ctx)) {
-               DRM_ERROR("Failed to create default global context (error %ld)\n",
-                         PTR_ERR(ctx));
-               return PTR_ERR(ctx);
+               DRM_ERROR("Failed to create default global context\n");
+               err = PTR_ERR(ctx);
+               goto err;
        }
-
-       /* For easy recognisablity, we want the kernel context to be 0 and then
+       /*
+        * For easy recognisablity, we want the kernel context to be 0 and then
         * all user contexts will have non-zero hw_id.
         */
        GEM_BUG_ON(ctx->hw_id);
-
-       i915_gem_context_clear_bannable(ctx);
-       ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
        dev_priv->kernel_context = ctx;
 
-       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+       /* highest priority; preempting task */
+       ctx = create_kernel_context(dev_priv, INT_MAX);
+       if (IS_ERR(ctx)) {
+               DRM_ERROR("Failed to create default preempt context\n");
+               err = PTR_ERR(ctx);
+               goto err_kernel_context;
+       }
+       dev_priv->preempt_context = ctx;
 
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         dev_priv->engine[RCS]->context_size ? "logical" :
                         "fake");
        return 0;
+
+err_kernel_context:
+       destroy_kernel_context(&dev_priv->kernel_context);
+err:
+       return err;
 }
 
 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -507,15 +546,10 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
-       struct i915_gem_context *ctx;
-
        lockdep_assert_held(&i915->drm.struct_mutex);
 
-       /* Keep the context so that we can free it immediately ourselves */
-       ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
-       GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-       context_close(ctx);
-       i915_gem_context_free(ctx);
+       destroy_kernel_context(&i915->preempt_context);
+       destroy_kernel_context(&i915->kernel_context);
 
        /* Must free all deferred contexts (via flush_workqueue) first */
        ida_destroy(&i915->contexts.hw_ida);
@@ -1036,6 +1070,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
        case I915_CONTEXT_PARAM_BANNABLE:
                args->value = i915_gem_context_is_bannable(ctx);
                break;
+       case I915_CONTEXT_PARAM_PRIORITY:
+               args->value = ctx->priority;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -1091,6 +1128,26 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                else
                        i915_gem_context_clear_bannable(ctx);
                break;
+
+       case I915_CONTEXT_PARAM_PRIORITY:
+               {
+                       int priority = args->value;
+
+                       if (args->size)
+                               ret = -EINVAL;
+                       else if (!to_i915(dev)->engine[RCS]->schedule)
+                               ret = -ENODEV;
+                       else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+                                priority < I915_CONTEXT_MIN_USER_PRIORITY)
+                               ret = -EINVAL;
+                       else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+                                !capable(CAP_SYS_NICE))
+                               ret = -EPERM;
+                       else
+                               ctx->priority = priority;
+               }
+               break;
+
        default:
                ret = -EINVAL;
                break;
index 6176e589cf09f9b287cf25700a27a170255d6fe8..864439a214c83c32d663e49bf05c50cd96f3ffb3 100644 (file)
@@ -256,11 +256,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
        return drm_gem_dmabuf_export(dev, &exp_info);
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-       return dma_buf_map_attachment(obj->base.import_attach,
-                                     DMA_BIDIRECTIONAL);
+       struct sg_table *pages;
+       unsigned int sg_page_sizes;
+
+       pages = dma_buf_map_attachment(obj->base.import_attach,
+                                      DMA_BIDIRECTIONAL);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
+       sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+       __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+       return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
index 4df039ef2ce316509ecc6faa04e707d135acf507..a5a5b7e6daae355b9f5a80e39ea2cbaac9173e90 100644 (file)
@@ -82,7 +82,7 @@ mark_free(struct drm_mm_scan *scan,
        if (i915_vma_is_pinned(vma))
                return false;
 
-       if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+       if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
                return false;
 
        list_add(&vma->evict_link, unwind);
@@ -315,6 +315,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                        break;
                }
 
+               if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
+                       ret = -ENOSPC;
+                       break;
+               }
+
                /* Overlap of objects in the same batch? */
                if (i915_vma_is_pinned(vma)) {
                        ret = -ENOSPC;
index d733c4d5a5002c047fed386a9531081a80373197..3d7190764f1012ed97265c34d79c14554d8c4252 100644 (file)
@@ -367,12 +367,12 @@ eb_pin_vma(struct i915_execbuffer *eb,
                return false;
 
        if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
-               if (unlikely(i915_vma_get_fence(vma))) {
+               if (unlikely(i915_vma_pin_fence(vma))) {
                        i915_vma_unpin(vma);
                        return false;
                }
 
-               if (i915_vma_pin_fence(vma))
+               if (vma->fence)
                        exec_flags |= __EXEC_OBJECT_HAS_FENCE;
        }
 
@@ -385,7 +385,7 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
        GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
 
        if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
-               i915_vma_unpin_fence(vma);
+               __i915_vma_unpin_fence(vma);
 
        __i915_vma_unpin(vma);
 }
@@ -563,13 +563,13 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
        }
 
        if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
-               err = i915_vma_get_fence(vma);
+               err = i915_vma_pin_fence(vma);
                if (unlikely(err)) {
                        i915_vma_unpin(vma);
                        return err;
                }
 
-               if (i915_vma_pin_fence(vma))
+               if (vma->fence)
                        exec_flags |= __EXEC_OBJECT_HAS_FENCE;
        }
 
@@ -974,7 +974,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                        return ERR_PTR(err);
 
                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-                                              PIN_MAPPABLE | PIN_NONBLOCK);
+                                              PIN_MAPPABLE |
+                                              PIN_NONBLOCK |
+                                              PIN_NONFAULT);
                if (IS_ERR(vma)) {
                        memset(&cache->node, 0, sizeof(cache->node));
                        err = drm_mm_insert_node_in_range
index 2783d63bd1adac44fc06da4922893051c6966bbb..012250f25255fa86c47fdd3aa4480cca09b7b281 100644 (file)
@@ -240,7 +240,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
                /* Ensure that all userspace CPU access is completed before
                 * stealing the fence.
                 */
-               i915_gem_release_mmap(fence->vma->obj);
+               GEM_BUG_ON(fence->vma->fence != fence);
+               i915_vma_revoke_mmap(fence->vma);
 
                fence->vma->fence = NULL;
                fence->vma = NULL;
@@ -280,8 +281,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
  *
  * 0 on success, negative error code on failure.
  */
-int
-i915_vma_put_fence(struct i915_vma *vma)
+int i915_vma_put_fence(struct i915_vma *vma)
 {
        struct drm_i915_fence_reg *fence = vma->fence;
 
@@ -299,6 +299,8 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
        struct drm_i915_fence_reg *fence;
 
        list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+               GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
                if (fence->pin_count)
                        continue;
 
@@ -313,7 +315,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
 }
 
 /**
- * i915_vma_get_fence - set up fencing for a vma
+ * i915_vma_pin_fence - set up fencing for a vma
  * @vma: vma to map through a fence reg
  *
  * When mapping objects through the GTT, userspace wants to be able to write
@@ -331,10 +333,11 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
  * 0 on success, negative error code on failure.
  */
 int
-i915_vma_get_fence(struct i915_vma *vma)
+i915_vma_pin_fence(struct i915_vma *vma)
 {
        struct drm_i915_fence_reg *fence;
        struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+       int err;
 
        /* Note that we revoke fences on runtime suspend. Therefore the user
         * must keep the device awake whilst using the fence.
@@ -344,6 +347,8 @@ i915_vma_get_fence(struct i915_vma *vma)
        /* Just update our place in the LRU if our fence is getting reused. */
        if (vma->fence) {
                fence = vma->fence;
+               GEM_BUG_ON(fence->vma != vma);
+               fence->pin_count++;
                if (!fence->dirty) {
                        list_move_tail(&fence->link,
                                       &fence->i915->mm.fence_list);
@@ -353,10 +358,25 @@ i915_vma_get_fence(struct i915_vma *vma)
                fence = fence_find(vma->vm->i915);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);
+
+               GEM_BUG_ON(fence->pin_count);
+               fence->pin_count++;
        } else
                return 0;
 
-       return fence_update(fence, set);
+       err = fence_update(fence, set);
+       if (err)
+               goto out_unpin;
+
+       GEM_BUG_ON(fence->vma != set);
+       GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+
+       if (set)
+               return 0;
+
+out_unpin:
+       fence->pin_count--;
+       return err;
 }
 
 /**
@@ -429,8 +449,10 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
 
+               GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
                if (fence->vma)
-                       i915_gem_release_mmap(fence->vma->obj);
+                       i915_vma_revoke_mmap(fence->vma);
        }
 }
 
@@ -450,13 +472,15 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
                struct i915_vma *vma = reg->vma;
 
+               GEM_BUG_ON(vma && vma->fence != reg);
+
                /*
                 * Commit delayed tiling changes if we have an object still
                 * attached to the fence, otherwise just clear the fence.
                 */
                if (vma && !i915_gem_object_is_tiled(vma->obj)) {
                        GEM_BUG_ON(!reg->dirty);
-                       GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+                       GEM_BUG_ON(i915_vma_has_userfault(vma));
 
                        list_move(&reg->link, &dev_priv->mm.fence_list);
                        vma->fence = NULL;
index 4c82ceb8d3186845c2383c624ddaea8edaeba2d0..daba55a4fce232f61b8e19edea96eadcd53c235d 100644 (file)
@@ -135,11 +135,12 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                                int enable_ppgtt)
 {
-       bool has_aliasing_ppgtt;
        bool has_full_ppgtt;
        bool has_full_48bit_ppgtt;
 
-       has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
+       if (!dev_priv->info.has_aliasing_ppgtt)
+               return 0;
+
        has_full_ppgtt = dev_priv->info.has_full_ppgtt;
        has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
 
@@ -149,9 +150,6 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
        }
 
-       if (!has_aliasing_ppgtt)
-               return 0;
-
        /*
         * We don't allow disabling PPGTT for gen9+ as it's a requirement for
         * execlists, the sole mechanism available to submit work.
@@ -188,7 +186,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                        return 2;
        }
 
-       return has_aliasing_ppgtt ? 1 : 0;
+       return 1;
 }
 
 static int ppgtt_bind_vma(struct i915_vma *vma,
@@ -205,8 +203,6 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
                        return ret;
        }
 
-       vma->pages = vma->obj->mm.pages;
-
        /* Currently applicable only to VLV */
        pte_flags = 0;
        if (vma->obj->gt_ro)
@@ -222,6 +218,30 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
        vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 }
 
+static int ppgtt_set_pages(struct i915_vma *vma)
+{
+       GEM_BUG_ON(vma->pages);
+
+       vma->pages = vma->obj->mm.pages;
+
+       vma->page_sizes = vma->obj->mm.page_sizes;
+
+       return 0;
+}
+
+static void clear_pages(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!vma->pages);
+
+       if (vma->pages != vma->obj->mm.pages) {
+               sg_free_table(vma->pages);
+               kfree(vma->pages);
+       }
+       vma->pages = NULL;
+
+       memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
+}
+
 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
                                  enum i915_cache_level level)
 {
@@ -497,22 +517,63 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-       struct page *page;
+       struct page *page = NULL;
        dma_addr_t addr;
+       int order;
 
-       page = alloc_page(gfp | __GFP_ZERO);
-       if (unlikely(!page))
-               return -ENOMEM;
+       /*
+        * In order to utilize 64K pages for an object with a size < 2M, we will
+        * need to support a 64K scratch page, given that every 16th entry for a
+        * page-table operating in 64K mode must point to a properly aligned 64K
+        * region, including any PTEs which happen to point to scratch.
+        *
+        * This is only relevant for the 48b PPGTT where we support
+        * huge-gtt-pages, see also i915_vma_insert().
+        *
+        * TODO: we should really consider write-protecting the scratch-page and
+        * sharing between ppgtt
+        */
+       if (i915_vm_is_48bit(vm) &&
+           HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+               order = get_order(I915_GTT_PAGE_SIZE_64K);
+               page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
+               if (page) {
+                       addr = dma_map_page(vm->dma, page, 0,
+                                           I915_GTT_PAGE_SIZE_64K,
+                                           PCI_DMA_BIDIRECTIONAL);
+                       if (unlikely(dma_mapping_error(vm->dma, addr))) {
+                               __free_pages(page, order);
+                               page = NULL;
+                       }
 
-       addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
-                           PCI_DMA_BIDIRECTIONAL);
-       if (unlikely(dma_mapping_error(vm->dma, addr))) {
-               __free_page(page);
-               return -ENOMEM;
+                       if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
+                               dma_unmap_page(vm->dma, addr,
+                                              I915_GTT_PAGE_SIZE_64K,
+                                              PCI_DMA_BIDIRECTIONAL);
+                               __free_pages(page, order);
+                               page = NULL;
+                       }
+               }
+       }
+
+       if (!page) {
+               order = 0;
+               page = alloc_page(gfp | __GFP_ZERO);
+               if (unlikely(!page))
+                       return -ENOMEM;
+
+               addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+                                   PCI_DMA_BIDIRECTIONAL);
+               if (unlikely(dma_mapping_error(vm->dma, addr))) {
+                       __free_page(page);
+                       return -ENOMEM;
+               }
        }
 
        vm->scratch_page.page = page;
        vm->scratch_page.daddr = addr;
+       vm->scratch_page.order = order;
+
        return 0;
 }
 
@@ -520,8 +581,9 @@ static void cleanup_scratch_page(struct i915_address_space *vm)
 {
        struct i915_page_dma *p = &vm->scratch_page;
 
-       dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       __free_page(p->page);
+       dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+                      PCI_DMA_BIDIRECTIONAL);
+       __free_pages(p->page, p->order);
 }
 
 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -989,6 +1051,105 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 
        gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
                                      cache_level);
+
+       vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+}
+
+static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
+                                          struct i915_page_directory_pointer **pdps,
+                                          struct sgt_dma *iter,
+                                          enum i915_cache_level cache_level)
+{
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+       u64 start = vma->node.start;
+       dma_addr_t rem = iter->sg->length;
+
+       do {
+               struct gen8_insert_pte idx = gen8_insert_pte(start);
+               struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
+               struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
+               unsigned int page_size;
+               bool maybe_64K = false;
+               gen8_pte_t encode = pte_encode;
+               gen8_pte_t *vaddr;
+               u16 index, max;
+
+               if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+                   IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+                   rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
+                       index = idx.pde;
+                       max = I915_PDES;
+                       page_size = I915_GTT_PAGE_SIZE_2M;
+
+                       encode |= GEN8_PDE_PS_2M;
+
+                       vaddr = kmap_atomic_px(pd);
+               } else {
+                       struct i915_page_table *pt = pd->page_table[idx.pde];
+
+                       index = idx.pte;
+                       max = GEN8_PTES;
+                       page_size = I915_GTT_PAGE_SIZE;
+
+                       if (!index &&
+                           vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+                           IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+                           (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+                            rem >= (max - index) << PAGE_SHIFT))
+                               maybe_64K = true;
+
+                       vaddr = kmap_atomic_px(pt);
+               }
+
+               do {
+                       GEM_BUG_ON(iter->sg->length < page_size);
+                       vaddr[index++] = encode | iter->dma;
+
+                       start += page_size;
+                       iter->dma += page_size;
+                       rem -= page_size;
+                       if (iter->dma >= iter->max) {
+                               iter->sg = __sg_next(iter->sg);
+                               if (!iter->sg)
+                                       break;
+
+                               rem = iter->sg->length;
+                               iter->dma = sg_dma_address(iter->sg);
+                               iter->max = iter->dma + rem;
+
+                               if (maybe_64K && index < max &&
+                                   !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+                                     (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+                                      rem >= (max - index) << PAGE_SHIFT)))
+                                       maybe_64K = false;
+
+                               if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+                                       break;
+                       }
+               } while (rem >= page_size && index < max);
+
+               kunmap_atomic(vaddr);
+
+               /*
+                * Is it safe to mark the 2M block as 64K? -- Either we have
+                * filled whole page-table with 64K entries, or filled part of
+                * it and have reached the end of the sg table and we have
+                * enough padding.
+                */
+               if (maybe_64K &&
+                   (index == max ||
+                    (i915_vm_has_scratch_64K(vma->vm) &&
+                     !iter->sg && IS_ALIGNED(vma->node.start +
+                                             vma->node.size,
+                                             I915_GTT_PAGE_SIZE_2M)))) {
+                       vaddr = kmap_atomic_px(pd);
+                       vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
+                       kunmap_atomic(vaddr);
+                       page_size = I915_GTT_PAGE_SIZE_64K;
+               }
+
+               vma->page_sizes.gtt |= page_size;
+       } while (iter->sg);
 }
 
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
@@ -1003,11 +1164,18 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
                .max = iter.dma + iter.sg->length,
        };
        struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
-       struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
-       while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
-                                            &idx, cache_level))
-               GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+       if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+               gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+       } else {
+               struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+
+               while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
+                                                    &iter, &idx, cache_level))
+                       GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+
+               vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+       }
 }
 
 static void gen8_free_page_tables(struct i915_address_space *vm,
@@ -1452,6 +1620,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        ppgtt->base.cleanup = gen8_ppgtt_cleanup;
        ppgtt->base.unbind_vma = ppgtt_unbind_vma;
        ppgtt->base.bind_vma = ppgtt_bind_vma;
+       ppgtt->base.set_pages = ppgtt_set_pages;
+       ppgtt->base.clear_pages = clear_pages;
        ppgtt->debug_dump = gen8_dump_ppgtt;
 
        return 0;
@@ -1726,6 +1896,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                }
        } while (1);
        kunmap_atomic(vaddr);
+
+       vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
@@ -1894,6 +2066,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->base.unbind_vma = ppgtt_unbind_vma;
        ppgtt->base.bind_vma = ppgtt_bind_vma;
+       ppgtt->base.set_pages = ppgtt_set_pages;
+       ppgtt->base.clear_pages = clear_pages;
        ppgtt->base.cleanup = gen6_ppgtt_cleanup;
        ppgtt->debug_dump = gen6_dump_ppgtt;
 
@@ -1961,6 +2135,23 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
                I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
        else if (IS_GEN9_LP(dev_priv))
                I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+
+       /*
+        * To support 64K PTEs we need to first enable the use of the
+        * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
+        * mmio, otherwise the page-walker will simply ignore the IPS bit. This
+        * shouldn't be needed after GEN10.
+        *
+        * 64K pages were first introduced from BDW+, although technically they
+        * only *work* from gen9+. For pre-BDW we instead have the option for
+        * 32K pages, but we don't currently have any support for it in our
+        * driver.
+        */
+       if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
+           INTEL_GEN(dev_priv) <= 10)
+               I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
+                          I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
+                          GAMW_ECO_ENABLE_64K_IPS_FIELD);
 }
 
 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
@@ -2405,12 +2596,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
        struct drm_i915_gem_object *obj = vma->obj;
        u32 pte_flags;
 
-       if (unlikely(!vma->pages)) {
-               int ret = i915_get_ggtt_vma_pages(vma);
-               if (ret)
-                       return ret;
-       }
-
        /* Currently applicable only to VLV */
        pte_flags = 0;
        if (obj->gt_ro)
@@ -2420,6 +2605,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
        intel_runtime_pm_put(i915);
 
+       vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
        /*
         * Without aliasing PPGTT there's no difference between
         * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
@@ -2447,12 +2634,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
        u32 pte_flags;
        int ret;
 
-       if (unlikely(!vma->pages)) {
-               ret = i915_get_ggtt_vma_pages(vma);
-               if (ret)
-                       return ret;
-       }
-
        /* Currently applicable only to VLV */
        pte_flags = 0;
        if (vma->obj->gt_ro)
@@ -2467,7 +2648,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
                                                             vma->node.start,
                                                             vma->size);
                        if (ret)
-                               goto err_pages;
+                               return ret;
                }
 
                appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
@@ -2481,17 +2662,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
        }
 
        return 0;
-
-err_pages:
-       if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
-               if (vma->pages != vma->obj->mm.pages) {
-                       GEM_BUG_ON(!vma->pages);
-                       sg_free_table(vma->pages);
-                       kfree(vma->pages);
-               }
-               vma->pages = NULL;
-       }
-       return ret;
 }
 
 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
@@ -2529,6 +2699,21 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
        dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
 }
 
+static int ggtt_set_pages(struct i915_vma *vma)
+{
+       int ret;
+
+       GEM_BUG_ON(vma->pages);
+
+       ret = i915_get_ggtt_vma_pages(vma);
+       if (ret)
+               return ret;
+
+       vma->page_sizes = vma->obj->mm.page_sizes;
+
+       return 0;
+}
+
 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
                                  unsigned long color,
                                  u64 *start,
@@ -3151,6 +3336,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->base.cleanup = gen6_gmch_remove;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
+       ggtt->base.set_pages = ggtt_set_pages;
+       ggtt->base.clear_pages = clear_pages;
        ggtt->base.insert_page = gen8_ggtt_insert_page;
        ggtt->base.clear_range = nop_clear_range;
        if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
@@ -3209,6 +3396,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->base.insert_entries = gen6_ggtt_insert_entries;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
+       ggtt->base.set_pages = ggtt_set_pages;
+       ggtt->base.clear_pages = clear_pages;
        ggtt->base.cleanup = gen6_gmch_remove;
 
        ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3254,6 +3443,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->base.clear_range = i915_ggtt_clear_range;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
+       ggtt->base.set_pages = ggtt_set_pages;
+       ggtt->base.clear_pages = clear_pages;
        ggtt->base.cleanup = i915_gmch_remove;
 
        ggtt->invalidate = gmch_ggtt_invalidate;
index f62fb903dc24a6abbd7d6abec55900a81e243c89..93211a96fdadd2ec10962f6fac34a160ad311433 100644 (file)
 #include "i915_gem_request.h"
 #include "i915_selftest.h"
 
-#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_PAGE_SIZE_4K BIT(12)
+#define I915_GTT_PAGE_SIZE_64K BIT(16)
+#define I915_GTT_PAGE_SIZE_2M BIT(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+
 #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
 
 #define I915_FENCE_REG_NONE -1
@@ -148,6 +154,9 @@ typedef u64 gen8_ppgtt_pml4e_t;
 #define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
 #define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
 
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M   BIT(7)
+
 struct sg_table;
 
 struct intel_rotation_info {
@@ -207,6 +216,7 @@ struct i915_vma;
 
 struct i915_page_dma {
        struct page *page;
+       int order;
        union {
                dma_addr_t daddr;
 
@@ -329,6 +339,8 @@ struct i915_address_space {
        int (*bind_vma)(struct i915_vma *vma,
                        enum i915_cache_level cache_level,
                        u32 flags);
+       int (*set_pages)(struct i915_vma *vma);
+       void (*clear_pages)(struct i915_vma *vma);
 
        I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
 };
@@ -341,6 +353,12 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
        return (vm->total - 1) >> 32;
 }
 
+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+       return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
index c1f64ddaf8aa627010a73d4a08bc55d46e32d745..ee83ec838ee75ecf79f3a0514c0cc6678546df98 100644 (file)
@@ -44,12 +44,12 @@ static void internal_free_pages(struct sg_table *st)
        kfree(st);
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *st;
        struct scatterlist *sg;
+       unsigned int sg_page_sizes;
        unsigned int npages;
        int max_order;
        gfp_t gfp;
@@ -78,16 +78,17 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 create_st:
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        npages = obj->base.size / PAGE_SIZE;
        if (sg_alloc_table(st, npages, GFP_KERNEL)) {
                kfree(st);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        sg = st->sgl;
        st->nents = 0;
+       sg_page_sizes = 0;
 
        do {
                int order = min(fls(npages) - 1, max_order);
@@ -105,6 +106,7 @@ create_st:
                } while (1);
 
                sg_set_page(sg, page, PAGE_SIZE << order, 0);
+               sg_page_sizes |= PAGE_SIZE << order;
                st->nents++;
 
                npages -= 1 << order;
@@ -132,13 +134,17 @@ create_st:
         * object are only valid whilst active and pinned.
         */
        obj->mm.madv = I915_MADV_DONTNEED;
-       return st;
+
+       __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+       return 0;
 
 err:
        sg_set_page(sg, NULL, 0, 0);
        sg_mark_end(sg);
        internal_free_pages(st);
-       return ERR_PTR(-ENOMEM);
+
+       return -ENOMEM;
 }
 
 static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
index c30d8f80818587bfe4500a08b377b2ac45836f68..d67f1cbe842d23aa3c6d318aef60b5a0467d6151 100644 (file)
@@ -69,7 +69,7 @@ struct drm_i915_gem_object_ops {
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
-       struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+       int (*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
        int (*pwrite)(struct drm_i915_gem_object *,
@@ -123,6 +123,7 @@ struct drm_i915_gem_object {
        /**
         * Whether the object is currently in the GGTT mmap.
         */
+       unsigned int userfault_count;
        struct list_head userfault_link;
 
        struct list_head batch_pool_link;
@@ -169,6 +170,35 @@ struct drm_i915_gem_object {
                struct sg_table *pages;
                void *mapping;
 
+               /* TODO: whack some of this into the error state */
+               struct i915_page_sizes {
+                       /**
+                        * The sg mask of the pages sg_table. i.e the mask of
+                        * of the lengths for each sg entry.
+                        */
+                       unsigned int phys;
+
+                       /**
+                        * The gtt page sizes we are allowed to use given the
+                        * sg mask and the supported page sizes. This will
+                        * express the smallest unit we can use for the whole
+                        * object, as well as the larger sizes we may be able
+                        * to use opportunistically.
+                        */
+                       unsigned int sg;
+
+                       /**
+                        * The actual gtt page size usage. Since we can have
+                        * multiple vma associated with this object we need to
+                        * prevent any trampling of state, hence a copy of this
+                        * struct also lives in each vma, therefore the gtt
+                        * value here should only be read/write through the vma.
+                        */
+                       unsigned int gtt;
+               } page_sizes;
+
+               I915_SELFTEST_DECLARE(unsigned int page_mask);
+
                struct i915_gem_object_page_iter {
                        struct scatterlist *sg_pos;
                        unsigned int sg_idx; /* in pages, but 32bit eek! */
index 4eb1a76731b2251c9bb8a67b0cbbf8098c4cece1..d140fcf5c6a396d7a288d6b6e8d26b0da82ae284 100644 (file)
@@ -186,7 +186,7 @@ i915_priotree_init(struct i915_priotree *pt)
        INIT_LIST_HEAD(&pt->signalers_list);
        INIT_LIST_HEAD(&pt->waiters_list);
        INIT_LIST_HEAD(&pt->link);
-       pt->priority = INT_MIN;
+       pt->priority = I915_PRIORITY_INVALID;
 }
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -416,7 +416,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 
        spin_lock_irq(&request->lock);
        if (request->waitboost)
-               atomic_dec(&request->i915->rps.num_waiters);
+               atomic_dec(&request->i915->gt_pm.rps.num_waiters);
        dma_fence_signal_locked(&request->fence);
        spin_unlock_irq(&request->lock);
 
@@ -556,7 +556,16 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
        switch (state) {
        case FENCE_COMPLETE:
                trace_i915_gem_request_submit(request);
+               /*
+                * We need to serialize use of the submit_request() callback with its
+                * hotplugging performed during an emergency i915_gem_set_wedged().
+                * We use the RCU mechanism to mark the critical section in order to
+                * force i915_gem_set_wedged() to wait until the submit_request() is
+                * completed before proceeding.
+                */
+               rcu_read_lock();
                request->engine->submit_request(request);
+               rcu_read_unlock();
                break;
 
        case FENCE_FREE:
@@ -587,6 +596,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
+       /*
+        * Preempt contexts are reserved for exclusive use to inject a
+        * preemption context switch. They are never to be used for any trivial
+        * request!
+        */
+       GEM_BUG_ON(ctx == dev_priv->preempt_context);
+
        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged.
         */
index 96eb52471dad4f2af0ba2dfa15863d8861f66896..26249f39de67eb84c144703c83540ec853ac9969 100644 (file)
@@ -30,6 +30,8 @@
 #include "i915_gem.h"
 #include "i915_sw_fence.h"
 
+#include <uapi/drm/i915_drm.h>
+
 struct drm_file;
 struct drm_i915_gem_object;
 struct drm_i915_gem_request;
@@ -69,9 +71,14 @@ struct i915_priotree {
        struct list_head waiters_list; /* those after us, they depend upon us */
        struct list_head link;
        int priority;
-#define I915_PRIORITY_MAX 1024
-#define I915_PRIORITY_NORMAL 0
-#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
+enum {
+       I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+       I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+       I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+       I915_PRIORITY_INVALID = INT_MIN
 };
 
 struct i915_gem_capture_list {
index 507c9f0d8df1267a5a05c285800a8ac3baef77b7..54fd4cfa9d071036224186c45ae02558742ee013 100644 (file)
@@ -539,12 +539,18 @@ i915_pages_create_for_stolen(struct drm_device *dev,
        return st;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
-       return i915_pages_create_for_stolen(obj->base.dev,
-                                           obj->stolen->start,
-                                           obj->stolen->size);
+       struct sg_table *pages =
+               i915_pages_create_for_stolen(obj->base.dev,
+                                            obj->stolen->start,
+                                            obj->stolen->size);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
+       __i915_gem_object_set_pages(obj, pages, obj->stolen->size);
+
+       return 0;
 }
 
 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
index 2d4996de733120b155a613f5f60137b3afcecbcb..4d712a4db63b4f50f87f6d8fc304e43450e64a0d 100644 (file)
@@ -164,7 +164,6 @@ static struct i915_mmu_notifier *
 i915_mmu_notifier_create(struct mm_struct *mm)
 {
        struct i915_mmu_notifier *mn;
-       int ret;
 
        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
@@ -179,14 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
                return ERR_PTR(-ENOMEM);
        }
 
-        /* Protected by mmap_sem (write-lock) */
-       ret = __mmu_notifier_register(&mn->mn, mm);
-       if (ret) {
-               destroy_workqueue(mn->wq);
-               kfree(mn);
-               return ERR_PTR(ret);
-       }
-
        return mn;
 }
 
@@ -210,23 +201,40 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 static struct i915_mmu_notifier *
 i915_mmu_notifier_find(struct i915_mm_struct *mm)
 {
-       struct i915_mmu_notifier *mn = mm->mn;
+       struct i915_mmu_notifier *mn;
+       int err = 0;
 
        mn = mm->mn;
        if (mn)
                return mn;
 
+       mn = i915_mmu_notifier_create(mm->mm);
+       if (IS_ERR(mn))
+               err = PTR_ERR(mn);
+
        down_write(&mm->mm->mmap_sem);
        mutex_lock(&mm->i915->mm_lock);
-       if ((mn = mm->mn) == NULL) {
-               mn = i915_mmu_notifier_create(mm->mm);
-               if (!IS_ERR(mn))
-                       mm->mn = mn;
+       if (mm->mn == NULL && !err) {
+               /* Protected by mmap_sem (write-lock) */
+               err = __mmu_notifier_register(&mn->mn, mm->mm);
+               if (!err) {
+                       /* Protected by mm_lock */
+                       mm->mn = fetch_and_zero(&mn);
+               }
+       } else {
+               /* someone else raced and successfully installed the mmu
+                * notifier, we can cancel our own errors */
+               err = 0;
        }
        mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);
 
-       return mn;
+       if (mn) {
+               destroy_workqueue(mn->wq);
+               kfree(mn);
+       }
+
+       return err ? ERR_PTR(err) : mm->mn;
 }
 
 static int
@@ -405,6 +413,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
 {
        unsigned int max_segment = i915_sg_segment_size();
        struct sg_table *st;
+       unsigned int sg_page_sizes;
        int ret;
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -434,6 +443,10 @@ alloc_table:
                return ERR_PTR(ret);
        }
 
+       sg_page_sizes = i915_sg_page_sizes(st->sgl);
+
+       __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
        return st;
 }
 
@@ -521,7 +534,6 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                        pages = __i915_gem_userptr_alloc_pages(obj, pvec,
                                                               npages);
                        if (!IS_ERR(pages)) {
-                               __i915_gem_object_set_pages(obj, pages);
                                pinned = 0;
                                pages = NULL;
                        }
@@ -582,8 +594,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
        return ERR_PTR(-EAGAIN);
 }
 
-static struct sg_table *
-i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct mm_struct *mm = obj->userptr.mm->mm;
@@ -612,9 +623,9 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
                if (IS_ERR(obj->userptr.work))
-                       return ERR_CAST(obj->userptr.work);
+                       return PTR_ERR(obj->userptr.work);
                else
-                       return ERR_PTR(-EAGAIN);
+                       return -EAGAIN;
        }
 
        pvec = NULL;
@@ -650,7 +661,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
                release_pages(pvec, pinned, 0);
        kvfree(pvec);
 
-       return pages;
+       return PTR_ERR_OR_ZERO(pages);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c
new file mode 100644 (file)
index 0000000..e299385
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright Â© 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+
+#include "i915_drv.h"
+#include "i915_gemfs.h"
+
+int i915_gemfs_init(struct drm_i915_private *i915)
+{
+       struct file_system_type *type;
+       struct vfsmount *gemfs;
+
+       type = get_fs_type("tmpfs");
+       if (!type)
+               return -ENODEV;
+
+       gemfs = kern_mount(type);
+       if (IS_ERR(gemfs))
+               return PTR_ERR(gemfs);
+
+       /*
+        * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
+        * likely 2M. Note that within_size may overallocate huge-pages, if say
+        * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
+        * memory pressure shmem should split any huge-pages which can be
+        * shrunk.
+        */
+
+       if (has_transparent_hugepage()) {
+               struct super_block *sb = gemfs->mnt_sb;
+               char options[] = "huge=within_size";
+               int flags = 0;
+               int err;
+
+               err = sb->s_op->remount_fs(sb, &flags, options);
+               if (err) {
+                       kern_unmount(gemfs);
+                       return err;
+               }
+       }
+
+       i915->mm.gemfs = gemfs;
+
+       return 0;
+}
+
+void i915_gemfs_fini(struct drm_i915_private *i915)
+{
+       kern_unmount(i915->mm.gemfs);
+}
diff --git a/drivers/gpu/drm/i915/i915_gemfs.h b/drivers/gpu/drm/i915/i915_gemfs.h
new file mode 100644 (file)
index 0000000..cca8bdc
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright Â© 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEMFS_H__
+#define __I915_GEMFS_H__
+
+struct drm_i915_private;
+
+int i915_gemfs_init(struct drm_i915_private *i915);
+
+void i915_gemfs_fini(struct drm_i915_private *i915);
+
+#endif
index c14552ab270bc34a8d67936e74b7a0b72e05b2e9..653fb69e7ecb0ab038069681f72d5f5c32b1e220 100644 (file)
@@ -377,9 +377,9 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
        if (!erq->seqno)
                return;
 
-       err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+       err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
                   prefix, erq->pid, erq->ban_score,
-                  erq->context, erq->seqno,
+                  erq->context, erq->seqno, erq->priority,
                   jiffies_to_msecs(jiffies - erq->jiffies),
                   erq->head, erq->tail);
 }
@@ -388,9 +388,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
                                const char *header,
                                const struct drm_i915_error_context *ctx)
 {
-       err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
+       err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n",
                   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-                  ctx->ban_score, ctx->guilty, ctx->active);
+                  ctx->priority, ctx->ban_score, ctx->guilty, ctx->active);
 }
 
 static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -1271,6 +1271,7 @@ static void record_request(struct drm_i915_gem_request *request,
                           struct drm_i915_error_request *erq)
 {
        erq->context = request->ctx->hw_id;
+       erq->priority = request->priotree.priority;
        erq->ban_score = atomic_read(&request->ctx->ban_score);
        erq->seqno = request->global_seqno;
        erq->jiffies = request->emitted_jiffies;
@@ -1364,6 +1365,7 @@ static void record_context(struct drm_i915_error_context *e,
 
        e->handle = ctx->user_handle;
        e->hw_id = ctx->hw_id;
+       e->priority = ctx->priority;
        e->ban_score = atomic_read(&ctx->ban_score);
        e->guilty = atomic_read(&ctx->guilty_count);
        e->active = atomic_read(&ctx->active_count);
@@ -1672,8 +1674,8 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
                                   struct i915_gpu_state *error)
 {
        error->awake = dev_priv->gt.awake;
-       error->wakelock = atomic_read(&dev_priv->pm.wakeref_count);
-       error->suspended = dev_priv->pm.suspended;
+       error->wakelock = atomic_read(&dev_priv->runtime_pm.wakeref_count);
+       error->suspended = dev_priv->runtime_pm.suspended;
 
        error->iommu = -1;
 #ifdef CONFIG_INTEL_IOMMU
index 04f1281d81a5c7b04e21a6d88ac7f541246a613a..a2e8114b739d9182bf005a35514b83e397c29790 100644 (file)
  * IN THE SOFTWARE.
  *
  */
-#include <linux/circ_buf.h>
-#include "i915_drv.h"
-#include "intel_uc.h"
 
+#include <linux/circ_buf.h>
 #include <trace/events/dma_fence.h>
 
+#include "i915_guc_submission.h"
+#include "i915_drv.h"
+
 /**
  * DOC: GuC-based command submission
  *
@@ -337,7 +338,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 
        for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
                struct intel_context *ce = &ctx->engine[engine->id];
-               uint32_t guc_engine_id = engine->guc_id;
+               u32 guc_engine_id = engine->guc_id;
                struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
 
                /* TODO: We have a design issue to be solved here. Only when we
@@ -387,13 +388,13 @@ static void guc_stage_desc_init(struct intel_guc *guc,
        gfx_addr = guc_ggtt_offset(client->vma);
        desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
                                client->doorbell_offset;
-       desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
+       desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
        desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
        desc->process_desc = gfx_addr + client->proc_desc_offset;
        desc->wq_addr = gfx_addr + GUC_DB_SIZE;
        desc->wq_size = GUC_WQ_SIZE;
 
-       desc->desc_private = (uintptr_t)client;
+       desc->desc_private = ptr_to_u64(client);
 }
 
 static void guc_stage_desc_fini(struct intel_guc *guc,
@@ -499,7 +500,7 @@ static void i915_guc_submit(struct intel_engine_cs *engine)
        const unsigned int engine_id = engine->id;
        unsigned int n;
 
-       for (n = 0; n < ARRAY_SIZE(execlists->port); n++) {
+       for (n = 0; n < execlists_num_ports(execlists); n++) {
                struct drm_i915_gem_request *rq;
                unsigned int count;
 
@@ -643,48 +644,6 @@ static void i915_guc_irq_handler(unsigned long data)
  * path of i915_guc_submit() above.
  */
 
-/**
- * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
- * @guc:       the guc
- * @size:      size of area to allocate (both virtual space and memory)
- *
- * This is a wrapper to create an object for use with the GuC. In order to
- * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
- * both some backing storage and a range inside the Global GTT. We must pin
- * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that
- * range is reserved inside GuC.
- *
- * Return:     A i915_vma if successful, otherwise an ERR_PTR.
- */
-struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct drm_i915_gem_object *obj;
-       struct i915_vma *vma;
-       int ret;
-
-       obj = i915_gem_object_create(dev_priv, size);
-       if (IS_ERR(obj))
-               return ERR_CAST(obj);
-
-       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
-       if (IS_ERR(vma))
-               goto err;
-
-       ret = i915_vma_pin(vma, 0, PAGE_SIZE,
-                          PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
-       if (ret) {
-               vma = ERR_PTR(ret);
-               goto err;
-       }
-
-       return vma;
-
-err:
-       i915_gem_object_put(obj);
-       return vma;
-}
-
 /* Check that a doorbell register is in the expected state */
 static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
 {
@@ -796,8 +755,8 @@ static int guc_init_doorbell_hw(struct intel_guc *guc)
  */
 static struct i915_guc_client *
 guc_client_alloc(struct drm_i915_private *dev_priv,
-                uint32_t engines,
-                uint32_t priority,
+                u32 engines,
+                u32 priority,
                 struct i915_gem_context *ctx)
 {
        struct i915_guc_client *client;
@@ -1069,6 +1028,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 
 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int irqs;
@@ -1105,12 +1065,13 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
         * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
         * result in the register bit being left SET!
         */
-       dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
-       dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+       rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+       rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 }
 
 static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int irqs;
@@ -1129,8 +1090,8 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
        I915_WRITE(GUC_VCS2_VCS1_IER, 0);
        I915_WRITE(GUC_WD_VECS_IER, 0);
 
-       dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-       dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+       rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+       rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
 }
 
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
@@ -1212,55 +1173,3 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
        guc_client_free(guc->execbuf_client);
        guc->execbuf_client = NULL;
 }
-
-/**
- * intel_guc_suspend() - notify GuC entering suspend state
- * @dev_priv:  i915 device private
- */
-int intel_guc_suspend(struct drm_i915_private *dev_priv)
-{
-       struct intel_guc *guc = &dev_priv->guc;
-       struct i915_gem_context *ctx;
-       u32 data[3];
-
-       if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
-               return 0;
-
-       gen9_disable_guc_interrupts(dev_priv);
-
-       ctx = dev_priv->kernel_context;
-
-       data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
-       /* any value greater than GUC_POWER_D0 */
-       data[1] = GUC_POWER_D1;
-       /* first page is shared data with GuC */
-       data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;
-
-       return intel_guc_send(guc, data, ARRAY_SIZE(data));
-}
-
-/**
- * intel_guc_resume() - notify GuC resuming from suspend state
- * @dev_priv:  i915 device private
- */
-int intel_guc_resume(struct drm_i915_private *dev_priv)
-{
-       struct intel_guc *guc = &dev_priv->guc;
-       struct i915_gem_context *ctx;
-       u32 data[3];
-
-       if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
-               return 0;
-
-       if (i915_modparams.guc_log_level >= 0)
-               gen9_enable_guc_interrupts(dev_priv);
-
-       ctx = dev_priv->kernel_context;
-
-       data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
-       data[1] = GUC_POWER_D0;
-       /* first page is shared data with GuC */
-       data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;
-
-       return intel_guc_send(guc, data, ARRAY_SIZE(data));
-}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.h b/drivers/gpu/drm/i915/i915_guc_submission.h
new file mode 100644 (file)
index 0000000..cb4353b
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright Â© 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_GUC_SUBMISSION_H_
+#define _I915_GUC_SUBMISSION_H_
+
+#include <linux/spinlock.h>
+
+#include "i915_gem.h"
+
+struct drm_i915_private;
+
+/*
+ * This structure primarily describes the GEM object shared with the GuC.
+ * The specs sometimes refer to this object as a "GuC context", but we use
+ * the term "client" to avoid confusion with hardware contexts. This
+ * GEM object is held for the entire lifetime of our interaction with
+ * the GuC, being allocated before the GuC is loaded with its firmware.
+ * Because there's no way to update the address used by the GuC after
+ * initialisation, the shared object must stay pinned into the GGTT as
+ * long as the GuC is in use. We also keep the first page (only) mapped
+ * into kernel address space, as it includes shared data that must be
+ * updated on every request submission.
+ *
+ * The single GEM object described here is actually made up of several
+ * separate areas, as far as the GuC is concerned. The first page (kept
+ * kmap'd) includes the "process descriptor" which holds sequence data for
+ * the doorbell, and one cacheline which actually *is* the doorbell; a
+ * write to this will "ring the doorbell" (i.e. send an interrupt to the
+ * GuC). The subsequent  pages of the client object constitute the work
+ * queue (a circular array of work items), again described in the process
+ * descriptor. Work queue pages are mapped momentarily as required.
+ */
+struct i915_guc_client {
+       struct i915_vma *vma;
+       void *vaddr;
+       struct i915_gem_context *owner;
+       struct intel_guc *guc;
+
+       /* bitmap of (host) engine ids */
+       u32 engines;
+       u32 priority;
+       u32 stage_id;
+       u32 proc_desc_offset;
+
+       u16 doorbell_id;
+       unsigned long doorbell_offset;
+
+       spinlock_t wq_lock;
+       /* Per-engine counts of GuC submissions */
+       u64 submissions[I915_NUM_ENGINES];
+};
+
+int i915_guc_submission_init(struct drm_i915_private *dev_priv);
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+
+#endif
index efd7827ff18120cd39fbb876796699c281b6751b..b1296a55c1e4a8ee3caf7471e51baf19fa4a8f5e 100644 (file)
@@ -404,19 +404,21 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
-       dev_priv->rps.pm_iir = 0;
+       dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-       if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       if (READ_ONCE(rps->interrupts_enabled))
                return;
 
        spin_lock_irq(&dev_priv->irq_lock);
-       WARN_ON_ONCE(dev_priv->rps.pm_iir);
+       WARN_ON_ONCE(rps->pm_iir);
        WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-       dev_priv->rps.interrupts_enabled = true;
+       rps->interrupts_enabled = true;
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
        spin_unlock_irq(&dev_priv->irq_lock);
@@ -424,11 +426,13 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-       if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       if (!READ_ONCE(rps->interrupts_enabled))
                return;
 
        spin_lock_irq(&dev_priv->irq_lock);
-       dev_priv->rps.interrupts_enabled = false;
+       rps->interrupts_enabled = false;
 
        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
@@ -442,7 +446,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
-       cancel_work_sync(&dev_priv->rps.work);
+       cancel_work_sync(&rps->work);
        gen6_reset_rps_interrupts(dev_priv);
 }
 
@@ -1119,12 +1123,13 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
 
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-       memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+       memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
-       const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       const struct intel_rps_ei *prev = &rps->ei;
        struct intel_rps_ei now;
        u32 events = 0;
 
@@ -1151,28 +1156,29 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
 
-               if (c0 > time * dev_priv->rps.up_threshold)
+               if (c0 > time * rps->up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
-               else if (c0 < time * dev_priv->rps.down_threshold)
+               else if (c0 < time * rps->down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }
 
-       dev_priv->rps.ei = now;
+       rps->ei = now;
        return events;
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, rps.work);
+               container_of(work, struct drm_i915_private, gt_pm.rps.work);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        bool client_boost = false;
        int new_delay, adj, min, max;
        u32 pm_iir = 0;
 
        spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->rps.interrupts_enabled) {
-               pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
-               client_boost = atomic_read(&dev_priv->rps.num_waiters);
+       if (rps->interrupts_enabled) {
+               pm_iir = fetch_and_zero(&rps->pm_iir);
+               client_boost = atomic_read(&rps->num_waiters);
        }
        spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -1181,18 +1187,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                goto out;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
 
-       adj = dev_priv->rps.last_adj;
-       new_delay = dev_priv->rps.cur_freq;
-       min = dev_priv->rps.min_freq_softlimit;
-       max = dev_priv->rps.max_freq_softlimit;
+       adj = rps->last_adj;
+       new_delay = rps->cur_freq;
+       min = rps->min_freq_softlimit;
+       max = rps->max_freq_softlimit;
        if (client_boost)
-               max = dev_priv->rps.max_freq;
-       if (client_boost && new_delay < dev_priv->rps.boost_freq) {
-               new_delay = dev_priv->rps.boost_freq;
+               max = rps->max_freq;
+       if (client_boost && new_delay < rps->boost_freq) {
+               new_delay = rps->boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
@@ -1200,15 +1206,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
 
-               if (new_delay >= dev_priv->rps.max_freq_softlimit)
+               if (new_delay >= rps->max_freq_softlimit)
                        adj = 0;
        } else if (client_boost) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-               if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
-                       new_delay = dev_priv->rps.efficient_freq;
-               else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
-                       new_delay = dev_priv->rps.min_freq_softlimit;
+               if (rps->cur_freq > rps->efficient_freq)
+                       new_delay = rps->efficient_freq;
+               else if (rps->cur_freq > rps->min_freq_softlimit)
+                       new_delay = rps->min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
@@ -1216,13 +1222,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
 
-               if (new_delay <= dev_priv->rps.min_freq_softlimit)
+               if (new_delay <= rps->min_freq_softlimit)
                        adj = 0;
        } else { /* unknown event */
                adj = 0;
        }
 
-       dev_priv->rps.last_adj = adj;
+       rps->last_adj = adj;
 
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
@@ -1232,15 +1238,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
        if (intel_set_rps(dev_priv, new_delay)) {
                DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
-               dev_priv->rps.last_adj = 0;
+               rps->last_adj = 0;
        }
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
 out:
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->rps.interrupts_enabled)
+       if (rps->interrupts_enabled)
                gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -1382,10 +1388,8 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
        bool tasklet = false;
 
        if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
-               if (port_count(&execlists->port[0])) {
-                       __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-                       tasklet = true;
-               }
+               __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+               tasklet = true;
        }
 
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
@@ -1723,12 +1727,14 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
  * the work queue. */
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        if (pm_iir & dev_priv->pm_rps_events) {
                spin_lock(&dev_priv->irq_lock);
                gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
-               if (dev_priv->rps.interrupts_enabled) {
-                       dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-                       schedule_work(&dev_priv->rps.work);
+               if (rps->interrupts_enabled) {
+                       rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+                       schedule_work(&rps->work);
                }
                spin_unlock(&dev_priv->irq_lock);
        }
@@ -2254,18 +2260,14 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
 {
        u32 serr_int = I915_READ(SERR_INT);
+       enum pipe pipe;
 
        if (serr_int & SERR_INT_POISON)
                DRM_ERROR("PCH poison interrupt\n");
 
-       if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
-               intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
-
-       if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
-               intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
-
-       if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
-               intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);
+       for_each_pipe(dev_priv, pipe)
+               if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
+                       intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
 
        I915_WRITE(SERR_INT, serr_int);
 }
@@ -3163,10 +3165,17 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
        enum pipe pipe;
 
        spin_lock_irq(&dev_priv->irq_lock);
+
+       if (!intel_irqs_enabled(dev_priv)) {
+               spin_unlock_irq(&dev_priv->irq_lock);
+               return;
+       }
+
        for_each_pipe_masked(dev_priv, pipe, pipe_mask)
                GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
                                  dev_priv->de_irq_mask[pipe],
                                  ~dev_priv->de_irq_mask[pipe] | extra_ier);
+
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
@@ -3176,8 +3185,15 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
        enum pipe pipe;
 
        spin_lock_irq(&dev_priv->irq_lock);
+
+       if (!intel_irqs_enabled(dev_priv)) {
+               spin_unlock_irq(&dev_priv->irq_lock);
+               return;
+       }
+
        for_each_pipe_masked(dev_priv, pipe, pipe_mask)
                GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
        spin_unlock_irq(&dev_priv->irq_lock);
 
        /* make sure we're done processing display irqs */
@@ -3598,16 +3614,15 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        else if (IS_BROADWELL(dev_priv))
                de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
 
-       dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
-       dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
-       dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+       for_each_pipe(dev_priv, pipe) {
+               dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
 
-       for_each_pipe(dev_priv, pipe)
                if (intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_PIPE(pipe)))
                        GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
                                          dev_priv->de_irq_mask[pipe],
                                          de_pipe_enables);
+       }
 
        GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
        GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
@@ -4000,11 +4015,12 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = &dev_priv->drm;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int i;
 
        intel_hpd_init_work(dev_priv);
 
-       INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+       INIT_WORK(&rps->work, gen6_pm_rps_work);
 
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
@@ -4020,7 +4036,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        else
                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
-       dev_priv->rps.pm_intrmsk_mbz = 0;
+       rps->pm_intrmsk_mbz = 0;
 
        /*
         * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
@@ -4029,10 +4045,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_GEN(dev_priv) <= 7)
-               dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+               rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
        if (INTEL_GEN(dev_priv) >= 8)
-               dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+               rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
        if (IS_GEN2(dev_priv)) {
                /* Gen2 doesn't have a hardware frame counter */
@@ -4166,7 +4182,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
-       dev_priv->pm.irqs_enabled = true;
+       dev_priv->runtime_pm.irqs_enabled = true;
 
        return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
 }
@@ -4182,7 +4198,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 {
        drm_irq_uninstall(&dev_priv->drm);
        intel_hpd_cancel_work(dev_priv);
-       dev_priv->pm.irqs_enabled = false;
+       dev_priv->runtime_pm.irqs_enabled = false;
 }
 
 /**
@@ -4195,7 +4211,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
        dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
-       dev_priv->pm.irqs_enabled = false;
+       dev_priv->runtime_pm.irqs_enabled = false;
        synchronize_irq(dev_priv->drm.irq);
 }
 
@@ -4208,7 +4224,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
 {
-       dev_priv->pm.irqs_enabled = true;
+       dev_priv->runtime_pm.irqs_enabled = true;
        dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
        dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
 }
index 9dff323a83d3e556807e91dd57f99f36635db6d6..b4faeb6aa2bdc5c9d12210bd114d7856dd183061 100644 (file)
@@ -146,9 +146,6 @@ i915_param_named(disable_display, bool, 0400,
 i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
        "Enable command parsing (true=enabled [default], false=disabled)");
 
-i915_param_named_unsafe(use_mmio_flip, int, 0600,
-       "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
-
 i915_param_named(mmio_debug, int, 0600,
        "Enable the MMIO debug code for the first N failures (default: off). "
        "This may negatively affect performance.");
index 4f3f8d6501945ac8b0bf521fecbf20a9488c00da..c7292268ed43917040cc64273ea31b09005a2c79 100644 (file)
@@ -49,7 +49,6 @@
        param(int, guc_log_level, -1) \
        param(char *, guc_firmware_path, NULL) \
        param(char *, huc_firmware_path, NULL) \
-       param(int, use_mmio_flip, 0) \
        param(int, mmio_debug, 0) \
        param(int, edp_vswing, 0) \
        param(int, reset, 2) \
index da60866b6628affa988b5cdeed13b8151614e21e..bf467f30c99b7adb062164540a5c4f0f0943c210 100644 (file)
        .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
 #define CHV_COLORS \
        .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+#define GLK_COLORS \
+       .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
 
 /* Keep in gen based order, and chronological order within a gen */
+
+#define GEN_DEFAULT_PAGE_SIZES \
+       .page_sizes = I915_GTT_PAGE_SIZE_4K
+
 #define GEN2_FEATURES \
        .gen = 2, .num_pipes = 1, \
        .has_overlay = 1, .overlay_needs_physical = 1, \
@@ -65,6 +71,7 @@
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
        GEN_DEFAULT_PIPEOFFSETS, \
+       GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
 
 static const struct intel_device_info intel_i830_info __initconst = {
@@ -98,6 +105,7 @@ static const struct intel_device_info intel_i865g_info __initconst = {
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
        GEN_DEFAULT_PIPEOFFSETS, \
+       GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
 
 static const struct intel_device_info intel_i915g_info __initconst = {
@@ -161,6 +169,7 @@ static const struct intel_device_info intel_pineview_info __initconst = {
        .ring_mask = RENDER_RING, \
        .has_snoop = true, \
        GEN_DEFAULT_PIPEOFFSETS, \
+       GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
 
 static const struct intel_device_info intel_i965g_info __initconst = {
@@ -203,6 +212,7 @@ static const struct intel_device_info intel_gm45_info __initconst = {
        .ring_mask = RENDER_RING | BSD_RING, \
        .has_snoop = true, \
        GEN_DEFAULT_PIPEOFFSETS, \
+       GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
 
 static const struct intel_device_info intel_ironlake_d_info __initconst = {
@@ -226,6 +236,7 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = {
        .has_rc6p = 1, \
        .has_aliasing_ppgtt = 1, \
        GEN_DEFAULT_PIPEOFFSETS, \
+       GEN_DEFAULT_PAGE_SIZES, \
        CURSOR_OFFSETS
 
 #define SNB_D_PLATFORM \
@@ -269,6 +280,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst =
        .has_aliasing_ppgtt = 1, \
        .has_full_ppgtt = 1, \
        GEN_DEFAULT_PIPEOFFSETS, \
+       GEN_DEFAULT_PAGE_SIZES, \
        IVB_CURSOR_OFFSETS
 
 #define IVB_D_PLATFORM \
@@ -325,11 +337,12 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
        .has_snoop = true,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .display_mmio_offset = VLV_DISPLAY_BASE,
+       GEN_DEFAULT_PAGE_SIZES,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS
 };
 
-#define HSW_FEATURES  \
+#define G75_FEATURES  \
        GEN7_FEATURES, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
        .has_ddi = 1, \
@@ -341,7 +354,7 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
        .has_runtime_pm = 1
 
 #define HSW_PLATFORM \
-       HSW_FEATURES, \
+       G75_FEATURES, \
        .platform = INTEL_HASWELL, \
        .has_l3_dpf = 1
 
@@ -360,16 +373,18 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = {
        .gt = 3,
 };
 
-#define BDW_FEATURES \
-       HSW_FEATURES, \
+#define GEN8_FEATURES \
+       G75_FEATURES, \
        BDW_COLORS, \
+       .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+                     I915_GTT_PAGE_SIZE_2M, \
        .has_logical_ring_contexts = 1, \
        .has_full_48bit_ppgtt = 1, \
        .has_64bit_reloc = 1, \
        .has_reset_engine = 1
 
 #define BDW_PLATFORM \
-       BDW_FEATURES, \
+       GEN8_FEATURES, \
        .gen = 8, \
        .platform = INTEL_BROADWELL
 
@@ -415,19 +430,31 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
        .has_reset_engine = 1,
        .has_snoop = true,
        .display_mmio_offset = VLV_DISPLAY_BASE,
+       GEN_DEFAULT_PAGE_SIZES,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
        CHV_COLORS,
 };
 
-#define SKL_PLATFORM \
-       BDW_FEATURES, \
-       .gen = 9, \
-       .platform = INTEL_SKYLAKE, \
+#define GEN9_DEFAULT_PAGE_SIZES \
+       .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+                     I915_GTT_PAGE_SIZE_64K | \
+                     I915_GTT_PAGE_SIZE_2M
+
+#define GEN9_FEATURES \
+       GEN8_FEATURES, \
+       GEN9_DEFAULT_PAGE_SIZES, \
+       .has_logical_ring_preemption = 1, \
        .has_csr = 1, \
        .has_guc = 1, \
+       .has_ipc = 1, \
        .ddb_size = 896
 
+#define SKL_PLATFORM \
+       GEN9_FEATURES, \
+       .gen = 9, \
+       .platform = INTEL_SKYLAKE
+
 static const struct intel_device_info intel_skylake_gt1_info __initconst = {
        SKL_PLATFORM,
        .gt = 1,
@@ -463,6 +490,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
        .has_ddi = 1, \
        .has_fpga_dbg = 1, \
        .has_fbc = 1, \
+       .has_psr = 1, \
        .has_runtime_pm = 1, \
        .has_pooled_eu = 0, \
        .has_csr = 1, \
@@ -470,6 +498,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
        .has_rc6 = 1, \
        .has_dp_mst = 1, \
        .has_logical_ring_contexts = 1, \
+       .has_logical_ring_preemption = 1, \
        .has_guc = 1, \
        .has_aliasing_ppgtt = 1, \
        .has_full_ppgtt = 1, \
@@ -477,6 +506,7 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
        .has_reset_engine = 1, \
        .has_snoop = true, \
        .has_ipc = 1, \
+       GEN9_DEFAULT_PAGE_SIZES, \
        GEN_DEFAULT_PIPEOFFSETS, \
        IVB_CURSOR_OFFSETS, \
        BDW_COLORS
@@ -491,17 +521,13 @@ static const struct intel_device_info intel_geminilake_info __initconst = {
        GEN9_LP_FEATURES,
        .platform = INTEL_GEMINILAKE,
        .ddb_size = 1024,
-       .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
+       GLK_COLORS,
 };
 
 #define KBL_PLATFORM \
-       BDW_FEATURES, \
+       GEN9_FEATURES, \
        .gen = 9, \
-       .platform = INTEL_KABYLAKE, \
-       .has_csr = 1, \
-       .has_guc = 1, \
-       .has_ipc = 1, \
-       .ddb_size = 896
+       .platform = INTEL_KABYLAKE
 
 static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
        KBL_PLATFORM,
@@ -520,13 +546,9 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
 };
 
 #define CFL_PLATFORM \
-       BDW_FEATURES, \
+       GEN9_FEATURES, \
        .gen = 9, \
-       .platform = INTEL_COFFEELAKE, \
-       .has_csr = 1, \
-       .has_guc = 1, \
-       .has_ipc = 1, \
-       .ddb_size = 896
+       .platform = INTEL_COFFEELAKE
 
 static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
        CFL_PLATFORM,
@@ -544,16 +566,17 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
+#define GEN10_FEATURES \
+       GEN9_FEATURES, \
+       .ddb_size = 1024, \
+       GLK_COLORS
+
 static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
-       BDW_FEATURES,
+       GEN10_FEATURES,
        .is_alpha_support = 1,
        .platform = INTEL_CANNONLAKE,
        .gen = 10,
        .gt = 2,
-       .ddb_size = 1024,
-       .has_csr = 1,
-       .has_ipc = 1,
-       .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
 };
 
 /*
index ee0d4f14ac98c07b6f4f4cdf7a42b948cb6e0553..d2d0a83c09b6d185f82a5bdade51e13f30f30a93 100644 (file)
@@ -2371,6 +2371,9 @@ enum i915_power_well_id {
 #define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
 #define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS  (1<<18)
 
+#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
+#define   GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+
 #define GAMT_CHKN_BIT_REG      _MMIO(0x4ab8)
 #define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING     (1<<28)
 #define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT       (1<<24)
@@ -3819,6 +3822,16 @@ enum {
 #define   PWM2_GATING_DIS              (1 << 14)
 #define   PWM1_GATING_DIS              (1 << 13)
 
+#define _CLKGATE_DIS_PSL_A             0x46520
+#define _CLKGATE_DIS_PSL_B             0x46524
+#define _CLKGATE_DIS_PSL_C             0x46528
+#define   DPF_GATING_DIS               (1 << 10)
+#define   DPF_RAM_GATING_DIS           (1 << 9)
+#define   DPFR_GATING_DIS              (1 << 8)
+
+#define CLKGATE_DIS_PSL(pipe) \
+       _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+
 /*
  * GEN10 clock gating regs
  */
@@ -5671,8 +5684,7 @@ enum {
 #define  CBR_PWM_CLOCK_MUX_SELECT      (1<<30)
 
 #define CBR4_VLV                       _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define  CBR_DPLLBMD_PIPE_C            (1<<29)
-#define  CBR_DPLLBMD_PIPE_B            (1<<18)
+#define  CBR_DPLLBMD_PIPE(pipe)                (1<<(7+(pipe)*11)) /* pipes B and C */
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE     64
@@ -6993,6 +7005,12 @@ enum {
 #define GEN9_CS_DEBUG_MODE1            _MMIO(0x20ec)
 #define GEN9_CTX_PREEMPT_REG           _MMIO(0x2248)
 #define GEN8_CS_CHICKEN1               _MMIO(0x2580)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL           (1<<0)
+#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo)       (((hi) << 2) | ((lo) << 1))
+#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL    GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
+#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL  GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
+#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL       GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL_MASK          GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
 
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1             _MMIO(0x7010)
@@ -7164,9 +7182,6 @@ enum {
 
 #define SERR_INT                       _MMIO(0xc4040)
 #define  SERR_INT_POISON               (1<<31)
-#define  SERR_INT_TRANS_C_FIFO_UNDERRUN        (1<<6)
-#define  SERR_INT_TRANS_B_FIFO_UNDERRUN        (1<<3)
-#define  SERR_INT_TRANS_A_FIFO_UNDERRUN        (1<<0)
 #define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)    (1<<((pipe)*3))
 
 /* digital port hotplug */
index 5c86925a029438d795e07d4f72f7da56e2605b23..8f3aa4dc0c98596a3b0443e493be84403d120bc3 100644 (file)
@@ -108,8 +108,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
-       i915_gem_restore_fences(dev_priv);
-
        if (IS_GEN4(dev_priv))
                pci_write_config_word(pdev, GCDGMBUS,
                                      dev_priv->regfile.saveGCDGMBUS);
index d61c8727f756c43d95277923d6e652b564b79bf6..791759f632e1dd65cde71891c0db6177281ea0e9 100644 (file)
@@ -49,7 +49,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
 static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
+       return snprintf(buf, PAGE_SIZE, "%x\n", intel_rc6_enabled());
 }
 
 static ssize_t
@@ -246,7 +246,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 
        intel_runtime_pm_get(dev_priv);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 freq;
                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -261,7 +261,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
                        ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
                ret = intel_gpu_freq(dev_priv, ret);
        }
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        intel_runtime_pm_put(dev_priv);
 
@@ -275,7 +275,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.cur_freq));
+                                      dev_priv->gt_pm.rps.cur_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -284,7 +284,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.boost_freq));
+                                      dev_priv->gt_pm.rps.boost_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -292,6 +292,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
                                       const char *buf, size_t count)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
        ssize_t ret;
 
@@ -301,12 +302,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 
        /* Validate against (static) hardware limits */
        val = intel_freq_opcode(dev_priv, val);
-       if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+       if (val < rps->min_freq || val > rps->max_freq)
                return -EINVAL;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
-       dev_priv->rps.boost_freq = val;
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
+       rps->boost_freq = val;
+       mutex_unlock(&dev_priv->pcu_lock);
 
        return count;
 }
@@ -318,7 +319,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.efficient_freq));
+                                      dev_priv->gt_pm.rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -327,7 +328,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.max_freq_softlimit));
+                                      dev_priv->gt_pm.rps.max_freq_softlimit));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -335,6 +336,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     const char *buf, size_t count)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
        ssize_t ret;
 
@@ -344,34 +346,34 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
        intel_runtime_pm_get(dev_priv);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        val = intel_freq_opcode(dev_priv, val);
 
-       if (val < dev_priv->rps.min_freq ||
-           val > dev_priv->rps.max_freq ||
-           val < dev_priv->rps.min_freq_softlimit) {
-               mutex_unlock(&dev_priv->rps.hw_lock);
+       if (val < rps->min_freq ||
+           val > rps->max_freq ||
+           val < rps->min_freq_softlimit) {
+               mutex_unlock(&dev_priv->pcu_lock);
                intel_runtime_pm_put(dev_priv);
                return -EINVAL;
        }
 
-       if (val > dev_priv->rps.rp0_freq)
+       if (val > rps->rp0_freq)
                DRM_DEBUG("User requested overclocking to %d\n",
                          intel_gpu_freq(dev_priv, val));
 
-       dev_priv->rps.max_freq_softlimit = val;
+       rps->max_freq_softlimit = val;
 
-       val = clamp_t(int, dev_priv->rps.cur_freq,
-                     dev_priv->rps.min_freq_softlimit,
-                     dev_priv->rps.max_freq_softlimit);
+       val = clamp_t(int, rps->cur_freq,
+                     rps->min_freq_softlimit,
+                     rps->max_freq_softlimit);
 
        /* We still need *_set_rps to process the new max_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
        ret = intel_set_rps(dev_priv, val);
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        intel_runtime_pm_put(dev_priv);
 
@@ -384,7 +386,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
-                                      dev_priv->rps.min_freq_softlimit));
+                                      dev_priv->gt_pm.rps.min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -392,6 +394,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     const char *buf, size_t count)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
        ssize_t ret;
 
@@ -401,30 +404,30 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
        intel_runtime_pm_get(dev_priv);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        val = intel_freq_opcode(dev_priv, val);
 
-       if (val < dev_priv->rps.min_freq ||
-           val > dev_priv->rps.max_freq ||
-           val > dev_priv->rps.max_freq_softlimit) {
-               mutex_unlock(&dev_priv->rps.hw_lock);
+       if (val < rps->min_freq ||
+           val > rps->max_freq ||
+           val > rps->max_freq_softlimit) {
+               mutex_unlock(&dev_priv->pcu_lock);
                intel_runtime_pm_put(dev_priv);
                return -EINVAL;
        }
 
-       dev_priv->rps.min_freq_softlimit = val;
+       rps->min_freq_softlimit = val;
 
-       val = clamp_t(int, dev_priv->rps.cur_freq,
-                     dev_priv->rps.min_freq_softlimit,
-                     dev_priv->rps.max_freq_softlimit);
+       val = clamp_t(int, rps->cur_freq,
+                     rps->min_freq_softlimit,
+                     rps->max_freq_softlimit);
 
        /* We still need *_set_rps to process the new min_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
        ret = intel_set_rps(dev_priv, val);
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        intel_runtime_pm_put(dev_priv);
 
@@ -448,14 +451,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
 
        if (attr == &dev_attr_gt_RP0_freq_mhz)
-               val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+               val = intel_gpu_freq(dev_priv, rps->rp0_freq);
        else if (attr == &dev_attr_gt_RP1_freq_mhz)
-               val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+               val = intel_gpu_freq(dev_priv, rps->rp1_freq);
        else if (attr == &dev_attr_gt_RPn_freq_mhz)
-               val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+               val = intel_gpu_freq(dev_priv, rps->min_freq);
        else
                BUG();
 
index 92f4c5bb7aa74cf8c5ae51ad512b8223f0f8ea1b..9cab91ddeb79f469b7cc410ce0cf2a64538688f9 100644 (file)
@@ -345,7 +345,7 @@ TRACE_EVENT(i915_gem_object_create,
 
            TP_STRUCT__entry(
                             __field(struct drm_i915_gem_object *, obj)
-                            __field(u32, size)
+                            __field(u64, size)
                             ),
 
            TP_fast_assign(
@@ -353,7 +353,7 @@ TRACE_EVENT(i915_gem_object_create,
                           __entry->size = obj->base.size;
                           ),
 
-           TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+           TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
 );
 
 TRACE_EVENT(i915_gem_shrink,
@@ -384,7 +384,7 @@ TRACE_EVENT(i915_vma_bind,
                             __field(struct drm_i915_gem_object *, obj)
                             __field(struct i915_address_space *, vm)
                             __field(u64, offset)
-                            __field(u32, size)
+                            __field(u64, size)
                             __field(unsigned, flags)
                             ),
 
@@ -396,7 +396,7 @@ TRACE_EVENT(i915_vma_bind,
                           __entry->flags = flags;
                           ),
 
-           TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
+           TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
                      __entry->obj, __entry->offset, __entry->size,
                      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
                      __entry->vm)
@@ -410,7 +410,7 @@ TRACE_EVENT(i915_vma_unbind,
                             __field(struct drm_i915_gem_object *, obj)
                             __field(struct i915_address_space *, vm)
                             __field(u64, offset)
-                            __field(u32, size)
+                            __field(u64, size)
                             ),
 
            TP_fast_assign(
@@ -420,18 +420,18 @@ TRACE_EVENT(i915_vma_unbind,
                           __entry->size = vma->node.size;
                           ),
 
-           TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
+           TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
                      __entry->obj, __entry->offset, __entry->size, __entry->vm)
 );
 
 TRACE_EVENT(i915_gem_object_pwrite,
-           TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+           TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
            TP_ARGS(obj, offset, len),
 
            TP_STRUCT__entry(
                             __field(struct drm_i915_gem_object *, obj)
-                            __field(u32, offset)
-                            __field(u32, len)
+                            __field(u64, offset)
+                            __field(u64, len)
                             ),
 
            TP_fast_assign(
@@ -440,18 +440,18 @@ TRACE_EVENT(i915_gem_object_pwrite,
                           __entry->len = len;
                           ),
 
-           TP_printk("obj=%p, offset=%u, len=%u",
+           TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
                      __entry->obj, __entry->offset, __entry->len)
 );
 
 TRACE_EVENT(i915_gem_object_pread,
-           TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+           TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
            TP_ARGS(obj, offset, len),
 
            TP_STRUCT__entry(
                             __field(struct drm_i915_gem_object *, obj)
-                            __field(u32, offset)
-                            __field(u32, len)
+                            __field(u64, offset)
+                            __field(u64, len)
                             ),
 
            TP_fast_assign(
@@ -460,17 +460,17 @@ TRACE_EVENT(i915_gem_object_pread,
                           __entry->len = len;
                           ),
 
-           TP_printk("obj=%p, offset=%u, len=%u",
+           TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
                      __entry->obj, __entry->offset, __entry->len)
 );
 
 TRACE_EVENT(i915_gem_object_fault,
-           TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+           TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
            TP_ARGS(obj, index, gtt, write),
 
            TP_STRUCT__entry(
                             __field(struct drm_i915_gem_object *, obj)
-                            __field(u32, index)
+                            __field(u64, index)
                             __field(bool, gtt)
                             __field(bool, write)
                             ),
@@ -482,7 +482,7 @@ TRACE_EVENT(i915_gem_object_fault,
                           __entry->write = write;
                           ),
 
-           TP_printk("obj=%p, %s index=%u %s",
+           TP_printk("obj=%p, %s index=%llu %s",
                      __entry->obj,
                      __entry->gtt ? "GTT" : "CPU",
                      __entry->index,
@@ -515,14 +515,14 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-           TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+           TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
            TP_ARGS(vm, size, align, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(struct i915_address_space *, vm)
-                            __field(u32, size)
-                            __field(u32, align)
+                            __field(u64, size)
+                            __field(u64, align)
                             __field(unsigned int, flags)
                            ),
 
@@ -534,43 +534,11 @@ TRACE_EVENT(i915_gem_evict,
                           __entry->flags = flags;
                          ),
 
-           TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+           TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
                      __entry->dev, __entry->vm, __entry->size, __entry->align,
                      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
-TRACE_EVENT(i915_gem_evict_everything,
-           TP_PROTO(struct drm_device *dev),
-           TP_ARGS(dev),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                           ),
-
-           TP_fast_assign(
-                          __entry->dev = dev->primary->index;
-                         ),
-
-           TP_printk("dev=%d", __entry->dev)
-);
-
-TRACE_EVENT(i915_gem_evict_vm,
-           TP_PROTO(struct i915_address_space *vm),
-           TP_ARGS(vm),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(struct i915_address_space *, vm)
-                           ),
-
-           TP_fast_assign(
-                          __entry->dev = vm->i915->drm.primary->index;
-                          __entry->vm = vm;
-                         ),
-
-           TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
-);
-
 TRACE_EVENT(i915_gem_evict_node,
            TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
            TP_ARGS(vm, node, flags),
@@ -593,12 +561,29 @@ TRACE_EVENT(i915_gem_evict_node,
                           __entry->flags = flags;
                          ),
 
-           TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x",
+           TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
                      __entry->dev, __entry->vm,
                      __entry->start, __entry->size,
                      __entry->color, __entry->flags)
 );
 
+TRACE_EVENT(i915_gem_evict_vm,
+           TP_PROTO(struct i915_address_space *vm),
+           TP_ARGS(vm),
+
+           TP_STRUCT__entry(
+                            __field(u32, dev)
+                            __field(struct i915_address_space *, vm)
+                           ),
+
+           TP_fast_assign(
+                          __entry->dev = vm->i915->drm.primary->index;
+                          __entry->vm = vm;
+                         ),
+
+           TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
+);
+
 TRACE_EVENT(i915_gem_ring_sync_to,
            TP_PROTO(struct drm_i915_gem_request *to,
                     struct drm_i915_gem_request *from),
@@ -649,29 +634,6 @@ TRACE_EVENT(i915_gem_request_queue,
                      __entry->flags)
 );
 
-TRACE_EVENT(i915_gem_ring_flush,
-           TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
-           TP_ARGS(req, invalidate, flush),
-
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(u32, ring)
-                            __field(u32, invalidate)
-                            __field(u32, flush)
-                            ),
-
-           TP_fast_assign(
-                          __entry->dev = req->i915->drm.primary->index;
-                          __entry->ring = req->engine->id;
-                          __entry->invalidate = invalidate;
-                          __entry->flush = flush;
-                          ),
-
-           TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
-                     __entry->dev, __entry->ring,
-                     __entry->invalidate, __entry->flush)
-);
-
 DECLARE_EVENT_CLASS(i915_gem_request,
            TP_PROTO(struct drm_i915_gem_request *req),
            TP_ARGS(req),
index 12fc250b47b963a24217cfbc90bf5c1d4fe92e57..af3d7cc53fa13783a8b914e7deaf22ed25cd50d2 100644 (file)
        __T;                                                            \
 })
 
+static inline u64 ptr_to_u64(const void *ptr)
+{
+       return (uintptr_t)ptr;
+}
+
 #define u64_to_ptr(T, x) ({                                            \
        typecheck(u64, x);                                              \
        (T *)(uintptr_t)(x);                                            \
@@ -119,4 +124,17 @@ static inline void __list_del_many(struct list_head *head,
        WRITE_ONCE(head->next, first);
 }
 
+/*
+ * Wait until the work is finally complete, even if it tries to postpone
+ * by requeueing itself. Note, that if the worker never cancels itself,
+ * we will spin forever.
+ */
+static inline void drain_delayed_work(struct delayed_work *dw)
+{
+       do {
+               while (flush_delayed_work(dw))
+                       ;
+       } while (delayed_work_pending(dw));
+}
+
 #endif /* !__I915_UTILS_H */
index 02d1a5eacb00ef5fa89deeaaa41318f81ebac902..4dce2e0197d9bda1b53aacdadfadf8e1f8dd23f7 100644 (file)
@@ -266,6 +266,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
        if (bind_flags == 0)
                return 0;
 
+       GEM_BUG_ON(!vma->pages);
+
        trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
@@ -278,13 +280,16 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 {
        void __iomem *ptr;
+       int err;
 
        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);
 
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-       if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
-               return IO_ERR_PTR(-ENODEV);
+       if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+               err = -ENODEV;
+               goto err;
+       }
 
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
@@ -294,14 +299,36 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
                                        vma->node.start,
                                        vma->node.size);
-               if (ptr == NULL)
-                       return IO_ERR_PTR(-ENOMEM);
+               if (ptr == NULL) {
+                       err = -ENOMEM;
+                       goto err;
+               }
 
                vma->iomap = ptr;
        }
 
        __i915_vma_pin(vma);
+
+       err = i915_vma_pin_fence(vma);
+       if (err)
+               goto err_unpin;
+
        return ptr;
+
+err_unpin:
+       __i915_vma_unpin(vma);
+err:
+       return IO_ERR_PTR(err);
+}
+
+void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+
+       GEM_BUG_ON(vma->iomap == NULL);
+
+       i915_vma_unpin_fence(vma);
+       i915_vma_unpin(vma);
 }
 
 void i915_vma_unpin_and_release(struct i915_vma **p_vma)
@@ -471,25 +498,64 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        if (ret)
                return ret;
 
+       GEM_BUG_ON(vma->pages);
+
+       ret = vma->vm->set_pages(vma);
+       if (ret)
+               goto err_unpin;
+
        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;
                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
-                       goto err_unpin;
+                       goto err_clear;
                }
 
                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, obj->cache_level,
                                           flags);
                if (ret)
-                       goto err_unpin;
+                       goto err_clear;
        } else {
+               /*
+                * We only support huge gtt pages through the 48b PPGTT,
+                * however we also don't want to force any alignment for
+                * objects which need to be tightly packed into the low 32bits.
+                *
+                * Note that we assume that GGTT are limited to 4GiB for the
+                * forseeable future. See also i915_ggtt_offset().
+                */
+               if (upper_32_bits(end - 1) &&
+                   vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+                       /*
+                        * We can't mix 64K and 4K PTEs in the same page-table
+                        * (2M block), and so to avoid the ugliness and
+                        * complexity of coloring we opt for just aligning 64K
+                        * objects to 2M.
+                        */
+                       u64 page_alignment =
+                               rounddown_pow_of_two(vma->page_sizes.sg |
+                                                    I915_GTT_PAGE_SIZE_2M);
+
+                       /*
+                        * Check we don't expand for the limited Global GTT
+                        * (mappable aperture is even more precious!). This
+                        * also checks that we exclude the aliasing-ppgtt.
+                        */
+                       GEM_BUG_ON(i915_vma_is_ggtt(vma));
+
+                       alignment = max(alignment, page_alignment);
+
+                       if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+                               size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+               }
+
                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, obj->cache_level,
                                          start, end, flags);
                if (ret)
-                       goto err_unpin;
+                       goto err_clear;
 
                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -504,6 +570,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
        return 0;
 
+err_clear:
+       vma->vm->clear_pages(vma);
 err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
@@ -517,6 +585,8 @@ i915_vma_remove(struct i915_vma *vma)
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
+       vma->vm->clear_pages(vma);
+
        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
@@ -569,8 +639,8 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
 err_remove:
        if ((bound & I915_VMA_BIND_MASK) == 0) {
-               GEM_BUG_ON(vma->pages);
                i915_vma_remove(vma);
+               GEM_BUG_ON(vma->pages);
        }
 err_unpin:
        __i915_vma_unpin(vma);
@@ -620,6 +690,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
        vma->iomap = NULL;
 }
 
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+       struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+       u64 vma_offset;
+
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+       if (!i915_vma_has_userfault(vma))
+               return;
+
+       GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+       GEM_BUG_ON(!vma->obj->userfault_count);
+
+       vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+       unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+                           drm_vma_node_offset_addr(node) + vma_offset,
+                           vma->size,
+                           1);
+
+       i915_vma_unset_userfault(vma);
+       if (!--vma->obj->userfault_count)
+               list_del(&vma->obj->userfault_link);
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
@@ -683,11 +777,13 @@ int i915_vma_unbind(struct i915_vma *vma)
                        return ret;
 
                /* Force a pagefault for domain tracking on next user access */
-               i915_gem_release_mmap(obj);
+               i915_vma_revoke_mmap(vma);
 
                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
        }
+       GEM_BUG_ON(vma->fence);
+       GEM_BUG_ON(i915_vma_has_userfault(vma));
 
        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
@@ -695,13 +791,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-       if (vma->pages != obj->mm.pages) {
-               GEM_BUG_ON(!vma->pages);
-               sg_free_table(vma->pages);
-               kfree(vma->pages);
-       }
-       vma->pages = NULL;
-
        i915_vma_remove(vma);
 
 destroy:
index e811067c7724f626c93c441586c3b4855210b225..1e2bc9b3c3ac19a4790222eb151765050e264d49 100644 (file)
@@ -55,6 +55,7 @@ struct i915_vma {
        void __iomem *iomap;
        u64 size;
        u64 display_alignment;
+       struct i915_page_sizes page_sizes;
 
        u32 fence_size;
        u32 fence_alignment;
@@ -65,7 +66,7 @@ struct i915_vma {
         * that exist in the ctx->handle_vmas LUT for this vma.
         */
        unsigned int open_count;
-       unsigned int flags;
+       unsigned long flags;
        /**
         * How many users have pinned this object in GTT space. The following
         * users can each hold at most one reference: pwrite/pread, execbuffer
@@ -87,6 +88,8 @@ struct i915_vma {
 #define I915_VMA_GGTT          BIT(8)
 #define I915_VMA_CAN_FENCE     BIT(9)
 #define I915_VMA_CLOSED                BIT(10)
+#define I915_VMA_USERFAULT_BIT 11
+#define I915_VMA_USERFAULT     BIT(I915_VMA_USERFAULT_BIT)
 
        unsigned int active;
        struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -145,6 +148,22 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
        return vma->flags & I915_VMA_CLOSED;
 }
 
+static inline bool i915_vma_set_userfault(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+       return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline void i915_vma_unset_userfault(struct i915_vma *vma)
+{
+       return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+       return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
 static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
 {
        return vma->active;
@@ -243,6 +262,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
 bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
@@ -321,12 +341,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
  * Callers must hold the struct_mutex. This function is only valid to be
  * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
  */
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
-       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
-       GEM_BUG_ON(vma->iomap == NULL);
-       i915_vma_unpin(vma);
-}
+void i915_vma_unpin_iomap(struct i915_vma *vma);
 
 static inline struct page *i915_vma_first_page(struct i915_vma *vma)
 {
@@ -349,15 +364,13 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
  *
  * True if the vma has a fence, false otherwise.
  */
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
+int i915_vma_pin_fence(struct i915_vma *vma);
+int __must_check i915_vma_put_fence(struct i915_vma *vma);
+
+static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
-       if (vma->fence) {
-               vma->fence->pin_count++;
-               return true;
-       } else
-               return false;
+       GEM_BUG_ON(vma->fence->pin_count <= 0);
+       vma->fence->pin_count--;
 }
 
 /**
@@ -372,10 +385,8 @@ static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
        lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
-       if (vma->fence) {
-               GEM_BUG_ON(vma->fence->pin_count <= 0);
-               vma->fence->pin_count--;
-       }
+       if (vma->fence)
+               __i915_vma_unpin_fence(vma);
 }
 
 #endif
index 27743be5b768e13c4be9749537b1af76dfb3f478..0ddba16fde1bf81346f9e1920d67688add0b6502 100644 (file)
@@ -754,7 +754,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
 {
        struct intel_encoder *encoder;
 
-       if (WARN_ON(pipe >= I915_MAX_PIPES))
+       if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
                return NULL;
 
        /* MST */
index b881ce8596eeca2a8c2d5ee7639a50240dd9548a..9d5b4295343618e82784f8e1f8cd2cb23fa00cfd 100644 (file)
@@ -431,27 +431,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
                      dev_priv->vbt.fdi_rx_polarity_inverted);
 }
 
-static void
-parse_general_definitions(struct drm_i915_private *dev_priv,
-                         const struct bdb_header *bdb)
-{
-       const struct bdb_general_definitions *general;
-
-       general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
-       if (general) {
-               u16 block_size = get_blocksize(general);
-               if (block_size >= sizeof(*general)) {
-                       int bus_pin = general->crt_ddc_gmbus_pin;
-                       DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
-                       if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
-                               dev_priv->vbt.crt_ddc_pin = bus_pin;
-               } else {
-                       DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
-                                     block_size);
-               }
-       }
-}
-
 static const struct child_device_config *
 child_device_ptr(const struct bdb_general_definitions *defs, int i)
 {
@@ -459,41 +438,24 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
 }
 
 static void
-parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
-                         const struct bdb_header *bdb)
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
 {
        struct sdvo_device_mapping *mapping;
-       const struct bdb_general_definitions *defs;
        const struct child_device_config *child;
-       int i, child_device_num, count;
-       u16     block_size;
-
-       defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
-       if (!defs) {
-               DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
-               return;
-       }
+       int i, count = 0;
 
        /*
-        * Only parse SDVO mappings when the general definitions block child
-        * device size matches that of the *legacy* child device config
-        * struct. Thus, SDVO mapping will be skipped for newer VBT.
+        * Only parse SDVO mappings on gens that could have SDVO. This isn't
+        * accurate and doesn't have to be, as long as it's not too strict.
         */
-       if (defs->child_dev_size != LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
-               DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
+       if (!IS_GEN(dev_priv, 3, 7)) {
+               DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
                return;
        }
-       /* get the block size of general definitions */
-       block_size = get_blocksize(defs);
-       /* get the number of child device */
-       child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
-       count = 0;
-       for (i = 0; i < child_device_num; i++) {
-               child = child_device_ptr(defs, i);
-               if (!child->device_type) {
-                       /* skip the device block if device type is invalid */
-                       continue;
-               }
+
+       for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               child = dev_priv->vbt.child_dev + i;
+
                if (child->slave_addr != SLAVE_ADDR1 &&
                    child->slave_addr != SLAVE_ADDR2) {
                        /*
@@ -544,7 +506,6 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                /* No SDVO device info is found */
                DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
        }
-       return;
 }
 
 static void
@@ -1111,7 +1072,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 }
 
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
-                          const struct bdb_header *bdb)
+                          u8 bdb_version)
 {
        struct child_device_config *it, *child = NULL;
        struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
@@ -1215,7 +1176,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
                sanitize_aux_ch(dev_priv, port);
        }
 
-       if (bdb->version >= 158) {
+       if (bdb_version >= 158) {
                /* The VBT HDMI level shift values match the table we have. */
                hdmi_level_shift = child->hdmi_level_shifter_value;
                DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
@@ -1225,7 +1186,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        }
 
        /* Parse the I_boost config for SKL and above */
-       if (bdb->version >= 196 && child->iboost) {
+       if (bdb_version >= 196 && child->iboost) {
                info->dp_boost_level = translate_iboost(child->dp_iboost_level);
                DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
                              port_name(port), info->dp_boost_level);
@@ -1235,40 +1196,52 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        }
 }
 
-static void parse_ddi_ports(struct drm_i915_private *dev_priv,
-                           const struct bdb_header *bdb)
+static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
 {
        enum port port;
 
-       if (!HAS_DDI(dev_priv))
+       if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                return;
 
        if (!dev_priv->vbt.child_dev_num)
                return;
 
-       if (bdb->version < 155)
+       if (bdb_version < 155)
                return;
 
        for (port = PORT_A; port < I915_MAX_PORTS; port++)
-               parse_ddi_port(dev_priv, port, bdb);
+               parse_ddi_port(dev_priv, port, bdb_version);
 }
 
 static void
-parse_device_mapping(struct drm_i915_private *dev_priv,
-                    const struct bdb_header *bdb)
+parse_general_definitions(struct drm_i915_private *dev_priv,
+                         const struct bdb_header *bdb)
 {
        const struct bdb_general_definitions *defs;
        const struct child_device_config *child;
-       struct child_device_config *child_dev_ptr;
        int i, child_device_num, count;
        u8 expected_size;
        u16 block_size;
+       int bus_pin;
 
        defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (!defs) {
                DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
                return;
        }
+
+       block_size = get_blocksize(defs);
+       if (block_size < sizeof(*defs)) {
+               DRM_DEBUG_KMS("General definitions block too small (%u)\n",
+                             block_size);
+               return;
+       }
+
+       bus_pin = defs->crt_ddc_gmbus_pin;
+       DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+       if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
+               dev_priv->vbt.crt_ddc_pin = bus_pin;
+
        if (bdb->version < 106) {
                expected_size = 22;
        } else if (bdb->version < 111) {
@@ -1298,18 +1271,14 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
                return;
        }
 
-       /* get the block size of general definitions */
-       block_size = get_blocksize(defs);
        /* get the number of child device */
        child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
        count = 0;
        /* get the number of child device that is present */
        for (i = 0; i < child_device_num; i++) {
                child = child_device_ptr(defs, i);
-               if (!child->device_type) {
-                       /* skip the device block if device type is invalid */
+               if (!child->device_type)
                        continue;
-               }
                count++;
        }
        if (!count) {
@@ -1326,36 +1295,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        count = 0;
        for (i = 0; i < child_device_num; i++) {
                child = child_device_ptr(defs, i);
-               if (!child->device_type) {
-                       /* skip the device block if device type is invalid */
+               if (!child->device_type)
                        continue;
-               }
-
-               child_dev_ptr = dev_priv->vbt.child_dev + count;
-               count++;
 
                /*
                 * Copy as much as we know (sizeof) and is available
                 * (child_dev_size) of the child device. Accessing the data must
                 * depend on VBT version.
                 */
-               memcpy(child_dev_ptr, child,
+               memcpy(dev_priv->vbt.child_dev + count, child,
                       min_t(size_t, defs->child_dev_size, sizeof(*child)));
-
-               /*
-                * copied full block, now init values when they are not
-                * available in current version
-                */
-               if (bdb->version < 196) {
-                       /* Set default values for bits added from v196 */
-                       child_dev_ptr->iboost = 0;
-                       child_dev_ptr->hpd_invert = 0;
-               }
-
-               if (bdb->version < 192)
-                       child_dev_ptr->lspcon = 0;
+               count++;
        }
-       return;
 }
 
 /* Common defaults which may be overridden by VBT. */
@@ -1536,14 +1487,15 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
        parse_lfp_panel_data(dev_priv, bdb);
        parse_lfp_backlight(dev_priv, bdb);
        parse_sdvo_panel_data(dev_priv, bdb);
-       parse_sdvo_device_mapping(dev_priv, bdb);
-       parse_device_mapping(dev_priv, bdb);
        parse_driver_features(dev_priv, bdb);
        parse_edp(dev_priv, bdb);
        parse_psr(dev_priv, bdb);
        parse_mipi_config(dev_priv, bdb);
        parse_mipi_sequence(dev_priv, bdb);
-       parse_ddi_ports(dev_priv, bdb);
+
+       /* Further processing on pre-parsed data */
+       parse_sdvo_device_mapping(dev_priv, bdb->version);
+       parse_ddi_ports(dev_priv, bdb->version);
 
 out:
        if (!vbt) {
index 87fc42b19336240ebec65ba6c2db1bc9d82c3287..b2a6d62b71c049d27bd6664aab8083c80ae92265 100644 (file)
@@ -503,7 +503,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
        else
                cmd = 0;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        val &= ~DSPFREQGUAR_MASK;
        val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -513,7 +513,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
                     50)) {
                DRM_ERROR("timed out waiting for CDclk change\n");
        }
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        mutex_lock(&dev_priv->sb_lock);
 
@@ -590,7 +590,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
         */
        cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        val &= ~DSPFREQGUAR_MASK_CHV;
        val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
@@ -600,7 +600,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
                     50)) {
                DRM_ERROR("timed out waiting for CDclk change\n");
        }
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        intel_update_cdclk(dev_priv);
 
@@ -656,10 +656,10 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
                 "trying to change cdclk frequency with cdclk not enabled\n"))
                return;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        ret = sandybridge_pcode_write(dev_priv,
                                      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
        if (ret) {
                DRM_ERROR("failed to inform pcode about cdclk change\n");
                return;
@@ -712,9 +712,9 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
                        LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
                DRM_ERROR("Switching back to LCPLL failed\n");
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
 
@@ -928,12 +928,12 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 
        WARN_ON((cdclk == 24000) != (vco == 0));
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
                                SKL_CDCLK_PREPARE_FOR_CHANGE,
                                SKL_CDCLK_READY_FOR_CHANGE,
                                SKL_CDCLK_READY_FOR_CHANGE, 3);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
        if (ret) {
                DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
                          ret);
@@ -975,9 +975,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
        POSTING_READ(CDCLK_CTL);
 
        /* inform PCU of the change */
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        intel_update_cdclk(dev_priv);
 }
@@ -1268,10 +1268,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
        }
 
        /* Inform power controller of upcoming frequency change */
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
                                      0x80000000);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        if (ret) {
                DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
@@ -1300,10 +1300,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
                val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
        I915_WRITE(CDCLK_CTL, val);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
                                      DIV_ROUND_UP(cdclk, 25000));
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        if (ret) {
                DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
@@ -1518,12 +1518,12 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
        u32 val, divider, pcu_ack;
        int ret;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
                                SKL_CDCLK_PREPARE_FOR_CHANGE,
                                SKL_CDCLK_READY_FOR_CHANGE,
                                SKL_CDCLK_READY_FOR_CHANGE, 3);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
        if (ret) {
                DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
                          ret);
@@ -1575,9 +1575,9 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
        I915_WRITE(CDCLK_CTL, val);
 
        /* inform PCU of the change */
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
        sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        intel_update_cdclk(dev_priv);
 }
index ff9ecd211abbb07fd8bae3fb6d55a8c33b23770d..b8315bca852b56061ff4cf598245f7902722c09f 100644 (file)
@@ -74,7 +74,7 @@
 #define I9XX_CSC_COEFF_1_0             \
        ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
 
-static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
 {
        return !state->degamma_lut &&
                !state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
        }
 
        mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
-       if (!crtc_state_is_legacy(state)) {
+       if (!crtc_state_is_legacy_gamma(state)) {
                mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
                        (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
        }
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
        struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
        enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                haswell_load_luts(state);
                return;
        }
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
 
        glk_load_degamma_lut(state);
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                haswell_load_luts(state);
                return;
        }
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
        uint32_t i, lut_size;
        uint32_t word0, word1;
 
-       if (crtc_state_is_legacy(state)) {
+       if (crtc_state_is_legacy_gamma(state)) {
                /* Turn off degamma/gamma on CGM block. */
                I915_WRITE(CGM_PIPE_MODE(pipe),
                           (state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
                return 0;
 
        /*
-        * We also allow no degamma lut and a gamma lut at the legacy
+        * We also allow no degamma lut/ctm and a gamma lut at the legacy
         * size (256 entries).
         */
-       if (!crtc_state->degamma_lut &&
-           crtc_state->gamma_lut &&
-           crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+       if (crtc_state_is_legacy_gamma(crtc_state))
                return 0;
 
        return -EINVAL;
index 954070255b4dc52214af7afb8927d6e590d061a7..668e8c3e791dbf9afc925d84e258781a8422bf98 100644 (file)
@@ -213,6 +213,19 @@ static void pch_post_disable_crt(struct intel_encoder *encoder,
        intel_disable_crt(encoder, old_crtc_state, old_conn_state);
 }
 
+static void hsw_disable_crt(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *old_crtc_state,
+                           const struct drm_connector_state *old_conn_state)
+{
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+       intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+}
+
 static void hsw_post_disable_crt(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
                                 const struct drm_connector_state *old_conn_state)
@@ -225,6 +238,58 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
        lpt_disable_iclkip(dev_priv);
 
        intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+
+       WARN_ON(!old_crtc_state->has_pch_encoder);
+
+       intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+}
+
+static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
+                                  const struct intel_crtc_state *pipe_config,
+                                  const struct drm_connector_state *conn_state)
+{
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+       intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+}
+
+static void hsw_pre_enable_crt(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *pipe_config,
+                              const struct drm_connector_state *conn_state)
+{
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+       dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
+}
+
+static void hsw_enable_crt(struct intel_encoder *encoder,
+                          const struct intel_crtc_state *pipe_config,
+                          const struct drm_connector_state *conn_state)
+{
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+       intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
+
+       intel_wait_for_vblank(dev_priv, pipe);
+       intel_wait_for_vblank(dev_priv, pipe);
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 }
 
 static void intel_enable_crt(struct intel_encoder *encoder,
@@ -890,26 +955,31 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 
        crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
 
-       crt->base.compute_config = intel_crt_compute_config;
-       if (HAS_PCH_SPLIT(dev_priv)) {
-               crt->base.disable = pch_disable_crt;
-               crt->base.post_disable = pch_post_disable_crt;
-       } else {
-               crt->base.disable = intel_disable_crt;
-       }
-       crt->base.enable = intel_enable_crt;
        if (I915_HAS_HOTPLUG(dev_priv) &&
            !dmi_check_system(intel_spurious_crt_detect))
                crt->base.hpd_pin = HPD_CRT;
+
+       crt->base.compute_config = intel_crt_compute_config;
        if (HAS_DDI(dev_priv)) {
                crt->base.port = PORT_E;
                crt->base.get_config = hsw_crt_get_config;
                crt->base.get_hw_state = intel_ddi_get_hw_state;
+               crt->base.pre_pll_enable = hsw_pre_pll_enable_crt;
+               crt->base.pre_enable = hsw_pre_enable_crt;
+               crt->base.enable = hsw_enable_crt;
+               crt->base.disable = hsw_disable_crt;
                crt->base.post_disable = hsw_post_disable_crt;
        } else {
+               if (HAS_PCH_SPLIT(dev_priv)) {
+                       crt->base.disable = pch_disable_crt;
+                       crt->base.post_disable = pch_post_disable_crt;
+               } else {
+                       crt->base.disable = intel_disable_crt;
+               }
                crt->base.port = PORT_NONE;
                crt->base.get_config = intel_crt_get_config;
                crt->base.get_hw_state = intel_crt_get_hw_state;
+               crt->base.enable = intel_enable_crt;
        }
        intel_connector->get_hw_state = intel_connector_get_hw_state;
 
index cdfb624eb82d9e06898e88dff3b13a6b769d65f9..ea5d5c9645a4999c5a524b511b8eb78c6831320c 100644 (file)
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
 
        mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
 
-       if (IS_BROXTON(dev_priv))
+       if (IS_GEN9_LP(dev_priv))
                mask |= DC_STATE_DEBUG_MASK_CORES;
 
        /* The below bit doesn't need to be cleared ever afterwards */
index 93cbbcbbc19315b4bd5447827c80b0f685af24a3..b307b6fe1ce385300627f080f773b4d9294a360c 100644 (file)
@@ -602,8 +602,10 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
        } else if (voltage == VOLTAGE_INFO_1_05V) {
                *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
                return cnl_ddi_translations_hdmi_1_05V;
-       } else
+       } else {
+               *n_entries = 1; /* shut up gcc */
                MISSING_CASE(voltage);
+       }
        return NULL;
 }
 
@@ -621,8 +623,10 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
        } else if (voltage == VOLTAGE_INFO_1_05V) {
                *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
                return cnl_ddi_translations_dp_1_05V;
-       } else
+       } else {
+               *n_entries = 1; /* shut up gcc */
                MISSING_CASE(voltage);
+       }
        return NULL;
 }
 
@@ -641,8 +645,10 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
                } else if (voltage == VOLTAGE_INFO_1_05V) {
                        *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
                        return cnl_ddi_translations_edp_1_05V;
-               } else
+               } else {
+                       *n_entries = 1; /* shut up gcc */
                        MISSING_CASE(voltage);
+               }
                return NULL;
        } else {
                return cnl_get_buf_trans_dp(dev_priv, n_entries);
@@ -1214,6 +1220,9 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
        dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
                      DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
 
+       if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+               return 0;
+
        return dco_freq / (p0 * p1 * p2 * 5);
 }
 
@@ -1713,7 +1722,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 out:
        if (ret && IS_GEN9_LP(dev_priv)) {
                tmp = I915_READ(BXT_PHY_CTL(port));
-               if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+               if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                           BXT_PHY_LANE_POWERDOWN_ACK |
                            BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
                        DRM_ERROR("Port %c enabled but PHY powered down? "
                                  "(PHY_CTL %08x)\n", port_name(port), tmp);
@@ -2161,7 +2171,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
                intel_prepare_dp_ddi_buffers(encoder);
 
        intel_ddi_init_dp_buf_reg(encoder);
-       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       if (!link_mst)
+               intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
@@ -2205,8 +2216,16 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *pipe_config,
                                 const struct drm_connector_state *conn_state)
 {
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
        int type = encoder->type;
 
+       WARN_ON(intel_crtc->config->has_pch_encoder);
+
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
        if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
                intel_ddi_pre_enable_dp(encoder,
                                        pipe_config->port_clock,
@@ -2235,12 +2254,21 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
        uint32_t val;
        bool wait = false;
 
-       /* old_crtc_state and old_conn_state are NULL when called from DP_MST */
-
        if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
+               /*
+                * old_crtc_state and old_conn_state are NULL when called from
+                * DP_MST. The main connector associated with this port is never
+                * bound to a crtc for MST.
+                */
+               bool is_mst = !old_crtc_state;
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-               intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+               /*
+                * Power down sink before disabling the port, otherwise we end
+                * up getting interrupts from the sink on detecting link loss.
+                */
+               if (!is_mst)
+                       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        }
 
        val = I915_READ(DDI_BUF_CTL(port));
index 615c58e48613ba3340f062cb33668a5d9bddac6e..7e91dc9a0fcf958293c418959694a6e250740da3 100644 (file)
@@ -1539,7 +1539,7 @@ static void chv_enable_pll(struct intel_crtc *crtc,
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
-               I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
+               I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
@@ -1568,11 +1568,12 @@ static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
        return count;
 }
 
-static void i9xx_enable_pll(struct intel_crtc *crtc)
+static void i9xx_enable_pll(struct intel_crtc *crtc,
+                           const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
-       u32 dpll = crtc->config->dpll_hw_state.dpll;
+       u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;
 
        assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1609,7 +1610,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
 
        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
-                          crtc->config->dpll_hw_state.dpll_md);
+                          crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
@@ -1627,15 +1628,6 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
        }
 }
 
-/**
- * i9xx_disable_pll - disable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to disable
- *
- * Disable the PLL for @pipe, making sure the pipe is off first.
- *
- * Note!  This is for pre-ILK only.
- */
 static void i9xx_disable_pll(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2219,8 +2211,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
                 * something and try to run the system in a "less than optimal"
                 * mode that matches the user configuration.
                 */
-               if (i915_vma_get_fence(vma) == 0)
-                       i915_vma_pin_fence(vma);
+               i915_vma_pin_fence(vma);
        }
 
        i915_vma_get(vma);
@@ -4955,10 +4946,10 @@ void hsw_enable_ips(struct intel_crtc *crtc)
 
        assert_plane_enabled(dev_priv, crtc->plane);
        if (IS_BROADWELL(dev_priv)) {
-               mutex_lock(&dev_priv->rps.hw_lock);
+               mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
-               mutex_unlock(&dev_priv->rps.hw_lock);
+               mutex_unlock(&dev_priv->pcu_lock);
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
@@ -4988,9 +4979,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
 
        assert_plane_enabled(dev_priv, crtc->plane);
        if (IS_BROADWELL(dev_priv)) {
-               mutex_lock(&dev_priv->rps.hw_lock);
+               mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
-               mutex_unlock(&dev_priv->rps.hw_lock);
+               mutex_unlock(&dev_priv->pcu_lock);
                /* wait for pcode to finish disabling IPS, which may take up to 42ms */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
@@ -5459,6 +5450,20 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
        return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
 }
 
+static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
+                                           enum pipe pipe, bool apply)
+{
+       u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
+       u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+
+       if (apply)
+               val |= mask;
+       else
+               val &= ~mask;
+
+       I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
+}
+
 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                                struct drm_atomic_state *old_state)
 {
@@ -5469,13 +5474,11 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
+       bool psl_clkgate_wa;
 
        if (WARN_ON(intel_crtc->active))
                return;
 
-       if (intel_crtc->config->has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
        intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->shared_dpll)
@@ -5509,19 +5512,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
        intel_crtc->active = true;
 
-       if (intel_crtc->config->has_pch_encoder)
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-       else
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
-       if (intel_crtc->config->has_pch_encoder)
-               dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
-
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_pipe_clock(pipe_config);
 
+       /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
+       psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+                        intel_crtc->config->pch_pfit.enabled;
+       if (psl_clkgate_wa)
+               glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(intel_crtc);
        else
@@ -5555,11 +5556,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
        intel_encoders_enable(crtc, pipe_config, old_state);
 
-       if (intel_crtc->config->has_pch_encoder) {
+       if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
-               intel_wait_for_vblank(dev_priv, pipe);
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-               intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+               glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }
 
        /* If we change the relative order between pipe/planes enabling, we need
@@ -5655,9 +5654,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
-       if (intel_crtc->config->has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
        intel_encoders_disable(crtc, old_crtc_state, old_state);
 
        drm_crtc_vblank_off(crtc);
@@ -5682,9 +5678,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                intel_ddi_disable_pipe_clock(intel_crtc->config);
 
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
-
-       if (old_crtc_state->has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5894,7 +5887,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
 
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
-       i9xx_enable_pll(intel_crtc);
+       i9xx_enable_pll(intel_crtc, pipe_config);
 
        i9xx_pfit_enable(intel_crtc);
 
@@ -8846,11 +8839,11 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 {
        if (IS_HASWELL(dev_priv)) {
-               mutex_lock(&dev_priv->rps.hw_lock);
+               mutex_lock(&dev_priv->pcu_lock);
                if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
                                            val))
                        DRM_DEBUG_KMS("Failed to write to D_COMP\n");
-               mutex_unlock(&dev_priv->rps.hw_lock);
+               mutex_unlock(&dev_priv->pcu_lock);
        } else {
                I915_WRITE(D_COMP_BDW, val);
                POSTING_READ(D_COMP_BDW);
@@ -10245,58 +10238,44 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                         &pipe_config->fdi_m_n);
 }
 
-/** Returns the currently programmed mode of the given pipe. */
-struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-                                            struct drm_crtc *crtc)
+/* Returns the currently programmed mode of the given encoder. */
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc_state *crtc_state;
        struct drm_display_mode *mode;
-       struct intel_crtc_state *pipe_config;
-       int htot = I915_READ(HTOTAL(cpu_transcoder));
-       int hsync = I915_READ(HSYNC(cpu_transcoder));
-       int vtot = I915_READ(VTOTAL(cpu_transcoder));
-       int vsync = I915_READ(VSYNC(cpu_transcoder));
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc;
+       enum pipe pipe;
+
+       if (!encoder->get_hw_state(encoder, &pipe))
+               return NULL;
+
+       crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
        if (!mode)
                return NULL;
 
-       pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
-       if (!pipe_config) {
+       crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+       if (!crtc_state) {
                kfree(mode);
                return NULL;
        }
 
-       /*
-        * Construct a pipe_config sufficient for getting the clock info
-        * back out of crtc_clock_get.
-        *
-        * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
-        * to use a real value here instead.
-        */
-       pipe_config->cpu_transcoder = (enum transcoder) pipe;
-       pipe_config->pixel_multiplier = 1;
-       pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
-       pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
-       pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
-       i9xx_crtc_clock_get(intel_crtc, pipe_config);
-
-       mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
-       mode->hdisplay = (htot & 0xffff) + 1;
-       mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
-       mode->hsync_start = (hsync & 0xffff) + 1;
-       mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
-       mode->vdisplay = (vtot & 0xffff) + 1;
-       mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
-       mode->vsync_start = (vsync & 0xffff) + 1;
-       mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+       crtc_state->base.crtc = &crtc->base;
 
-       drm_mode_set_name(mode);
+       if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
+               kfree(crtc_state);
+               kfree(mode);
+               return NULL;
+       }
 
-       kfree(pipe_config);
+       encoder->get_config(encoder, crtc_state);
+
+       intel_mode_from_pipe_config(mode, crtc_state);
+
+       kfree(crtc_state);
 
        return mode;
 }
@@ -11336,6 +11315,18 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
        PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
+       PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
+       PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
+       PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
+       PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
 
        PIPE_CONF_CHECK_X(dsi_pll.ctrl);
        PIPE_CONF_CHECK_X(dsi_pll.div);
@@ -12220,7 +12211,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
                        if (updated & cmask || !cstate->base.active)
                                continue;
 
-                       if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
+                       if (skl_ddb_allocation_overlaps(dev_priv,
+                                                       entries,
+                                                       &cstate->wm.skl.ddb,
+                                                       i))
                                continue;
 
                        updated |= cmask;
@@ -12515,21 +12509,10 @@ static int intel_atomic_commit(struct drm_device *dev,
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
-       ret = drm_atomic_helper_setup_commit(state, nonblock);
-       if (ret)
-               return ret;
-
        drm_atomic_state_get(state);
        i915_sw_fence_init(&intel_state->commit_ready,
                           intel_atomic_commit_ready);
 
-       ret = intel_atomic_prepare_commit(dev, state);
-       if (ret) {
-               DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
-               i915_sw_fence_commit(&intel_state->commit_ready);
-               return ret;
-       }
-
        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
@@ -12538,19 +12521,37 @@ static int intel_atomic_commit(struct drm_device *dev,
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
-        * Do this after drm_atomic_helper_setup_commit() and
-        * intel_atomic_prepare_commit() because we still want
-        * to skip the flip and fb cleanup waits. Although that
-        * does risk yanking the mapping from under the display
-        * engine.
+        * Unset state->legacy_cursor_update before the call to
+        * drm_atomic_helper_setup_commit() because otherwise
+        * drm_atomic_helper_wait_for_flip_done() is a noop and
+        * we get FIFO underruns because we didn't wait
+        * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
-       if (INTEL_GEN(dev_priv) < 9)
-               state->legacy_cursor_update = false;
+       if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
+               struct intel_crtc_state *new_crtc_state;
+               struct intel_crtc *crtc;
+               int i;
+
+               for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
+                       if (new_crtc_state->wm.need_postvbl_update ||
+                           new_crtc_state->update_wm_post)
+                               state->legacy_cursor_update = false;
+       }
+
+       ret = intel_atomic_prepare_commit(dev, state);
+       if (ret) {
+               DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+               i915_sw_fence_commit(&intel_state->commit_ready);
+               return ret;
+       }
+
+       ret = drm_atomic_helper_setup_commit(state, nonblock);
+       if (!ret)
+               ret = drm_atomic_helper_swap_state(state, true);
 
-       ret = drm_atomic_helper_swap_state(state, true);
        if (ret) {
                i915_sw_fence_commit(&intel_state->commit_ready);
 
@@ -14748,10 +14749,10 @@ static struct intel_connector *intel_encoder_find_connector(struct intel_encoder
 }
 
 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
-                             enum transcoder pch_transcoder)
+                             enum pipe pch_transcoder)
 {
        return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
-               (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
+               (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
 }
 
 static void intel_sanitize_crtc(struct intel_crtc *crtc,
@@ -14834,7 +14835,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
                 * PCH transcoders B and C would prevent enabling the south
                 * error interrupt (see cpt_can_enable_serr_int()).
                 */
-               if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
+               if (has_pch_trancoder(dev_priv, crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
 }
index 90e756c76f109f94cbb028c8fff2bdc6a110c21c..b0f446b68f42309c4b52e8cc97e8f8e41f757a6e 100644 (file)
@@ -137,32 +137,20 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe);
 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 
-static int intel_dp_num_rates(u8 link_bw_code)
-{
-       switch (link_bw_code) {
-       default:
-               WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
-                    link_bw_code);
-       case DP_LINK_BW_1_62:
-               return 1;
-       case DP_LINK_BW_2_7:
-               return 2;
-       case DP_LINK_BW_5_4:
-               return 3;
-       }
-}
-
 /* update sink rates from dpcd */
 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
 {
-       int i, num_rates;
+       int i, max_rate;
 
-       num_rates = intel_dp_num_rates(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+       max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
 
-       for (i = 0; i < num_rates; i++)
+       for (i = 0; i < ARRAY_SIZE(default_rates); i++) {
+               if (default_rates[i] > max_rate)
+                       break;
                intel_dp->sink_rates[i] = default_rates[i];
+       }
 
-       intel_dp->num_sink_rates = num_rates;
+       intel_dp->num_sink_rates = i;
 }
 
 /* Theoretical max between source and sink */
@@ -254,15 +242,15 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
        } else if (IS_GEN9_BC(dev_priv)) {
                source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
-       } else {
+       } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+                  IS_BROADWELL(dev_priv)) {
                source_rates = default_rates;
                size = ARRAY_SIZE(default_rates);
+       } else {
+               source_rates = default_rates;
+               size = ARRAY_SIZE(default_rates) - 1;
        }
 
-       /* This depends on the fact that 5.4 is last value in the array */
-       if (!intel_dp_source_supports_hbr2(intel_dp))
-               size--;
-
        intel_dp->source_rates = source_rates;
        intel_dp->num_source_rates = size;
 }
@@ -1482,14 +1470,9 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
 
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+       int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
 
-       if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
-           IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
-               return true;
-       else
-               return false;
+       return max_rate >= 540000;
 }
 
 static void
@@ -2308,8 +2291,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
 
-       intel_dp->panel_power_off_time = ktime_get_boottime();
        wait_panel_off(intel_dp);
+       intel_dp->panel_power_off_time = ktime_get_boottime();
 
        /* We got a reference when we enabled the VDD. */
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5286,7 +5269,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
         * seems sufficient to avoid this problem.
         */
        if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
-               vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
+               vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
                DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
                              vbt.t11_t12);
        }
index 9a396f483f8beeb468c7ccbd3d0b8edf962b0b9c..3c131e2544cfae81e41f80caca54db478d3bf944 100644 (file)
@@ -162,14 +162,19 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
 
        drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
 
+       /*
+        * Power down mst path before disabling the port, otherwise we end
+        * up getting interrupts from the sink upon detecting link loss.
+        */
+       drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+                                    false);
+
        intel_dp->active_mst_links--;
 
        intel_mst->connector = NULL;
        if (intel_dp->active_mst_links == 0) {
                intel_dig_port->base.post_disable(&intel_dig_port->base,
                                                  NULL, NULL);
-
-               intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        }
        DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 }
@@ -196,6 +201,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 
        DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 
+       drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
        if (intel_dp->active_mst_links == 0)
                intel_dig_port->base.pre_enable(&intel_dig_port->base,
                                                pipe_config, NULL);
index 09b6709297867d6751957dc12541812fe3f30344..de38d014ed39b937abc348988830a2f188815901 100644 (file)
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
        },
 };
 
-static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
-{
-       return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
-               BIT(phy_info->channel[DPIO_CH0].port);
-}
-
 static const struct bxt_ddi_phy_info *
 bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
 {
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
                            enum dpio_phy phy)
 {
        const struct bxt_ddi_phy_info *phy_info;
-       enum port port;
 
        phy_info = bxt_get_phy_info(dev_priv, phy);
 
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
                return false;
        }
 
-       for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
-               u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
-               if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
-                       DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
-                                        "for port %c powered down "
-                                        "(PHY_CTL %08x)\n",
-                                        phy, port_name(port), tmp);
-
-                       return false;
-               }
-       }
-
        return true;
 }
 
index 0cab667fff575f76f3aaa67030d4a7be6324a2ed..b87946dcc53f95027b0706f915fdbedcff2575dd 100644 (file)
@@ -1243,7 +1243,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
 static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
                                            u32 mask)
 {
-       return mask & ~i915->rps.pm_intrmsk_mbz;
+       return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
 }
 
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
@@ -1254,7 +1254,7 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
         * We only use drm_irq_uninstall() at unload and VT switch, so
         * this is the only thing we need to check.
         */
-       return dev_priv->pm.irqs_enabled;
+       return dev_priv->runtime_pm.irqs_enabled;
 }
 
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
@@ -1363,8 +1363,9 @@ struct intel_connector *intel_connector_alloc(void);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
 void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder);
-struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-                                            struct drm_crtc *crtc);
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder);
+
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
@@ -1790,7 +1791,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 static inline void
 assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
 {
-       WARN_ONCE(dev_priv->pm.suspended,
+       WARN_ONCE(dev_priv->runtime_pm.suspended,
                  "Device suspended during HW access\n");
 }
 
@@ -1798,7 +1799,7 @@ static inline void
 assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
 {
        assert_rpm_device_not_suspended(dev_priv);
-       WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count),
+       WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count),
                  "RPM wakelock ref not held during HW access");
 }
 
@@ -1823,7 +1824,7 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
 static inline void
 disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
 {
-       atomic_inc(&dev_priv->pm.wakeref_count);
+       atomic_inc(&dev_priv->runtime_pm.wakeref_count);
 }
 
 /**
@@ -1840,7 +1841,7 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
 static inline void
 enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
 {
-       atomic_dec(&dev_priv->pm.wakeref_count);
+       atomic_dec(&dev_priv->runtime_pm.wakeref_count);
 }
 
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
@@ -1893,7 +1894,8 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
 int intel_disable_sagv(struct drm_i915_private *dev_priv);
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
                         const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
+                                const struct skl_ddb_entry **entries,
                                 const struct skl_ddb_entry *ddb,
                                 int ignore);
 bool ilk_disable_lp_wm(struct drm_device *dev);
@@ -1902,7 +1904,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
                                  struct intel_crtc_state *cstate);
 void intel_init_ipc(struct drm_i915_private *dev_priv);
 void intel_enable_ipc(struct drm_i915_private *dev_priv);
-static inline int intel_enable_rc6(void)
+static inline int intel_rc6_enabled(void)
 {
        return i915_modparams.enable_rc6;
 }
index 20a7b004ffd70fda9b7bb25cb9623fad93a4b35d..66bbedc5fa01d1e79e6869e8cdfdd8bd4b734e34 100644 (file)
@@ -790,14 +790,19 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *pipe_config,
                                 const struct drm_connector_state *conn_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       struct drm_crtc *crtc = pipe_config->base.crtc;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
        enum port port;
        u32 val;
        bool glk_cold_boot = false;
 
        DRM_DEBUG_KMS("\n");
 
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
        /*
         * The BIOS may leave the PLL in a wonky state where it doesn't
         * lock. It needs to be fully powered down to fix it.
index 5c562e1f56ae57b5c628cef0a96318eb71ceda31..53c9b763f4ce24ce562e6436e0cf480e62311dc5 100644 (file)
@@ -379,32 +379,15 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
  * chip being on DVOB/C and having multiple pipes.
  */
 static struct drm_display_mode *
-intel_dvo_get_current_mode(struct drm_connector *connector)
+intel_dvo_get_current_mode(struct intel_encoder *encoder)
 {
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
-       uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
-       struct drm_display_mode *mode = NULL;
+       struct drm_display_mode *mode;
 
-       /* If the DVO port is active, that'll be the LVDS, so we can pull out
-        * its timings to get how the BIOS set up the panel.
-        */
-       if (dvo_val & DVO_ENABLE) {
-               struct intel_crtc *crtc;
-               int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
-
-               crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-               if (crtc) {
-                       mode = intel_crtc_mode_get(dev, &crtc->base);
-                       if (mode) {
-                               mode->type |= DRM_MODE_TYPE_PREFERRED;
-                               if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
-                                       mode->flags |= DRM_MODE_FLAG_PHSYNC;
-                               if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
-                                       mode->flags |= DRM_MODE_FLAG_PVSYNC;
-                       }
-               }
+       mode = intel_encoder_current_mode(encoder);
+       if (mode) {
+               DRM_DEBUG_KMS("using current (BIOS) mode: ");
+               drm_mode_debug_printmodeline(mode);
+               mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
 
        return mode;
@@ -551,7 +534,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
                         * mode being output through DVO.
                         */
                        intel_panel_init(&intel_connector->panel,
-                                        intel_dvo_get_current_mode(connector),
+                                        intel_dvo_get_current_mode(intel_encoder),
                                         NULL, NULL);
                        intel_dvo->panel_wants_dither = true;
                }
index a28e2a864cf1bbfe3d1482ffd65ecdf993efec12..a59b2a30ff5ab9b99998d12d27792c4aa15defad 100644 (file)
@@ -22,6 +22,8 @@
  *
  */
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"
@@ -39,7 +41,7 @@
 
 #define GEN8_LR_CONTEXT_RENDER_SIZE    (20 * PAGE_SIZE)
 #define GEN9_LR_CONTEXT_RENDER_SIZE    (22 * PAGE_SIZE)
-#define GEN10_LR_CONTEXT_RENDER_SIZE   (19 * PAGE_SIZE)
+#define GEN10_LR_CONTEXT_RENDER_SIZE   (18 * PAGE_SIZE)
 
 #define GEN8_LR_CONTEXT_OTHER_SIZE     ( 2 * PAGE_SIZE)
 
@@ -613,9 +615,22 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
        if (IS_ERR(ring))
                return PTR_ERR(ring);
 
+       /*
+        * Similarly the preempt context must always be available so that
+        * we can interrupt the engine at any time.
+        */
+       if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+               ring = engine->context_pin(engine,
+                                          engine->i915->preempt_context);
+               if (IS_ERR(ring)) {
+                       ret = PTR_ERR(ring);
+                       goto err_unpin_kernel;
+               }
+       }
+
        ret = intel_engine_init_breadcrumbs(engine);
        if (ret)
-               goto err_unpin;
+               goto err_unpin_preempt;
 
        ret = i915_gem_render_state_init(engine);
        if (ret)
@@ -634,7 +649,10 @@ err_rs_fini:
        i915_gem_render_state_fini(engine);
 err_breadcrumbs:
        intel_engine_fini_breadcrumbs(engine);
-err_unpin:
+err_unpin_preempt:
+       if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+               engine->context_unpin(engine, engine->i915->preempt_context);
+err_unpin_kernel:
        engine->context_unpin(engine, engine->i915->kernel_context);
        return ret;
 }
@@ -660,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        intel_engine_cleanup_cmd_parser(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
 
+       if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+               engine->context_unpin(engine, engine->i915->preempt_context);
        engine->context_unpin(engine, engine->i915->kernel_context);
 }
 
@@ -830,11 +850,6 @@ static int wa_add(struct drm_i915_private *dev_priv,
 #define WA_SET_FIELD_MASKED(addr, mask, value) \
        WA_REG(addr, mask, _MASKED_FIELD(mask, value))
 
-#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
-#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
-
-#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
-
 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
                                 i915_reg_t reg)
 {
@@ -845,8 +860,8 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
        if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
                return -EINVAL;
 
-       WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
-                i915_mmio_reg_offset(reg));
+       I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
+                  i915_mmio_reg_offset(reg));
        wa->hw_whitelist_count[engine->id]++;
 
        return 0;
@@ -980,7 +995,11 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
                                  GEN9_PBE_COMPRESSED_HASH_SELECTION);
                WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                                  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
-               WA_SET_BIT(MMCD_MISC_CTRL, MMCD_PCLA | MMCD_HOTSPOT_EN);
+
+               I915_WRITE(MMCD_MISC_CTRL,
+                          I915_READ(MMCD_MISC_CTRL) |
+                          MMCD_PCLA |
+                          MMCD_HOTSPOT_EN);
        }
 
        /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
@@ -1071,13 +1090,33 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
+       /*
+        * Supporting preemption with fine-granularity requires changes in the
+        * batch buffer programming. Since we can't break old userspace, we
+        * need to set our default preemption level to safe value. Userspace is
+        * still able to use more fine-grained preemption levels, since in
+        * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
+        * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
+        * not real HW workarounds, but merely a way to start using preemption
+        * while maintaining old contract with userspace.
+        */
+
+       /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+       WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+       /* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
+       WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+                           GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
        /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
        ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
        if (ret)
                return ret;
 
-       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
-       ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+       ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
        if (ret)
                return ret;
 
@@ -1139,14 +1178,6 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
-       /*
-        * Actual WA is to disable percontext preemption granularity control
-        * until D0 which is the default case so this is equivalent to
-        * !WaDisablePerCtxtPreemptionGranularityControl:skl
-        */
-       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-
        /* WaEnableGapsTsvCreditFix:skl */
        I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                   GEN9_GAPS_TSV_CREDIT_DISABLE));
@@ -1278,7 +1309,16 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
        /* FtrEnableFastAnisoL1BankingFix: cnl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
 
+       /* WaDisable3DMidCmdPreemption:cnl */
+       WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+       /* WaDisableGPGPUMidCmdPreemption:cnl */
+       WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+                           GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
        /* WaEnablePreemptionGranularityControlByUMD:cnl */
+       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
        ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
        if (ret)
                return ret;
@@ -1578,6 +1618,164 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
        }
 }
 
+static void print_request(struct drm_printer *m,
+                         struct drm_i915_gem_request *rq,
+                         const char *prefix)
+{
+       drm_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
+                  rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+                  rq->priotree.priority,
+                  jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+                  rq->timeline->common->name);
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct i915_gpu_error *error = &engine->i915->gpu_error;
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct drm_i915_gem_request *rq;
+       struct rb_node *rb;
+       u64 addr;
+
+       drm_printf(m, "%s\n", engine->name);
+       drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+                  intel_engine_get_seqno(engine),
+                  intel_engine_last_submit(engine),
+                  engine->hangcheck.seqno,
+                  jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+                  engine->timeline->inflight_seqnos);
+       drm_printf(m, "\tReset count: %d\n",
+                  i915_reset_engine_count(error, engine));
+
+       rcu_read_lock();
+
+       drm_printf(m, "\tRequests:\n");
+
+       rq = list_first_entry(&engine->timeline->requests,
+                             struct drm_i915_gem_request, link);
+       if (&rq->link != &engine->timeline->requests)
+               print_request(m, rq, "\t\tfirst  ");
+
+       rq = list_last_entry(&engine->timeline->requests,
+                            struct drm_i915_gem_request, link);
+       if (&rq->link != &engine->timeline->requests)
+               print_request(m, rq, "\t\tlast   ");
+
+       rq = i915_gem_find_active_request(engine);
+       if (rq) {
+               print_request(m, rq, "\t\tactive ");
+               drm_printf(m,
+                          "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+                          rq->head, rq->postfix, rq->tail,
+                          rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+                          rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+       }
+
+       drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+                  I915_READ(RING_START(engine->mmio_base)),
+                  rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+       drm_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
+                  I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+                  rq ? rq->ring->head : 0);
+       drm_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
+                  I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+                  rq ? rq->ring->tail : 0);
+       drm_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
+                  I915_READ(RING_CTL(engine->mmio_base)),
+                  I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+       rcu_read_unlock();
+
+       addr = intel_engine_get_active_head(engine);
+       drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
+                  upper_32_bits(addr), lower_32_bits(addr));
+       addr = intel_engine_get_last_batch_head(engine);
+       drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+                  upper_32_bits(addr), lower_32_bits(addr));
+
+       if (i915_modparams.enable_execlists) {
+               const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+               struct intel_engine_execlists * const execlists = &engine->execlists;
+               u32 ptr, read, write;
+               unsigned int idx;
+
+               drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
+                          I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+                          I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+               ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+               read = GEN8_CSB_READ_PTR(ptr);
+               write = GEN8_CSB_WRITE_PTR(ptr);
+               drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+                          read, execlists->csb_head,
+                          write,
+                          intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
+                          yesno(test_bit(ENGINE_IRQ_EXECLIST,
+                                         &engine->irq_posted)));
+               if (read >= GEN8_CSB_ENTRIES)
+                       read = 0;
+               if (write >= GEN8_CSB_ENTRIES)
+                       write = 0;
+               if (read > write)
+                       write += GEN8_CSB_ENTRIES;
+               while (read < write) {
+                       idx = ++read % GEN8_CSB_ENTRIES;
+                       drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
+                                  idx,
+                                  I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+                                  hws[idx * 2],
+                                  I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
+                                  hws[idx * 2 + 1]);
+               }
+
+               rcu_read_lock();
+               for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+                       unsigned int count;
+
+                       rq = port_unpack(&execlists->port[idx], &count);
+                       if (rq) {
+                               drm_printf(m, "\t\tELSP[%d] count=%d, ",
+                                          idx, count);
+                               print_request(m, rq, "rq: ");
+                       } else {
+                               drm_printf(m, "\t\tELSP[%d] idle\n",
+                                          idx);
+                       }
+               }
+               rcu_read_unlock();
+
+               spin_lock_irq(&engine->timeline->lock);
+               for (rb = execlists->first; rb; rb = rb_next(rb)) {
+                       struct i915_priolist *p =
+                               rb_entry(rb, typeof(*p), node);
+
+                       list_for_each_entry(rq, &p->requests,
+                                           priotree.link)
+                               print_request(m, rq, "\t\tQ ");
+               }
+               spin_unlock_irq(&engine->timeline->lock);
+       } else if (INTEL_GEN(dev_priv) > 6) {
+               drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+                          I915_READ(RING_PP_DIR_BASE(engine)));
+               drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+                          I915_READ(RING_PP_DIR_BASE_READ(engine)));
+               drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+                          I915_READ(RING_PP_DIR_DCLV(engine)));
+       }
+
+       spin_lock_irq(&b->rb_lock);
+       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+               struct intel_wait *w = rb_entry(rb, typeof(*w), node);
+
+               drm_printf(m, "\t%s [%d] waiting for %x\n",
+                          w->tsk->comm, w->tsk->pid, w->seqno);
+       }
+       spin_unlock_irq(&b->rb_lock);
+
+       drm_printf(m, "\n");
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
new file mode 100644 (file)
index 0000000..9e18c4f
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_guc.h"
+#include "i915_drv.h"
+
+static void gen8_guc_raise_irq(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+       I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+}
+
+static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
+{
+       GEM_BUG_ON(!guc->send_regs.base);
+       GEM_BUG_ON(!guc->send_regs.count);
+       GEM_BUG_ON(i >= guc->send_regs.count);
+
+       return _MMIO(guc->send_regs.base + 4 * i);
+}
+
+void intel_guc_init_send_regs(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       enum forcewake_domains fw_domains = 0;
+       unsigned int i;
+
+       guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+       guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+
+       for (i = 0; i < guc->send_regs.count; i++) {
+               fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                       guc_send_reg(guc, i),
+                                       FW_REG_READ | FW_REG_WRITE);
+       }
+       guc->send_regs.fw_domains = fw_domains;
+}
+
+void intel_guc_init_early(struct intel_guc *guc)
+{
+       intel_guc_ct_init_early(&guc->ct);
+
+       mutex_init(&guc->send_mutex);
+       guc->send = intel_guc_send_nop;
+       guc->notify = gen8_guc_raise_irq;
+}
+
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
+{
+       WARN(1, "Unexpected send: action=%#x\n", *action);
+       return -ENODEV;
+}
+
+/*
+ * This function implements the MMIO based host to GuC interface.
+ */
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       u32 status;
+       int i;
+       int ret;
+
+       GEM_BUG_ON(!len);
+       GEM_BUG_ON(len > guc->send_regs.count);
+
+       /* If CT is available, we expect to use MMIO only during init/fini */
+       GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
+               *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+               *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
+
+       mutex_lock(&guc->send_mutex);
+       intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
+
+       for (i = 0; i < len; i++)
+               I915_WRITE(guc_send_reg(guc, i), action[i]);
+
+       POSTING_READ(guc_send_reg(guc, i - 1));
+
+       intel_guc_notify(guc);
+
+       /*
+        * No GuC command should ever take longer than 10ms.
+        * Fast commands should still complete in 10us.
+        */
+       ret = __intel_wait_for_register_fw(dev_priv,
+                                          guc_send_reg(guc, 0),
+                                          INTEL_GUC_RECV_MASK,
+                                          INTEL_GUC_RECV_MASK,
+                                          10, 10, &status);
+       if (status != INTEL_GUC_STATUS_SUCCESS) {
+               /*
+                * Either the GuC explicitly returned an error (which
+                * we convert to -EIO here) or no response at all was
+                * received within the timeout limit (-ETIMEDOUT)
+                */
+               if (ret != -ETIMEDOUT)
+                       ret = -EIO;
+
+               DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
+                        " ret=%d status=0x%08X response=0x%08X\n",
+                        action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+       }
+
+       intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
+       mutex_unlock(&guc->send_mutex);
+
+       return ret;
+}
+
+int intel_guc_sample_forcewake(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       u32 action[2];
+
+       action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
+       /* WaRsDisableCoarsePowerGating:skl,bxt */
+       if (!intel_rc6_enabled() ||
+           NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+               action[1] = 0;
+       else
+               /* bit 0 and 1 are for Render and Media domain separately */
+               action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
+
+       return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+/**
+ * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
+ * @guc: intel_guc structure
+ * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
+ *
+ * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
+ * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
+ * intel_huc_auth().
+ *
+ * Return:     non-zero code on error
+ */
+int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_AUTHENTICATE_HUC,
+               rsa_offset
+       };
+
+       return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+/**
+ * intel_guc_suspend() - notify GuC entering suspend state
+ * @dev_priv:  i915 device private
+ */
+int intel_guc_suspend(struct drm_i915_private *dev_priv)
+{
+       struct intel_guc *guc = &dev_priv->guc;
+       struct i915_gem_context *ctx;
+       u32 data[3];
+
+       if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+               return 0;
+
+       gen9_disable_guc_interrupts(dev_priv);
+
+       ctx = dev_priv->kernel_context;
+
+       data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
+       /* any value greater than GUC_POWER_D0 */
+       data[1] = GUC_POWER_D1;
+       /* first page is shared data with GuC */
+       data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
+                 LRC_GUCSHR_PN * PAGE_SIZE;
+
+       return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_resume() - notify GuC resuming from suspend state
+ * @dev_priv:  i915 device private
+ */
+int intel_guc_resume(struct drm_i915_private *dev_priv)
+{
+       struct intel_guc *guc = &dev_priv->guc;
+       struct i915_gem_context *ctx;
+       u32 data[3];
+
+       if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+               return 0;
+
+       if (i915_modparams.guc_log_level >= 0)
+               gen9_enable_guc_interrupts(dev_priv);
+
+       ctx = dev_priv->kernel_context;
+
+       data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
+       data[1] = GUC_POWER_D0;
+       /* first page is shared data with GuC */
+       data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
+                 LRC_GUCSHR_PN * PAGE_SIZE;
+
+       return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * @guc:       the guc
+ * @size:      size of area to allocate (both virtual space and memory)
+ *
+ * This is a wrapper to create an object for use with the GuC. In order to
+ * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
+ * both some backing storage and a range inside the Global GTT. We must pin
+ * it in the GGTT somewhere other than [0, GUC_WOPCM_TOP) because that
+ * range is reserved inside GuC.
+ *
+ * Return:     A i915_vma if successful, otherwise an ERR_PTR.
+ */
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int ret;
+
+       obj = i915_gem_object_create(dev_priv, size);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+       if (IS_ERR(vma))
+               goto err;
+
+       ret = i915_vma_pin(vma, 0, PAGE_SIZE,
+                          PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       if (ret) {
+               vma = ERR_PTR(ret);
+               goto err;
+       }
+
+       return vma;
+
+err:
+       i915_gem_object_put(obj);
+       return vma;
+}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
new file mode 100644 (file)
index 0000000..aa9a7b5
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_H_
+#define _INTEL_GUC_H_
+
+#include "intel_uncore.h"
+#include "intel_guc_fwif.h"
+#include "intel_guc_ct.h"
+#include "intel_guc_log.h"
+#include "intel_uc_fw.h"
+#include "i915_guc_reg.h"
+#include "i915_vma.h"
+
+struct intel_guc {
+       struct intel_uc_fw fw;
+       struct intel_guc_log log;
+       struct intel_guc_ct ct;
+
+       /* Log snapshot if GuC errors during load */
+       struct drm_i915_gem_object *load_err_log;
+
+       /* intel_guc_recv interrupt related state */
+       bool interrupts_enabled;
+
+       struct i915_vma *ads_vma;
+       struct i915_vma *stage_desc_pool;
+       void *stage_desc_pool_vaddr;
+       struct ida stage_ids;
+
+       struct i915_guc_client *execbuf_client;
+
+       DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
+       /* Cyclic counter mod pagesize  */
+       u32 db_cacheline;
+
+       /* GuC's FW specific registers used in MMIO send */
+       struct {
+               u32 base;
+               unsigned int count;
+               enum forcewake_domains fw_domains;
+       } send_regs;
+
+       /* To serialize the intel_guc_send actions */
+       struct mutex send_mutex;
+
+       /* GuC's FW specific send function */
+       int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
+
+       /* GuC's FW specific notify function */
+       void (*notify)(struct intel_guc *guc);
+};
+
+static
+inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+       return guc->send(guc, action, len);
+}
+
+static inline void intel_guc_notify(struct intel_guc *guc)
+{
+       guc->notify(guc);
+}
+
+static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+{
+       u32 offset = i915_ggtt_offset(vma);
+
+       GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+       GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
+
+       return offset;
+}
+
+void intel_guc_init_early(struct intel_guc *guc);
+void intel_guc_init_send_regs(struct intel_guc *guc);
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_sample_forcewake(struct intel_guc *guc);
+int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
+int intel_guc_suspend(struct drm_i915_private *dev_priv);
+int intel_guc_resume(struct drm_i915_private *dev_priv);
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+
+int intel_guc_select_fw(struct intel_guc *guc);
+int intel_guc_init_hw(struct intel_guc *guc);
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
+
+#endif
index 7eb6b4fa1d6f953de393fe52d3f433aa968c7e7e..1c0a2a3de12111c8aad421c89111f9ed42ea2048 100644 (file)
  */
 
 struct uc_css_header {
-       uint32_t module_type;
+       u32 module_type;
        /* header_size includes all non-uCode bits, including css_header, rsa
         * key, modulus key and exponent data. */
-       uint32_t header_size_dw;
-       uint32_t header_version;
-       uint32_t module_id;
-       uint32_t module_vendor;
+       u32 header_size_dw;
+       u32 header_version;
+       u32 module_id;
+       u32 module_vendor;
        union {
                struct {
-                       uint8_t day;
-                       uint8_t month;
-                       uint16_t year;
+                       u8 day;
+                       u8 month;
+                       u16 year;
                };
-               uint32_t date;
+               u32 date;
        };
-       uint32_t size_dw; /* uCode plus header_size_dw */
-       uint32_t key_size_dw;
-       uint32_t modulus_size_dw;
-       uint32_t exponent_size_dw;
+       u32 size_dw; /* uCode plus header_size_dw */
+       u32 key_size_dw;
+       u32 modulus_size_dw;
+       u32 exponent_size_dw;
        union {
                struct {
-                       uint8_t hour;
-                       uint8_t min;
-                       uint16_t sec;
+                       u8 hour;
+                       u8 min;
+                       u16 sec;
                };
-               uint32_t time;
+               u32 time;
        };
 
        char username[8];
        char buildnumber[12];
        union {
                struct {
-                       uint32_t branch_client_version;
-                       uint32_t sw_version;
+                       u32 branch_client_version;
+                       u32 sw_version;
        } guc;
                struct {
-                       uint32_t sw_version;
-                       uint32_t reserved;
+                       u32 sw_version;
+                       u32 reserved;
        } huc;
        };
-       uint32_t prod_preprod_fw;
-       uint32_t reserved[12];
-       uint32_t header_info;
+       u32 prod_preprod_fw;
+       u32 reserved[12];
+       u32 header_info;
 } __packed;
 
 struct guc_doorbell_info {
index c9e25be4db406be2b4e5a1c17f4c3abe2e7998f7..c7a800a3798dd2c43c721dd9222dfe4ffb744168 100644 (file)
@@ -386,10 +386,7 @@ int intel_guc_select_fw(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-       guc->fw.path = NULL;
-       guc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
-       guc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
-       guc->fw.type = INTEL_UC_FW_TYPE_GUC;
+       intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
 
        if (i915_modparams.guc_firmware_path) {
                guc->fw.path = i915_modparams.guc_firmware_path;
index 6571d96704ad0ac904c6479737d5dfba7423094e..76d3eb1e4614123052807b3bf978d716fefb59e9 100644 (file)
  * IN THE SOFTWARE.
  *
  */
+
 #include <linux/debugfs.h>
 #include <linux/relay.h>
+
+#include "intel_guc_log.h"
 #include "i915_drv.h"
 
 static void guc_log_capture_logs(struct intel_guc *guc);
@@ -525,7 +528,8 @@ int intel_guc_log_create(struct intel_guc *guc)
 {
        struct i915_vma *vma;
        unsigned long offset;
-       uint32_t size, flags;
+       u32 flags;
+       u32 size;
        int ret;
 
        GEM_BUG_ON(guc->log.vma);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
new file mode 100644 (file)
index 0000000..f512cf7
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_LOG_H_
+#define _INTEL_GUC_LOG_H_
+
+#include <linux/workqueue.h>
+
+#include "intel_guc_fwif.h"
+
+struct drm_i915_private;
+struct intel_guc;
+
+struct intel_guc_log {
+       u32 flags;
+       struct i915_vma *vma;
+       /* The runtime stuff gets created only when GuC logging gets enabled */
+       struct {
+               void *buf_addr;
+               struct workqueue_struct *flush_wq;
+               struct work_struct flush_work;
+               struct rchan *relay_chan;
+       } runtime;
+       /* logging related stats */
+       u32 capture_miss_count;
+       u32 flush_interrupt_count;
+       u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
+       u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
+       u32 flush_count[GUC_MAX_LOG_BUFFER];
+};
+
+int intel_guc_log_create(struct intel_guc *guc);
+void intel_guc_log_destroy(struct intel_guc *guc);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
+void i915_guc_log_register(struct drm_i915_private *dev_priv);
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
+
+#endif
index 8b4b5352542281ef3c78c963cfd39f5dadac8b4f..4b4cf56d29ad0459ee23c1b4ac0344142b308397 100644 (file)
  * IN THE SOFTWARE.
  *
  */
-#include <linux/firmware.h>
+
+#include <linux/types.h>
+
+#include "intel_huc.h"
 #include "i915_drv.h"
-#include "intel_uc.h"
 
 /**
  * DOC: HuC Firmware
@@ -150,10 +152,7 @@ void intel_huc_select_fw(struct intel_huc *huc)
 {
        struct drm_i915_private *dev_priv = huc_to_i915(huc);
 
-       huc->fw.path = NULL;
-       huc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
-       huc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
-       huc->fw.type = INTEL_UC_FW_TYPE_HUC;
+       intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
 
        if (i915_modparams.huc_firmware_path) {
                huc->fw.path = i915_modparams.huc_firmware_path;
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
new file mode 100644 (file)
index 0000000..aaa38b9
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_HUC_H_
+#define _INTEL_HUC_H_
+
+#include "intel_uc_fw.h"
+
+struct intel_huc {
+       /* Generic uC firmware management */
+       struct intel_uc_fw fw;
+
+       /* HuC-specific additions */
+};
+
+void intel_huc_select_fw(struct intel_huc *huc);
+void intel_huc_init_hw(struct intel_huc *huc);
+void intel_huc_auth(struct intel_huc *huc);
+
+#endif
index 61cac26a8b05c6865e8fdab2721a0cf14530503a..fbfcf88d7fe31200f9b9dd8d8d8582e5f25f1644 100644 (file)
 
 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-
 #define WA_TAIL_DWORDS 2
+#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
+#define PREEMPT_ID 0x1
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                                            struct intel_engine_cs *engine);
@@ -243,8 +244,7 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl
                return 0;
 
        if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
-           USES_PPGTT(dev_priv) &&
-           i915_modparams.use_mmio_flip >= 0)
+           USES_PPGTT(dev_priv))
                return 1;
 
        return 0;
@@ -348,6 +348,43 @@ find_priolist:
        return ptr_pack_bits(p, first, 1);
 }
 
+static void unwind_wa_tail(struct drm_i915_gem_request *rq)
+{
+       rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
+       assert_ring_tail_valid(rq->ring, rq->tail);
+}
+
+static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+       struct drm_i915_gem_request *rq, *rn;
+       struct i915_priolist *uninitialized_var(p);
+       int last_prio = I915_PRIORITY_INVALID;
+
+       lockdep_assert_held(&engine->timeline->lock);
+
+       list_for_each_entry_safe_reverse(rq, rn,
+                                        &engine->timeline->requests,
+                                        link) {
+               if (i915_gem_request_completed(rq))
+                       return;
+
+               __i915_gem_request_unsubmit(rq);
+               unwind_wa_tail(rq);
+
+               GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
+               if (rq->priotree.priority != last_prio) {
+                       p = lookup_priolist(engine,
+                                           &rq->priotree,
+                                           rq->priotree.priority);
+                       p = ptr_mask_bits(p, 1);
+
+                       last_prio = rq->priotree.priority;
+               }
+
+               list_add(&rq->priotree.link, &p->requests);
+       }
+}
+
 static inline void
 execlists_context_status_change(struct drm_i915_gem_request *rq,
                                unsigned long status)
@@ -392,6 +429,12 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
        return ce->lrc_desc;
 }
 
+static inline void elsp_write(u64 desc, u32 __iomem *elsp)
+{
+       writel(upper_32_bits(desc), elsp);
+       writel(lower_32_bits(desc), elsp);
+}
+
 static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
        struct execlist_port *port = engine->execlists.port;
@@ -417,8 +460,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
                        desc = 0;
                }
 
-               writel(upper_32_bits(desc), elsp);
-               writel(lower_32_bits(desc), elsp);
+               elsp_write(desc, elsp);
        }
 }
 
@@ -451,26 +493,43 @@ static void port_assign(struct execlist_port *port,
        port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
 }
 
+static void inject_preempt_context(struct intel_engine_cs *engine)
+{
+       struct intel_context *ce =
+               &engine->i915->preempt_context->engine[engine->id];
+       u32 __iomem *elsp =
+               engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+       unsigned int n;
+
+       GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
+       GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
+
+       memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
+       ce->ring->tail += WA_TAIL_BYTES;
+       ce->ring->tail &= (ce->ring->size - 1);
+       ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
+
+       for (n = execlists_num_ports(&engine->execlists); --n; )
+               elsp_write(0, elsp);
+
+       elsp_write(ce->lrc_desc, elsp);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+       return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *last;
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
        const struct execlist_port * const last_port =
                &execlists->port[execlists->port_mask];
+       struct drm_i915_gem_request *last = port_request(port);
        struct rb_node *rb;
        bool submit = false;
 
-       last = port_request(port);
-       if (last)
-               /* WaIdleLiteRestore:bdw,skl
-                * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
-                * as we resubmit the request. See gen8_emit_breadcrumb()
-                * for where we prepare the padding after the end of the
-                * request.
-                */
-               last->tail = last->wa_tail;
-
        /* Hardware submission is through 2 ports. Conceptually each port
         * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
         * static for a context, and unique to each, so we only execute
@@ -495,7 +554,65 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        spin_lock_irq(&engine->timeline->lock);
        rb = execlists->first;
        GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-       while (rb) {
+       if (!rb)
+               goto unlock;
+
+       if (last) {
+               /*
+                * Don't resubmit or switch until all outstanding
+                * preemptions (lite-restore) are seen. Then we
+                * know the next preemption status we see corresponds
+                * to this ELSP update.
+                */
+               if (port_count(&port[0]) > 1)
+                       goto unlock;
+
+               if (can_preempt(engine) &&
+                   rb_entry(rb, struct i915_priolist, node)->priority >
+                   max(last->priotree.priority, 0)) {
+                       /*
+                        * Switch to our empty preempt context so
+                        * the state of the GPU is known (idle).
+                        */
+                       inject_preempt_context(engine);
+                       execlists->preempt = true;
+                       goto unlock;
+               } else {
+                       /*
+                        * In theory, we could coalesce more requests onto
+                        * the second port (the first port is active, with
+                        * no preemptions pending). However, that means we
+                        * then have to deal with the possible lite-restore
+                        * of the second port (as we submit the ELSP, there
+                        * may be a context-switch) but also we may complete
+                        * the resubmission before the context-switch. Ergo,
+                        * coalescing onto the second port will cause a
+                        * preemption event, but we cannot predict whether
+                        * that will affect port[0] or port[1].
+                        *
+                        * If the second port is already active, we can wait
+                        * until the next context-switch before contemplating
+                        * new requests. The GPU will be busy and we should be
+                        * able to resubmit the new ELSP before it idles,
+                        * avoiding pipeline bubbles (momentary pauses where
+                        * the driver is unable to keep up the supply of new
+                        * work).
+                        */
+                       if (port_count(&port[1]))
+                               goto unlock;
+
+                       /* WaIdleLiteRestore:bdw,skl
+                        * Apply the wa NOOPs to prevent
+                        * ring:HEAD == req:TAIL as we resubmit the
+                        * request. See gen8_emit_breadcrumb() for
+                        * where we prepare the padding after the
+                        * end of the request.
+                        */
+                       last->tail = last->wa_tail;
+               }
+       }
+
+       do {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
                struct drm_i915_gem_request *rq, *rn;
 
@@ -547,8 +664,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                        }
 
                        INIT_LIST_HEAD(&rq->priotree.link);
-                       rq->priotree.priority = INT_MAX;
-
                        __i915_gem_request_submit(rq);
                        trace_i915_gem_request_in(rq, port_index(port, execlists));
                        last = rq;
@@ -560,11 +675,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
-       }
+       } while (rb);
 done:
        execlists->first = rb;
        if (submit)
                port_assign(port, last);
+unlock:
        spin_unlock_irq(&engine->timeline->lock);
 
        if (submit)
@@ -575,12 +691,12 @@ static void
 execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
 {
        struct execlist_port *port = execlists->port;
-       unsigned int num_ports = ARRAY_SIZE(execlists->port);
+       unsigned int num_ports = execlists_num_ports(execlists);
 
        while (num_ports-- && port_isset(port)) {
                struct drm_i915_gem_request *rq = port_request(port);
 
-               execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+               execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
                i915_gem_request_put(rq);
 
                memset(port, 0, sizeof(*port));
@@ -645,13 +761,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
-static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
-{
-       const struct execlist_port *port = engine->execlists.port;
-
-       return port_count(&port[0]) + port_count(&port[1]) < 2;
-}
-
 /*
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
@@ -660,7 +769,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 {
        struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct execlist_port *port = execlists->port;
+       struct execlist_port * const port = execlists->port;
        struct drm_i915_private *dev_priv = engine->i915;
 
        /* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -745,6 +854,23 @@ static void intel_lrc_irq_handler(unsigned long data)
                        if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
                                continue;
 
+                       if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
+                           buf[2*head + 1] == PREEMPT_ID) {
+                               execlist_cancel_port_requests(execlists);
+
+                               spin_lock_irq(&engine->timeline->lock);
+                               unwind_incomplete_requests(engine);
+                               spin_unlock_irq(&engine->timeline->lock);
+
+                               GEM_BUG_ON(!execlists->preempt);
+                               execlists->preempt = false;
+                               continue;
+                       }
+
+                       if (status & GEN8_CTX_STATUS_PREEMPTED &&
+                           execlists->preempt)
+                               continue;
+
                        /* Check the context/desc id for this event matches */
                        GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
 
@@ -756,6 +882,7 @@ static void intel_lrc_irq_handler(unsigned long data)
                                execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 
                                trace_i915_gem_request_out(rq);
+                               rq->priotree.priority = INT_MAX;
                                i915_gem_request_put(rq);
 
                                execlists_port_complete(execlists, port);
@@ -775,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
                }
        }
 
-       if (execlists_elsp_ready(engine))
+       if (!execlists->preempt)
                execlists_dequeue(engine);
 
        intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
@@ -788,7 +915,7 @@ static void insert_request(struct intel_engine_cs *engine,
        struct i915_priolist *p = lookup_priolist(engine, pt, prio);
 
        list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
-       if (ptr_unmask_bits(p, 1) && execlists_elsp_ready(engine))
+       if (ptr_unmask_bits(p, 1))
                tasklet_hi_schedule(&engine->execlists.irq_tasklet);
 }
 
@@ -808,11 +935,15 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
+static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+{
+       return container_of(pt, struct drm_i915_gem_request, priotree);
+}
+
 static struct intel_engine_cs *
 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 {
-       struct intel_engine_cs *engine =
-               container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+       struct intel_engine_cs *engine = pt_to_request(pt)->engine;
 
        GEM_BUG_ON(!locked);
 
@@ -831,6 +962,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
        struct i915_dependency stack;
        LIST_HEAD(dfs);
 
+       GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
        if (prio <= READ_ONCE(request->priotree.priority))
                return;
 
@@ -866,6 +999,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
                 * engines.
                 */
                list_for_each_entry(p, &pt->signalers_list, signal_link) {
+                       if (i915_gem_request_completed(pt_to_request(p->signaler)))
+                               continue;
+
                        GEM_BUG_ON(p->signaler->priority < pt->priority);
                        if (prio > READ_ONCE(p->signaler->priority))
                                list_move_tail(&p->dfs_link, &dfs);
@@ -879,7 +1015,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
-       if (request->priotree.priority == INT_MIN) {
+       if (request->priotree.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&request->priotree.link));
                request->priotree.priority = prio;
                if (stack.dfs_link.next == stack.dfs_link.prev)
@@ -909,8 +1045,6 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
        }
 
        spin_unlock_irq(&engine->timeline->lock);
-
-       /* XXX Do we need to preempt to make room for us and our deps? */
 }
 
 static struct intel_ring *
@@ -1106,6 +1240,8 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
                                       i915_ggtt_offset(engine->scratch) +
                                       2 * CACHELINE_BYTES);
 
+       *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
        /* Pad to end of cacheline */
        while ((unsigned long)batch % CACHELINE_BYTES)
                *batch++ = MI_NOOP;
@@ -1119,26 +1255,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        return batch;
 }
 
-/*
- *  This batch is started immediately after indirect_ctx batch. Since we ensure
- *  that indirect_ctx ends on a cacheline this batch is aligned automatically.
- *
- *  The number of DWORDS written are returned using this field.
- *
- *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
- *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
- */
-static u32 *gen8_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
-       /* WaDisableCtxRestoreArbitration:bdw,chv */
-       *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-       *batch++ = MI_BATCH_BUFFER_END;
-
-       return batch;
-}
-
 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 {
+       *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
        batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
@@ -1184,6 +1304,8 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
                *batch++ = 0;
        }
 
+       *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
        /* Pad to end of cacheline */
        while ((unsigned long)batch % CACHELINE_BYTES)
                *batch++ = MI_NOOP;
@@ -1251,7 +1373,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
                break;
        case 8:
                wa_bb_fn[0] = gen8_init_indirectctx_bb;
-               wa_bb_fn[1] = gen8_init_perctx_bb;
+               wa_bb_fn[1] = NULL;
                break;
        default:
                MISSING_CASE(INTEL_GEN(engine->i915));
@@ -1337,6 +1459,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
                   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
        clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
        execlists->csb_head = -1;
+       execlists->preempt = false;
 
        /* After a GPU reset, we may have requests to replay */
        if (!i915_modparams.enable_guc_submission && execlists->first)
@@ -1382,7 +1505,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
                              struct drm_i915_gem_request *request)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct drm_i915_gem_request *rq, *rn;
        struct intel_context *ce;
        unsigned long flags;
 
@@ -1400,21 +1522,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        execlist_cancel_port_requests(execlists);
 
        /* Push back any incomplete requests for replay after the reset. */
-       list_for_each_entry_safe_reverse(rq, rn,
-                                        &engine->timeline->requests, link) {
-               struct i915_priolist *p;
-
-               if (i915_gem_request_completed(rq))
-                       break;
-
-               __i915_gem_request_unsubmit(rq);
-
-               p = lookup_priolist(engine,
-                                   &rq->priotree,
-                                   rq->priotree.priority);
-               list_add(&rq->priotree.link,
-                        &ptr_mask_bits(p, 1)->requests);
-       }
+       unwind_incomplete_requests(engine);
 
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
 
@@ -1451,10 +1559,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        intel_ring_update_space(request->ring);
 
        /* Reset WaIdleLiteRestore:bdw,skl as well */
-       request->tail =
-               intel_ring_wrap(request->ring,
-                               request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
-       assert_ring_tail_valid(request->ring, request->tail);
+       unwind_wa_tail(request);
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1513,13 +1618,31 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
+       /*
+        * WaDisableCtxRestoreArbitration:bdw,chv
+        *
+        * We don't need to perform MI_ARB_ENABLE as often as we do (in
+        * particular all the gen that do not need the w/a at all!), if we
+        * took care to make sure that on every switch into this context
+        * (both ordinary and for preemption) that arbitrartion was enabled
+        * we would be fine. However, there doesn't seem to be a downside to
+        * being paranoid and making sure it is set before each batch and
+        * every context-switch.
+        *
+        * Note that if we fail to enable arbitration before the request
+        * is complete, then we do not see the context-switch interrupt and
+        * the engine hangs (with RING_HEAD == RING_TAIL).
+        *
+        * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
+        */
+       *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
        /* FIXME(BDW): Address space and security selectors. */
        *cs++ = MI_BATCH_BUFFER_START_GEN8 |
                (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) |
                (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
        *cs++ = lower_32_bits(offset);
        *cs++ = upper_32_bits(offset);
-       *cs++ = MI_NOOP;
        intel_ring_advance(req, cs);
 
        return 0;
@@ -1648,7 +1771,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
  */
 static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
 {
-       *cs++ = MI_NOOP;
+       /* Ensure there's always at least one preemption point per-request. */
+       *cs++ = MI_ARB_CHECK;
        *cs++ = MI_NOOP;
        request->wa_tail = intel_ring_offset(request, cs);
 }
@@ -1669,7 +1793,6 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
 
        gen8_emit_wa_tail(request, cs);
 }
-
 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
 
 static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
@@ -1697,7 +1820,6 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
 
        gen8_emit_wa_tail(request, cs);
 }
-
 static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
 
 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
index 314adee7127a5f93473d3e3a227dc7e6d8f0a8bf..689fde1a63a90c7a2c0d2aa54a289a232c4d2464 100644 (file)
@@ -61,6 +61,7 @@
 enum {
        INTEL_CONTEXT_SCHEDULE_IN = 0,
        INTEL_CONTEXT_SCHEDULE_OUT,
+       INTEL_CONTEXT_SCHEDULE_PREEMPTED,
 };
 
 /* Logical Rings */
index a55954a89148befc776176f633f8be17bd587197..38572d65e46ea75dbd1ac4ba482991340c091034 100644 (file)
@@ -939,10 +939,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        struct edid *edid;
-       struct intel_crtc *crtc;
        i915_reg_t lvds_reg;
        u32 lvds;
-       int pipe;
        u8 pin;
        u32 allowed_scalers;
 
@@ -1113,22 +1111,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
         * on.  If so, assume that whatever is currently programmed is the
         * correct mode.
         */
-
-       /* Ironlake: FIXME if still fail, not try pipe mode now */
-       if (HAS_PCH_SPLIT(dev_priv))
-               goto failed;
-
-       pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
-       crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
-       if (crtc && (lvds & LVDS_PORT_EN)) {
-               fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
-               if (fixed_mode) {
-                       DRM_DEBUG_KMS("using current (BIOS) mode: ");
-                       drm_mode_debug_printmodeline(fixed_mode);
-                       fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
-                       goto out;
-               }
+       fixed_mode = intel_encoder_current_mode(intel_encoder);
+       if (fixed_mode) {
+               DRM_DEBUG_KMS("using current (BIOS) mode: ");
+               drm_mode_debug_printmodeline(fixed_mode);
+               fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
 
        /* If we still don't have a mode after all that, give up. */
index 96043a51c1bf566ec90398a783bcc080b0d55483..899839f2f7c6fe1609ad79172968ba820134eb35 100644 (file)
@@ -206,11 +206,11 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
 static int display_crc_ctl_show(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = m->private;
-       int i;
+       enum pipe pipe;
 
-       for (i = 0; i < I915_MAX_PIPES; i++)
-               seq_printf(m, "%c %s\n", pipe_name(i),
-                          pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+       for_each_pipe(dev_priv, pipe)
+               seq_printf(m, "%c %s\n", pipe_name(pipe),
+                          pipe_crc_source_name(dev_priv->pipe_crc[pipe].source));
 
        return 0;
 }
@@ -775,11 +775,12 @@ display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
        return -EINVAL;
 }
 
-static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
+                                     const char *buf, enum pipe *pipe)
 {
        const char name = buf[0];
 
-       if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+       if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes))
                return -EINVAL;
 
        *pipe = name - 'A';
@@ -828,7 +829,7 @@ static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
                return -EINVAL;
        }
 
-       if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+       if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) {
                DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
                return -EINVAL;
        }
index c66af09e27a7541364e34462ed405c72bfa213a3..2fcff9788b6f3122d152aef60ce2ddcc26230fad 100644 (file)
@@ -322,7 +322,7 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
 {
        u32 val;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
        if (enable)
@@ -337,14 +337,14 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
                      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
                DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 {
        u32 val;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        if (enable)
@@ -353,7 +353,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
                val &= ~DSP_MAXFIFO_PM5_ENABLE;
        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 }
 
 #define FW_WM(value, plane) \
@@ -2790,11 +2790,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 
                /* read the first set of memory latencies[0:3] */
                val = 0; /* data0 to be programmed to 0 for first set */
-               mutex_lock(&dev_priv->rps.hw_lock);
+               mutex_lock(&dev_priv->pcu_lock);
                ret = sandybridge_pcode_read(dev_priv,
                                             GEN9_PCODE_READ_MEM_LATENCY,
                                             &val);
-               mutex_unlock(&dev_priv->rps.hw_lock);
+               mutex_unlock(&dev_priv->pcu_lock);
 
                if (ret) {
                        DRM_ERROR("SKL Mailbox read error = %d\n", ret);
@@ -2811,11 +2811,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 
                /* read the second set of memory latencies[4:7] */
                val = 1; /* data0 to be programmed to 1 for second set */
-               mutex_lock(&dev_priv->rps.hw_lock);
+               mutex_lock(&dev_priv->pcu_lock);
                ret = sandybridge_pcode_read(dev_priv,
                                             GEN9_PCODE_READ_MEM_LATENCY,
                                             &val);
-               mutex_unlock(&dev_priv->rps.hw_lock);
+               mutex_unlock(&dev_priv->pcu_lock);
                if (ret) {
                        DRM_ERROR("SKL Mailbox read error = %d\n", ret);
                        return;
@@ -3608,13 +3608,13 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
                return 0;
 
        DRM_DEBUG_KMS("Enabling the SAGV\n");
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
                                      GEN9_SAGV_ENABLE);
 
        /* We don't need to wait for the SAGV when enabling */
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        /*
         * Some skl systems, pre-release machines in particular,
@@ -3645,14 +3645,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
                return 0;
 
        DRM_DEBUG_KMS("Disabling the SAGV\n");
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        /* bspec says to keep retrying for at least 1 ms */
        ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
                                GEN9_SAGV_DISABLE,
                                GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
                                1);
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        /*
         * Some skl systems, pre-release machines in particular,
@@ -4820,16 +4820,18 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
        return a->start < b->end && b->start < a->end;
 }
 
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
+                                const struct skl_ddb_entry **entries,
                                 const struct skl_ddb_entry *ddb,
                                 int ignore)
 {
-       int i;
+       enum pipe pipe;
 
-       for (i = 0; i < I915_MAX_PIPES; i++)
-               if (i != ignore && entries[i] &&
-                   skl_ddb_entries_overlap(ddb, entries[i]))
+       for_each_pipe(dev_priv, pipe) {
+               if (pipe != ignore && entries[pipe] &&
+                   skl_ddb_entries_overlap(ddb, entries[pipe]))
                        return true;
+       }
 
        return false;
 }
@@ -5619,7 +5621,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
        wm->level = VLV_WM_LEVEL_PM2;
 
        if (IS_CHERRYVIEW(dev_priv)) {
-               mutex_lock(&dev_priv->rps.hw_lock);
+               mutex_lock(&dev_priv->pcu_lock);
 
                val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
                if (val & DSP_MAXFIFO_PM5_ENABLE)
@@ -5649,7 +5651,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
                                wm->level = VLV_WM_LEVEL_DDR_DVFS;
                }
 
-               mutex_unlock(&dev_priv->rps.hw_lock);
+               mutex_unlock(&dev_priv->pcu_lock);
        }
 
        for_each_intel_crtc(dev, crtc) {
@@ -5827,6 +5829,12 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
 {
        u32 val;
 
+       /* Display WA #0477 WaDisableIPC: skl */
+       if (IS_SKYLAKE(dev_priv)) {
+               dev_priv->ipc_enabled = false;
+               return;
+       }
+
        val = I915_READ(DISP_ARB_CTL2);
 
        if (dev_priv->ipc_enabled)
@@ -5980,6 +5988,7 @@ static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
  */
 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 limits;
 
        /* Only set the down limit when we've reached the lowest level to avoid
@@ -5989,13 +5998,13 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt. */
        if (INTEL_GEN(dev_priv) >= 9) {
-               limits = (dev_priv->rps.max_freq_softlimit) << 23;
-               if (val <= dev_priv->rps.min_freq_softlimit)
-                       limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+               limits = (rps->max_freq_softlimit) << 23;
+               if (val <= rps->min_freq_softlimit)
+                       limits |= (rps->min_freq_softlimit) << 14;
        } else {
-               limits = dev_priv->rps.max_freq_softlimit << 24;
-               if (val <= dev_priv->rps.min_freq_softlimit)
-                       limits |= dev_priv->rps.min_freq_softlimit << 16;
+               limits = rps->max_freq_softlimit << 24;
+               if (val <= rps->min_freq_softlimit)
+                       limits |= rps->min_freq_softlimit << 16;
        }
 
        return limits;
@@ -6003,39 +6012,40 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 
 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int new_power;
        u32 threshold_up = 0, threshold_down = 0; /* in % */
        u32 ei_up = 0, ei_down = 0;
 
-       new_power = dev_priv->rps.power;
-       switch (dev_priv->rps.power) {
+       new_power = rps->power;
+       switch (rps->power) {
        case LOW_POWER:
-               if (val > dev_priv->rps.efficient_freq + 1 &&
-                   val > dev_priv->rps.cur_freq)
+               if (val > rps->efficient_freq + 1 &&
+                   val > rps->cur_freq)
                        new_power = BETWEEN;
                break;
 
        case BETWEEN:
-               if (val <= dev_priv->rps.efficient_freq &&
-                   val < dev_priv->rps.cur_freq)
+               if (val <= rps->efficient_freq &&
+                   val < rps->cur_freq)
                        new_power = LOW_POWER;
-               else if (val >= dev_priv->rps.rp0_freq &&
-                        val > dev_priv->rps.cur_freq)
+               else if (val >= rps->rp0_freq &&
+                        val > rps->cur_freq)
                        new_power = HIGH_POWER;
                break;
 
        case HIGH_POWER:
-               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
-                   val < dev_priv->rps.cur_freq)
+               if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+                   val < rps->cur_freq)
                        new_power = BETWEEN;
                break;
        }
        /* Max/min bins are special */
-       if (val <= dev_priv->rps.min_freq_softlimit)
+       if (val <= rps->min_freq_softlimit)
                new_power = LOW_POWER;
-       if (val >= dev_priv->rps.max_freq_softlimit)
+       if (val >= rps->max_freq_softlimit)
                new_power = HIGH_POWER;
-       if (new_power == dev_priv->rps.power)
+       if (new_power == rps->power)
                return;
 
        /* Note the units here are not exactly 1us, but 1280ns. */
@@ -6098,20 +6108,21 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
                   GEN6_RP_DOWN_IDLE_AVG);
 
 skip_hw_write:
-       dev_priv->rps.power = new_power;
-       dev_priv->rps.up_threshold = threshold_up;
-       dev_priv->rps.down_threshold = threshold_down;
-       dev_priv->rps.last_adj = 0;
+       rps->power = new_power;
+       rps->up_threshold = threshold_up;
+       rps->down_threshold = threshold_down;
+       rps->last_adj = 0;
 }
 
 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 mask = 0;
 
        /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
-       if (val > dev_priv->rps.min_freq_softlimit)
+       if (val > rps->min_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
-       if (val < dev_priv->rps.max_freq_softlimit)
+       if (val < rps->max_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
        mask &= dev_priv->pm_rps_events;
@@ -6124,10 +6135,12 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* min/max delay may still have been modified so be sure to
         * write the limits value.
         */
-       if (val != dev_priv->rps.cur_freq) {
+       if (val != rps->cur_freq) {
                gen6_set_rps_thresholds(dev_priv, val);
 
                if (INTEL_GEN(dev_priv) >= 9)
@@ -6149,7 +6162,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
        I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-       dev_priv->rps.cur_freq = val;
+       rps->cur_freq = val;
        trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 
        return 0;
@@ -6165,7 +6178,7 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
 
        I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-       if (val != dev_priv->rps.cur_freq) {
+       if (val != dev_priv->gt_pm.rps.cur_freq) {
                err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
                if (err)
                        return err;
@@ -6173,7 +6186,7 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
                gen6_set_rps_thresholds(dev_priv, val);
        }
 
-       dev_priv->rps.cur_freq = val;
+       dev_priv->gt_pm.rps.cur_freq = val;
        trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 
        return 0;
@@ -6188,10 +6201,11 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
 */
 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 {
-       u32 val = dev_priv->rps.idle_freq;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       u32 val = rps->idle_freq;
        int err;
 
-       if (dev_priv->rps.cur_freq <= val)
+       if (rps->cur_freq <= val)
                return;
 
        /* The punit delays the write of the frequency and voltage until it
@@ -6216,34 +6230,38 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 
 void gen6_rps_busy(struct drm_i915_private *dev_priv)
 {
-       mutex_lock(&dev_priv->rps.hw_lock);
-       if (dev_priv->rps.enabled) {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       mutex_lock(&dev_priv->pcu_lock);
+       if (rps->enabled) {
                u8 freq;
 
                if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
                        gen6_rps_reset_ei(dev_priv);
                I915_WRITE(GEN6_PMINTRMSK,
-                          gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+                          gen6_rps_pm_mask(dev_priv, rps->cur_freq));
 
                gen6_enable_rps_interrupts(dev_priv);
 
                /* Use the user's desired frequency as a guide, but for better
                 * performance, jump directly to RPe as our starting frequency.
                 */
-               freq = max(dev_priv->rps.cur_freq,
-                          dev_priv->rps.efficient_freq);
+               freq = max(rps->cur_freq,
+                          rps->efficient_freq);
 
                if (intel_set_rps(dev_priv,
                                  clamp(freq,
-                                       dev_priv->rps.min_freq_softlimit,
-                                       dev_priv->rps.max_freq_softlimit)))
+                                       rps->min_freq_softlimit,
+                                       rps->max_freq_softlimit)))
                        DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
        }
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* Flush our bottom-half so that it does not race with us
         * setting the idle frequency and so that it is bounded by
         * our rpm wakeref. And then disable the interrupts to stop any
@@ -6251,36 +6269,36 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
         */
        gen6_disable_rps_interrupts(dev_priv);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
-       if (dev_priv->rps.enabled) {
+       mutex_lock(&dev_priv->pcu_lock);
+       if (rps->enabled) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                        vlv_set_rps_idle(dev_priv);
                else
-                       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
-               dev_priv->rps.last_adj = 0;
+                       gen6_set_rps(dev_priv, rps->idle_freq);
+               rps->last_adj = 0;
                I915_WRITE(GEN6_PMINTRMSK,
                           gen6_sanitize_rps_pm_mask(dev_priv, ~0));
        }
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 }
 
 void gen6_rps_boost(struct drm_i915_gem_request *rq,
-                   struct intel_rps_client *rps)
+                   struct intel_rps_client *rps_client)
 {
-       struct drm_i915_private *i915 = rq->i915;
+       struct intel_rps *rps = &rq->i915->gt_pm.rps;
        unsigned long flags;
        bool boost;
 
        /* This is intentionally racy! We peek at the state here, then
         * validate inside the RPS worker.
         */
-       if (!i915->rps.enabled)
+       if (!rps->enabled)
                return;
 
        boost = false;
        spin_lock_irqsave(&rq->lock, flags);
        if (!rq->waitboost && !i915_gem_request_completed(rq)) {
-               atomic_inc(&i915->rps.num_waiters);
+               atomic_inc(&rps->num_waiters);
                rq->waitboost = true;
                boost = true;
        }
@@ -6288,22 +6306,23 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
        if (!boost)
                return;
 
-       if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
-               schedule_work(&i915->rps.work);
+       if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
+               schedule_work(&rps->work);
 
-       atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
+       atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
 }
 
 int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int err;
 
-       lockdep_assert_held(&dev_priv->rps.hw_lock);
-       GEM_BUG_ON(val > dev_priv->rps.max_freq);
-       GEM_BUG_ON(val < dev_priv->rps.min_freq);
+       lockdep_assert_held(&dev_priv->pcu_lock);
+       GEM_BUG_ON(val > rps->max_freq);
+       GEM_BUG_ON(val < rps->min_freq);
 
-       if (!dev_priv->rps.enabled) {
-               dev_priv->rps.cur_freq = val;
+       if (!rps->enabled) {
+               rps->cur_freq = val;
                return 0;
        }
 
@@ -6326,21 +6345,30 @@ static void gen9_disable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RP_CONTROL, 0);
 }
 
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
 {
        I915_WRITE(GEN6_RC_CONTROL, 0);
+}
+
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+{
        I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
        I915_WRITE(GEN6_RP_CONTROL, 0);
 }
 
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
 {
        I915_WRITE(GEN6_RC_CONTROL, 0);
 }
 
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
 {
-       /* we're doing forcewake before Disabling RC6,
+       I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
+static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
+{
+       /* We're doing forcewake before Disabling RC6,
         * This what the BIOS expects when going into suspend */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -6349,6 +6377,11 @@ static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
+static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
 {
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -6471,24 +6504,26 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
 
 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* All of these values are in units of 50MHz */
 
        /* static values from HW: RP0 > RP1 > RPn (min_freq) */
        if (IS_GEN9_LP(dev_priv)) {
                u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
-               dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
-               dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
-               dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
+               rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+               rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
+               rps->min_freq = (rp_state_cap >>  0) & 0xff;
        } else {
                u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-               dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
-               dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
-               dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+               rps->rp0_freq = (rp_state_cap >>  0) & 0xff;
+               rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
+               rps->min_freq = (rp_state_cap >> 16) & 0xff;
        }
        /* hw_max = RP0 until we check for overclocking */
-       dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+       rps->max_freq = rps->rp0_freq;
 
-       dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+       rps->efficient_freq = rps->rp1_freq;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
            IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                u32 ddcc_status = 0;
@@ -6496,33 +6531,34 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
                if (sandybridge_pcode_read(dev_priv,
                                           HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
                                           &ddcc_status) == 0)
-                       dev_priv->rps.efficient_freq =
+                       rps->efficient_freq =
                                clamp_t(u8,
                                        ((ddcc_status >> 8) & 0xff),
-                                       dev_priv->rps.min_freq,
-                                       dev_priv->rps.max_freq);
+                                       rps->min_freq,
+                                       rps->max_freq);
        }
 
        if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                /* Store the frequency values in 16.66 MHZ units, which is
                 * the natural hardware unit for SKL
                 */
-               dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
-               dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+               rps->rp0_freq *= GEN9_FREQ_SCALER;
+               rps->rp1_freq *= GEN9_FREQ_SCALER;
+               rps->min_freq *= GEN9_FREQ_SCALER;
+               rps->max_freq *= GEN9_FREQ_SCALER;
+               rps->efficient_freq *= GEN9_FREQ_SCALER;
        }
 }
 
 static void reset_rps(struct drm_i915_private *dev_priv,
                      int (*set)(struct drm_i915_private *, u8))
 {
-       u8 freq = dev_priv->rps.cur_freq;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       u8 freq = rps->cur_freq;
 
        /* force a reset */
-       dev_priv->rps.power = -1;
-       dev_priv->rps.cur_freq = -1;
+       rps->power = -1;
+       rps->cur_freq = -1;
 
        if (set(dev_priv, freq))
                DRM_ERROR("Failed to reset RPS to initial values\n");
@@ -6535,7 +6571,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 
        /* Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RC_VIDEO_FREQ,
-               GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+               GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
 
        /* 1 second timeout*/
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
@@ -6589,7 +6625,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
 
        /* 3a: Enable RC6 */
-       if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+       if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
        DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
        I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
@@ -6609,7 +6645,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
+static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -6618,7 +6654,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
        /* 1a: Software RC state - RC0 */
        I915_WRITE(GEN6_RC_STATE, 0);
 
-       /* 1c & 1d: Get forcewake during program sequence. Although the driver
+       /* 1b: Get forcewake during program sequence. Although the driver
         * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -6632,36 +6668,38 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
        for_each_engine(engine, dev_priv, id)
                I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
        I915_WRITE(GEN6_RC_SLEEP, 0);
-       if (IS_BROADWELL(dev_priv))
-               I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
-       else
-               I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+       I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
 
        /* 3: Enable RC6 */
-       if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+       if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
        intel_print_rc6_info(dev_priv, rc6_mask);
-       if (IS_BROADWELL(dev_priv))
-               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-                               GEN7_RC_CTL_TO_MODE |
-                               rc6_mask);
-       else
-               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-                               GEN6_RC_CTL_EI_MODE(1) |
-                               rc6_mask);
 
-       /* 4 Program defaults and thresholds for RPS*/
+       I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                       GEN7_RC_CTL_TO_MODE |
+                       rc6_mask);
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen8_enable_rps(struct drm_i915_private *dev_priv)
+{
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+       /* 1 Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RPNSWREQ,
-                  HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+                  HSW_FREQUENCY(rps->rp1_freq));
        I915_WRITE(GEN6_RC_VIDEO_FREQ,
-                  HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+                  HSW_FREQUENCY(rps->rp1_freq));
        /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
 
        /* Docs recommend 900MHz, and 300 MHz respectively */
        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  dev_priv->rps.max_freq_softlimit << 24 |
-                  dev_priv->rps.min_freq_softlimit << 16);
+                  rps->max_freq_softlimit << 24 |
+                  rps->min_freq_softlimit << 16);
 
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -6670,7 +6708,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-       /* 5: Enable RPS */
+       /* 2: Enable RPS */
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
                   GEN6_RP_MEDIA_HW_NORMAL_MODE |
@@ -6679,14 +6717,12 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
                   GEN6_RP_UP_BUSY_AVG |
                   GEN6_RP_DOWN_IDLE_AVG);
 
-       /* 6: Ring frequency + overclocking (our driver does this later */
-
        reset_rps(dev_priv, gen6_set_rps);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
+static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -6695,14 +6731,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
        int rc6_mode;
        int ret;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-       /* Here begins a magic sequence of register writes to enable
-        * auto-downclocking.
-        *
-        * Perhaps there might be some value in exposing these to
-        * userspace...
-        */
        I915_WRITE(GEN6_RC_STATE, 0);
 
        /* Clear the DBG now so we don't confuse earlier errors */
@@ -6736,7 +6764,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
        /* Check if we are enabling RC6 */
-       rc6_mode = intel_enable_rc6();
+       rc6_mode = intel_rc6_enabled();
        if (rc6_mode & INTEL_RC6_ENABLE)
                rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
 
@@ -6756,12 +6784,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
                   GEN6_RC_CTL_EI_MODE(1) |
                   GEN6_RC_CTL_HW_ENABLE);
 
-       /* Power down if completely idle for over 50ms */
-       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
-       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
-       reset_rps(dev_priv, gen6_set_rps);
-
        rc6vids = 0;
        ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
        if (IS_GEN6(dev_priv) && ret) {
@@ -6779,8 +6801,28 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
+static void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+       /* Here begins a magic sequence of register writes to enable
+        * auto-downclocking.
+        *
+        * Perhaps there might be some value in exposing these to
+        * userspace...
+        */
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+       /* Power down if completely idle for over 50ms */
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+       reset_rps(dev_priv, gen6_set_rps);
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int min_freq = 15;
        unsigned int gpu_freq;
        unsigned int max_ia_freq, min_ring_freq;
@@ -6788,7 +6830,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
        int scaling_factor = 180;
        struct cpufreq_policy *policy;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
        policy = cpufreq_cpu_get(0);
        if (policy) {
@@ -6811,11 +6853,11 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 
        if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
                /* Convert GT frequency to 50 HZ units */
-               min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
-               max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+               min_gpu_freq = rps->min_freq / GEN9_FREQ_SCALER;
+               max_gpu_freq = rps->max_freq / GEN9_FREQ_SCALER;
        } else {
-               min_gpu_freq = dev_priv->rps.min_freq;
-               max_gpu_freq = dev_priv->rps.max_freq;
+               min_gpu_freq = rps->min_freq;
+               max_gpu_freq = rps->max_freq;
        }
 
        /*
@@ -7066,17 +7108,18 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
 
 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
 {
-       dev_priv->rps.gpll_ref_freq =
+       dev_priv->gt_pm.rps.gpll_ref_freq =
                vlv_get_cck_clock(dev_priv, "GPLL ref",
                                  CCK_GPLL_CLOCK_CONTROL,
                                  dev_priv->czclk_freq);
 
        DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
-                        dev_priv->rps.gpll_ref_freq);
+                        dev_priv->gt_pm.rps.gpll_ref_freq);
 }
 
 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
 
        valleyview_setup_pctx(dev_priv);
@@ -7098,30 +7141,31 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
        }
        DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
-       dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
-       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       rps->max_freq = valleyview_rps_max_freq(dev_priv);
+       rps->rp0_freq = rps->max_freq;
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
-                        dev_priv->rps.max_freq);
+                        intel_gpu_freq(dev_priv, rps->max_freq),
+                        rps->max_freq);
 
-       dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+       rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+                        intel_gpu_freq(dev_priv, rps->efficient_freq),
+                        rps->efficient_freq);
 
-       dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+       rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
        DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
-                        dev_priv->rps.rp1_freq);
+                        intel_gpu_freq(dev_priv, rps->rp1_freq),
+                        rps->rp1_freq);
 
-       dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+       rps->min_freq = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-                        dev_priv->rps.min_freq);
+                        intel_gpu_freq(dev_priv, rps->min_freq),
+                        rps->min_freq);
 }
 
 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
        u32 val;
 
        cherryview_setup_pctx(dev_priv);
@@ -7142,31 +7186,29 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
        }
        DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
-       dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
-       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       rps->max_freq = cherryview_rps_max_freq(dev_priv);
+       rps->rp0_freq = rps->max_freq;
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
-                        dev_priv->rps.max_freq);
+                        intel_gpu_freq(dev_priv, rps->max_freq),
+                        rps->max_freq);
 
-       dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+       rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+                        intel_gpu_freq(dev_priv, rps->efficient_freq),
+                        rps->efficient_freq);
 
-       dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+       rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
        DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
-                        dev_priv->rps.rp1_freq);
+                        intel_gpu_freq(dev_priv, rps->rp1_freq),
+                        rps->rp1_freq);
 
-       dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+       rps->min_freq = cherryview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-                        dev_priv->rps.min_freq);
+                        intel_gpu_freq(dev_priv, rps->min_freq),
+                        rps->min_freq);
 
-       WARN_ONCE((dev_priv->rps.max_freq |
-                  dev_priv->rps.efficient_freq |
-                  dev_priv->rps.rp1_freq |
-                  dev_priv->rps.min_freq) & 1,
+       WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+                  rps->min_freq) & 1,
                  "Odd GPU freq values\n");
 }
 
@@ -7175,13 +7217,11 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
        valleyview_cleanup_pctx(dev_priv);
 }
 
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       u32 gtfifodbg, val, rc6_mode = 0, pcbr;
-
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       u32 gtfifodbg, rc6_mode = 0, pcbr;
 
        gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
                                             GT_FIFO_FREE_ENTRIES_CHV);
@@ -7212,7 +7252,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
        /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
        I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
 
-       /* allows RC6 residency counter to work */
+       /* Allows RC6 residency counter to work */
        I915_WRITE(VLV_COUNTER_CONTROL,
                   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
                                      VLV_MEDIA_RC6_COUNT_EN |
@@ -7222,13 +7262,22 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
        pcbr = I915_READ(VLV_PCBR);
 
        /* 3: Enable RC6 */
-       if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
+       if ((intel_rc6_enabled() & INTEL_RC6_ENABLE) &&
            (pcbr >> VLV_PCBR_ADDR_SHIFT))
                rc6_mode = GEN7_RC_CTL_TO_MODE;
 
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
-       /* 4 Program defaults and thresholds for RPS*/
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+       /* 1: Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -7237,7 +7286,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-       /* 5: Enable RPS */
+       /* 2: Enable RPS */
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_HW_NORMAL_MODE |
                   GEN6_RP_MEDIA_IS_GFX |
@@ -7264,13 +7313,11 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
+static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       u32 gtfifodbg, val, rc6_mode = 0;
-
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       u32 gtfifodbg, rc6_mode = 0;
 
        valleyview_check_pctx(dev_priv);
 
@@ -7281,28 +7328,11 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
 
-       /* If VLV, Forcewake all wells, else re-direct to regular path */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /*  Disable RC states. */
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
-       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-       I915_WRITE(GEN6_RP_UP_EI, 66000);
-       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
-       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
-       I915_WRITE(GEN6_RP_CONTROL,
-                  GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                  GEN6_RP_MEDIA_IS_GFX |
-                  GEN6_RP_ENABLE |
-                  GEN6_RP_UP_BUSY_AVG |
-                  GEN6_RP_DOWN_IDLE_CONT);
-
        I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
        I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
        I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
@@ -7312,7 +7342,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 
        I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
-       /* allows RC6 residency counter to work */
+       /* Allows RC6 residency counter to work */
        I915_WRITE(VLV_COUNTER_CONTROL,
                   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
                                      VLV_MEDIA_RC0_COUNT_EN |
@@ -7320,13 +7350,38 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
                                      VLV_MEDIA_RC6_COUNT_EN |
                                      VLV_RENDER_RC6_COUNT_EN));
 
-       if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+       if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
                rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
        intel_print_rc6_info(dev_priv, rc6_mode);
 
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+       I915_WRITE(GEN6_RP_UP_EI, 66000);
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_TURBO |
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX |
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_CONT);
+
        /* Setting Fixed Bias */
        val = VLV_OVERRIDE_EN |
                  VLV_SOC_TDP_EN |
@@ -7534,7 +7589,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
        lockdep_assert_held(&mchdev_lock);
 
-       pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
+       pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(dev_priv, pxvid);
 
@@ -7821,6 +7876,8 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
 
 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /*
         * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
         * requirement.
@@ -7831,7 +7888,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
        }
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        /* Initialize RPS limits (for userspace) */
        if (IS_CHERRYVIEW(dev_priv))
@@ -7842,16 +7899,16 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
                gen6_init_rps_frequencies(dev_priv);
 
        /* Derive initial user preferences/limits from the hardware limits */
-       dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-       dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
+       rps->idle_freq = rps->min_freq;
+       rps->cur_freq = rps->idle_freq;
 
-       dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-       dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+       rps->max_freq_softlimit = rps->max_freq;
+       rps->min_freq_softlimit = rps->min_freq;
 
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-               dev_priv->rps.min_freq_softlimit =
+               rps->min_freq_softlimit =
                        max_t(int,
-                             dev_priv->rps.efficient_freq,
+                             rps->efficient_freq,
                              intel_freq_opcode(dev_priv, 450));
 
        /* After setting max-softlimit, find the overclock max freq */
@@ -7862,16 +7919,16 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
                sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
                if (params & BIT(31)) { /* OC supported */
                        DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
-                                        (dev_priv->rps.max_freq & 0xff) * 50,
+                                        (rps->max_freq & 0xff) * 50,
                                         (params & 0xff) * 50);
-                       dev_priv->rps.max_freq = params & 0xff;
+                       rps->max_freq = params & 0xff;
                }
        }
 
        /* Finally allow us to boost to max by default */
-       dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
+       rps->boost_freq = rps->max_freq;
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        intel_autoenable_gt_powersave(dev_priv);
@@ -7899,7 +7956,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
        if (INTEL_GEN(dev_priv) < 6)
                return;
 
-       if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
+       if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
                intel_runtime_pm_put(dev_priv);
 
        /* gen6_rps_idle() will be called later to disable interrupts */
@@ -7907,90 +7964,168 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
 
 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       dev_priv->rps.enabled = true; /* force disabling */
+       dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
+       dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
        intel_disable_gt_powersave(dev_priv);
 
        gen6_reset_rps_interrupts(dev_priv);
 }
 
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
+static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
 {
-       if (!READ_ONCE(dev_priv->rps.enabled))
+       lockdep_assert_held(&i915->pcu_lock);
+
+       if (!i915->gt_pm.llc_pstate.enabled)
                return;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       /* Currently there is no HW configuration to be done to disable. */
 
-       if (INTEL_GEN(dev_priv) >= 9) {
+       i915->gt_pm.llc_pstate.enabled = false;
+}
+
+static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+{
+       lockdep_assert_held(&dev_priv->pcu_lock);
+
+       if (!dev_priv->gt_pm.rc6.enabled)
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 9)
                gen9_disable_rc6(dev_priv);
+       else if (IS_CHERRYVIEW(dev_priv))
+               cherryview_disable_rc6(dev_priv);
+       else if (IS_VALLEYVIEW(dev_priv))
+               valleyview_disable_rc6(dev_priv);
+       else if (INTEL_GEN(dev_priv) >= 6)
+               gen6_disable_rc6(dev_priv);
+
+       dev_priv->gt_pm.rc6.enabled = false;
+}
+
+static void intel_disable_rps(struct drm_i915_private *dev_priv)
+{
+       lockdep_assert_held(&dev_priv->pcu_lock);
+
+       if (!dev_priv->gt_pm.rps.enabled)
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 9)
                gen9_disable_rps(dev_priv);
-       } else if (IS_CHERRYVIEW(dev_priv)) {
+       else if (IS_CHERRYVIEW(dev_priv))
                cherryview_disable_rps(dev_priv);
-       } else if (IS_VALLEYVIEW(dev_priv)) {
+       else if (IS_VALLEYVIEW(dev_priv))
                valleyview_disable_rps(dev_priv);
-       } else if (INTEL_GEN(dev_priv) >= 6) {
+       else if (INTEL_GEN(dev_priv) >= 6)
                gen6_disable_rps(dev_priv);
-       }  else if (IS_IRONLAKE_M(dev_priv)) {
+       else if (IS_IRONLAKE_M(dev_priv))
                ironlake_disable_drps(dev_priv);
-       }
 
-       dev_priv->rps.enabled = false;
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       dev_priv->gt_pm.rps.enabled = false;
 }
 
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       /* We shouldn't be disabling as we submit, so this should be less
-        * racy than it appears!
-        */
-       if (READ_ONCE(dev_priv->rps.enabled))
+       mutex_lock(&dev_priv->pcu_lock);
+
+       intel_disable_rc6(dev_priv);
+       intel_disable_rps(dev_priv);
+       if (HAS_LLC(dev_priv))
+               intel_disable_llc_pstate(dev_priv);
+
+       mutex_unlock(&dev_priv->pcu_lock);
+}
+
+static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
+{
+       lockdep_assert_held(&i915->pcu_lock);
+
+       if (i915->gt_pm.llc_pstate.enabled)
                return;
 
-       /* Powersaving is controlled by the host when inside a VM */
-       if (intel_vgpu_active(dev_priv))
+       gen6_update_ring_freq(i915);
+
+       i915->gt_pm.llc_pstate.enabled = true;
+}
+
+static void intel_enable_rc6(struct drm_i915_private *dev_priv)
+{
+       lockdep_assert_held(&dev_priv->pcu_lock);
+
+       if (dev_priv->gt_pm.rc6.enabled)
                return;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       if (IS_CHERRYVIEW(dev_priv))
+               cherryview_enable_rc6(dev_priv);
+       else if (IS_VALLEYVIEW(dev_priv))
+               valleyview_enable_rc6(dev_priv);
+       else if (INTEL_GEN(dev_priv) >= 9)
+               gen9_enable_rc6(dev_priv);
+       else if (IS_BROADWELL(dev_priv))
+               gen8_enable_rc6(dev_priv);
+       else if (INTEL_GEN(dev_priv) >= 6)
+               gen6_enable_rc6(dev_priv);
+
+       dev_priv->gt_pm.rc6.enabled = true;
+}
+
+static void intel_enable_rps(struct drm_i915_private *dev_priv)
+{
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       lockdep_assert_held(&dev_priv->pcu_lock);
+
+       if (rps->enabled)
+               return;
 
        if (IS_CHERRYVIEW(dev_priv)) {
                cherryview_enable_rps(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                valleyview_enable_rps(dev_priv);
        } else if (INTEL_GEN(dev_priv) >= 9) {
-               gen9_enable_rc6(dev_priv);
                gen9_enable_rps(dev_priv);
-               if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
-                       gen6_update_ring_freq(dev_priv);
        } else if (IS_BROADWELL(dev_priv)) {
                gen8_enable_rps(dev_priv);
-               gen6_update_ring_freq(dev_priv);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                gen6_enable_rps(dev_priv);
-               gen6_update_ring_freq(dev_priv);
        } else if (IS_IRONLAKE_M(dev_priv)) {
                ironlake_enable_drps(dev_priv);
                intel_init_emon(dev_priv);
        }
 
-       WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
-       WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+       WARN_ON(rps->max_freq < rps->min_freq);
+       WARN_ON(rps->idle_freq > rps->max_freq);
+
+       WARN_ON(rps->efficient_freq < rps->min_freq);
+       WARN_ON(rps->efficient_freq > rps->max_freq);
+
+       rps->enabled = true;
+}
+
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+{
+       /* Powersaving is controlled by the host when inside a VM */
+       if (intel_vgpu_active(dev_priv))
+               return;
+
+       mutex_lock(&dev_priv->pcu_lock);
 
-       WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
-       WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+       intel_enable_rc6(dev_priv);
+       intel_enable_rps(dev_priv);
+       if (HAS_LLC(dev_priv))
+               intel_enable_llc_pstate(dev_priv);
 
-       dev_priv->rps.enabled = true;
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void __intel_autoenable_gt_powersave(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
+               container_of(work,
+                            typeof(*dev_priv),
+                            gt_pm.autoenable_work.work);
        struct intel_engine_cs *rcs;
        struct drm_i915_gem_request *req;
 
-       if (READ_ONCE(dev_priv->rps.enabled))
-               goto out;
-
        rcs = dev_priv->engine[RCS];
        if (rcs->last_retired_context)
                goto out;
@@ -8018,9 +8153,6 @@ out:
 
 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       if (READ_ONCE(dev_priv->rps.enabled))
-               return;
-
        if (IS_IRONLAKE_M(dev_priv)) {
                ironlake_enable_drps(dev_priv);
                intel_init_emon(dev_priv);
@@ -8038,7 +8170,7 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
                 * runtime resume it's necessary).
                 */
                if (queue_delayed_work(dev_priv->wq,
-                                      &dev_priv->rps.autoenable_work,
+                                      &dev_priv->gt_pm.autoenable_work,
                                       round_jiffies_up_relative(HZ)))
                        intel_runtime_pm_get_noresume(dev_priv);
        }
@@ -8447,6 +8579,9 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
 {
+       /* The GTT cache must be disabled if the system is using 2M pages. */
+       bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
+                                                I915_GTT_PAGE_SIZE_2M);
        enum pipe pipe;
 
        ilk_init_lp_watermarks(dev_priv);
@@ -8481,12 +8616,8 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
        /* WaProgramL3SqcReg1Default:bdw */
        gen8_set_l3sqc_credits(dev_priv, 30, 2);
 
-       /*
-        * WaGttCachingOffByDefault:bdw
-        * GTT cache may not work with big pages, so if those
-        * are ever enabled GTT cache may need to be disabled.
-        */
-       I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+       /* WaGttCachingOffByDefault:bdw */
+       I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
 
        /* WaKVMNotificationOnConfigChange:bdw */
        I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
@@ -9067,7 +9198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 {
        int status;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
        /* GEN6_PCODE_* are outside of the forcewake domain, we can
         * use te fw I915_READ variants to reduce the amount of work
@@ -9114,7 +9245,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 {
        int status;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
        /* GEN6_PCODE_* are outside of the forcewake domain, we can
         * use te fw I915_READ variants to reduce the amount of work
@@ -9191,7 +9322,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
        u32 status;
        int ret;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
                                   &status)
@@ -9233,31 +9364,39 @@ out:
 
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /*
         * N = val - 0xb7
         * Slow = Fast = GPLL ref * N
         */
-       return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
+       return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
 }
 
 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-       return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+       return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
 }
 
 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /*
         * N = val / 2
         * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
         */
-       return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
+       return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
 }
 
 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
        /* CHV needs even values */
-       return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
+       return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
 }
 
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
@@ -9288,14 +9427,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 
 void intel_pm_setup(struct drm_i915_private *dev_priv)
 {
-       mutex_init(&dev_priv->rps.hw_lock);
+       mutex_init(&dev_priv->pcu_lock);
 
-       INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
+       INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
                          __intel_autoenable_gt_powersave);
-       atomic_set(&dev_priv->rps.num_waiters, 0);
+       atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
 
-       dev_priv->pm.suspended = false;
-       atomic_set(&dev_priv->pm.wakeref_count, 0);
+       dev_priv->runtime_pm.suspended = false;
+       atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
 }
 
 static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
@@ -9348,7 +9487,7 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
 {
        u64 time_hw, units, div;
 
-       if (!intel_enable_rc6())
+       if (!intel_rc6_enabled())
                return 0;
 
        intel_runtime_pm_get(dev_priv);
index 05c08b0bc172fea7fcdaf02cbd909097345c8f25..4285f09ff8b843793fd521798ac180d1407db137 100644 (file)
@@ -579,7 +579,16 @@ out:
 static void reset_ring_common(struct intel_engine_cs *engine,
                              struct drm_i915_gem_request *request)
 {
-       /* Try to restore the logical GPU state to match the continuation
+       /*
+        * RC6 must be prevented until the reset is complete and the engine
+        * reinitialised. If it occurs in the middle of this sequence, the
+        * state written to/loaded from the power context is ill-defined (e.g.
+        * the PP_BASE_DIR may be lost).
+        */
+       assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+
+       /*
+        * Try to restore the logical GPU state to match the continuation
         * of the request queue. If we skip the context/PD restore, then
         * the next request may try to execute assuming that its context
         * is valid and loaded on the GPU and so may try to access invalid
index 56d7ae9f298b978b240224af3e6786ed1b1e4261..17186f067408587543e8f7955e00f4d86c51cb72 100644 (file)
@@ -7,6 +7,8 @@
 #include "i915_gem_timeline.h"
 #include "i915_selftest.h"
 
+struct drm_printer;
+
 #define I915_CMD_HASH_ORDER 9
 
 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
@@ -238,6 +240,11 @@ struct intel_engine_execlists {
 #define EXECLIST_MAX_PORTS 2
        } port[EXECLIST_MAX_PORTS];
 
+       /**
+        * @preempt: are we currently handling a preempting context switch?
+        */
+       bool preempt;
+
        /**
         * @port_mask: number of execlist ports - 1
         */
@@ -834,4 +841,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915);
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
 
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
index 7933d1bc6a1c57f1fc613946ed9972f50bce06bf..8af286c63d3b6e9bd8e55b78f6de2fd814eaaf06 100644 (file)
@@ -187,7 +187,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
        struct i915_power_well *power_well;
        bool is_enabled;
 
-       if (dev_priv->pm.suspended)
+       if (dev_priv->runtime_pm.suspended)
                return false;
 
        is_enabled = true;
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 {
        enum i915_power_well_id id = power_well->id;
        bool wait_fuses = power_well->hsw.has_fuses;
-       enum skl_power_gate pg;
+       enum skl_power_gate uninitialized_var(pg);
        u32 val;
 
        if (wait_fuses) {
@@ -785,7 +785,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
        state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
                         PUNIT_PWRGT_PWR_GATE(power_well_id);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
 #define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
@@ -806,7 +806,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 #undef COND
 
 out:
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
@@ -833,7 +833,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
        mask = PUNIT_PWRGT_MASK(power_well_id);
        ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
        /*
@@ -852,7 +852,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
        WARN_ON(ctrl != state);
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        return enabled;
 }
@@ -1364,7 +1364,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
        bool enabled;
        u32 state, ctrl;
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
        state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
        /*
@@ -1381,7 +1381,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
        WARN_ON(ctrl << 16 != state);
 
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 
        return enabled;
 }
@@ -1396,7 +1396,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 
        state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
 
-       mutex_lock(&dev_priv->rps.hw_lock);
+       mutex_lock(&dev_priv->pcu_lock);
 
 #define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
@@ -1417,7 +1417,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 #undef COND
 
 out:
-       mutex_unlock(&dev_priv->rps.hw_lock);
+       mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
@@ -2809,6 +2809,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
 
        /* 6. Enable DBUF */
        gen9_dbuf_enable(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
+               intel_csr_load_program(dev_priv);
 }
 
 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -3125,7 +3128,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
        ret = pm_runtime_get_sync(kdev);
        WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
 
-       atomic_inc(&dev_priv->pm.wakeref_count);
+       atomic_inc(&dev_priv->runtime_pm.wakeref_count);
        assert_rpm_wakelock_held(dev_priv);
 }
 
@@ -3159,7 +3162,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
                        return false;
        }
 
-       atomic_inc(&dev_priv->pm.wakeref_count);
+       atomic_inc(&dev_priv->runtime_pm.wakeref_count);
        assert_rpm_wakelock_held(dev_priv);
 
        return true;
@@ -3190,7 +3193,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
        assert_rpm_wakelock_held(dev_priv);
        pm_runtime_get_noresume(kdev);
 
-       atomic_inc(&dev_priv->pm.wakeref_count);
+       atomic_inc(&dev_priv->runtime_pm.wakeref_count);
 }
 
 /**
@@ -3207,7 +3210,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
        struct device *kdev = &pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
-       atomic_dec(&dev_priv->pm.wakeref_count);
+       atomic_dec(&dev_priv->runtime_pm.wakeref_count);
 
        pm_runtime_mark_last_busy(kdev);
        pm_runtime_put_autosuspend(kdev);
index 7d971cb5611683bc8a7927e6497b4da402bfb176..75c872bb8cc9d3fe0d9a0b19d1240035ff06fa4e 100644 (file)
@@ -81,7 +81,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
 {
        u32 val = 0;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
        mutex_lock(&dev_priv->sb_lock);
        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
@@ -95,7 +95,7 @@ int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
 {
        int err;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
        mutex_lock(&dev_priv->sb_lock);
        err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
@@ -125,7 +125,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
 {
        u32 val = 0;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
        mutex_lock(&dev_priv->sb_lock);
        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
index 28a1209d87e2761203bef48dbbaa6fa298cbc277..86fc9b529f2d20e0cc3b33ccd34d76a8cf5f31ee 100644 (file)
@@ -66,7 +66,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
                            1000 * adjusted_mode->crtc_htotal);
 }
 
+/* FIXME: We should instead only take spinlocks once for the entire update
+ * instead of once per mmio. */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
 #define VBLANK_EVASION_TIME_US 100
+#endif
 
 /**
  * intel_pipe_update_start() - start update of a set of display registers
index 277477890240ca7490ae9c00267a9e1ccca1632b..7b938e822fde2f8ade41ab34208faa77dc9d3a09 100644 (file)
  *
  */
 
-#include "i915_drv.h"
 #include "intel_uc.h"
-#include <linux/firmware.h>
-
-/* Cleans up uC firmware by releasing the firmware GEM obj.
- */
-static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
-{
-       struct drm_i915_gem_object *obj;
-
-       obj = fetch_and_zero(&uc_fw->obj);
-       if (obj)
-               i915_gem_object_put(obj);
-
-       uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
+#include "i915_drv.h"
+#include "i915_guc_submission.h"
 
 /* Reset GuC providing us with fresh state for both GuC and HuC.
  */
@@ -94,198 +81,34 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
                i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
 }
 
-static void gen8_guc_raise_irq(struct intel_guc *guc)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
-       I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
-}
-
 void intel_uc_init_early(struct drm_i915_private *dev_priv)
 {
-       struct intel_guc *guc = &dev_priv->guc;
-
-       intel_guc_ct_init_early(&guc->ct);
-
-       mutex_init(&guc->send_mutex);
-       guc->send = intel_guc_send_nop;
-       guc->notify = gen8_guc_raise_irq;
-}
-
-static void fetch_uc_fw(struct drm_i915_private *dev_priv,
-                       struct intel_uc_fw *uc_fw)
-{
-       struct pci_dev *pdev = dev_priv->drm.pdev;
-       struct drm_i915_gem_object *obj;
-       const struct firmware *fw = NULL;
-       struct uc_css_header *css;
-       size_t size;
-       int err;
-
-       if (!uc_fw->path)
-               return;
-
-       uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
-       DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
-                        intel_uc_fw_status_repr(uc_fw->fetch_status));
-
-       err = request_firmware(&fw, uc_fw->path, &pdev->dev);
-       if (err)
-               goto fail;
-       if (!fw)
-               goto fail;
-
-       DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
-                        uc_fw->path, fw);
-
-       /* Check the size of the blob before examining buffer contents */
-       if (fw->size < sizeof(struct uc_css_header)) {
-               DRM_NOTE("Firmware header is missing\n");
-               goto fail;
-       }
-
-       css = (struct uc_css_header *)fw->data;
-
-       /* Firmware bits always start from header */
-       uc_fw->header_offset = 0;
-       uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
-                             css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
-
-       if (uc_fw->header_size != sizeof(struct uc_css_header)) {
-               DRM_NOTE("CSS header definition mismatch\n");
-               goto fail;
-       }
-
-       /* then, uCode */
-       uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
-       uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
-       /* now RSA */
-       if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
-               DRM_NOTE("RSA key size is bad\n");
-               goto fail;
-       }
-       uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
-       uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
-       /* At least, it should have header, uCode and RSA. Size of all three. */
-       size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
-       if (fw->size < size) {
-               DRM_NOTE("Missing firmware components\n");
-               goto fail;
-       }
-
-       /*
-        * The GuC firmware image has the version number embedded at a
-        * well-known offset within the firmware blob; note that major / minor
-        * version are TWO bytes each (i.e. u16), although all pointers and
-        * offsets are defined in terms of bytes (u8).
-        */
-       switch (uc_fw->type) {
-       case INTEL_UC_FW_TYPE_GUC:
-               /* Header and uCode will be loaded to WOPCM. Size of the two. */
-               size = uc_fw->header_size + uc_fw->ucode_size;
-
-               /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
-               if (size > intel_guc_wopcm_size(dev_priv)) {
-                       DRM_ERROR("Firmware is too large to fit in WOPCM\n");
-                       goto fail;
-               }
-               uc_fw->major_ver_found = css->guc.sw_version >> 16;
-               uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
-               break;
-
-       case INTEL_UC_FW_TYPE_HUC:
-               uc_fw->major_ver_found = css->huc.sw_version >> 16;
-               uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
-               break;
-
-       default:
-               DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
-               err = -ENOEXEC;
-               goto fail;
-       }
-
-       if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
-               DRM_NOTE("Skipping %s firmware version check\n",
-                        intel_uc_fw_type_repr(uc_fw->type));
-       } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
-                  uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
-               DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
-                        intel_uc_fw_type_repr(uc_fw->type),
-                        uc_fw->major_ver_found, uc_fw->minor_ver_found,
-                        uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-               err = -ENOEXEC;
-               goto fail;
-       }
-
-       DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
-                        uc_fw->major_ver_found, uc_fw->minor_ver_found,
-                        uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
-       obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
-       if (IS_ERR(obj)) {
-               err = PTR_ERR(obj);
-               goto fail;
-       }
-
-       uc_fw->obj = obj;
-       uc_fw->size = fw->size;
-
-       DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
-                        uc_fw->obj);
-
-       release_firmware(fw);
-       uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
-       return;
-
-fail:
-       DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
-                uc_fw->path, err);
-       DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
-                        err, fw, uc_fw->obj);
-
-       release_firmware(fw);           /* OK even if fw is NULL */
-       uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+       intel_guc_init_early(&dev_priv->guc);
 }
 
 void intel_uc_init_fw(struct drm_i915_private *dev_priv)
 {
-       fetch_uc_fw(dev_priv, &dev_priv->huc.fw);
-       fetch_uc_fw(dev_priv, &dev_priv->guc.fw);
+       intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+       intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
 }
 
 void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
 {
-       __intel_uc_fw_fini(&dev_priv->guc.fw);
-       __intel_uc_fw_fini(&dev_priv->huc.fw);
-}
-
-static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
-{
-       GEM_BUG_ON(!guc->send_regs.base);
-       GEM_BUG_ON(!guc->send_regs.count);
-       GEM_BUG_ON(i >= guc->send_regs.count);
-
-       return _MMIO(guc->send_regs.base + 4 * i);
+       intel_uc_fw_fini(&dev_priv->guc.fw);
+       intel_uc_fw_fini(&dev_priv->huc.fw);
 }
 
-static void guc_init_send_regs(struct intel_guc *guc)
+/**
+ * intel_uc_init_mmio - setup uC MMIO access
+ *
+ * @dev_priv: device private
+ *
+ * Setup minimal state necessary for MMIO accesses later in the
+ * initialization sequence.
+ */
+void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       enum forcewake_domains fw_domains = 0;
-       unsigned int i;
-
-       guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
-       guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
-
-       for (i = 0; i < guc->send_regs.count; i++) {
-               fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-                                       guc_send_reg(guc, i),
-                                       FW_REG_READ | FW_REG_WRITE);
-       }
-       guc->send_regs.fw_domains = fw_domains;
+       intel_guc_init_send_regs(&dev_priv->guc);
 }
 
 static void guc_capture_load_err_log(struct intel_guc *guc)
@@ -309,8 +132,6 @@ static int guc_enable_communication(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-       guc_init_send_regs(guc);
-
        if (HAS_GUC_CT(dev_priv))
                return intel_guc_enable_ct(guc);
 
@@ -328,27 +149,6 @@ static void guc_disable_communication(struct intel_guc *guc)
        guc->send = intel_guc_send_nop;
 }
 
-/**
- * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
- * @guc: intel_guc structure
- * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
- *
- * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
- * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
- * intel_huc_auth().
- *
- * Return:     non-zero code on error
- */
-int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
-{
-       u32 action[] = {
-               INTEL_GUC_ACTION_AUTHENTICATE_HUC,
-               rsa_offset
-       };
-
-       return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
 int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 {
        struct intel_guc *guc = &dev_priv->guc;
@@ -480,82 +280,3 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
 
        i915_ggtt_disable_guc(dev_priv);
 }
-
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
-{
-       WARN(1, "Unexpected send: action=%#x\n", *action);
-       return -ENODEV;
-}
-
-/*
- * This function implements the MMIO based host to GuC interface.
- */
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       u32 status;
-       int i;
-       int ret;
-
-       GEM_BUG_ON(!len);
-       GEM_BUG_ON(len > guc->send_regs.count);
-
-       /* If CT is available, we expect to use MMIO only during init/fini */
-       GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
-               *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
-               *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
-
-       mutex_lock(&guc->send_mutex);
-       intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
-
-       for (i = 0; i < len; i++)
-               I915_WRITE(guc_send_reg(guc, i), action[i]);
-
-       POSTING_READ(guc_send_reg(guc, i - 1));
-
-       intel_guc_notify(guc);
-
-       /*
-        * No GuC command should ever take longer than 10ms.
-        * Fast commands should still complete in 10us.
-        */
-       ret = __intel_wait_for_register_fw(dev_priv,
-                                          guc_send_reg(guc, 0),
-                                          INTEL_GUC_RECV_MASK,
-                                          INTEL_GUC_RECV_MASK,
-                                          10, 10, &status);
-       if (status != INTEL_GUC_STATUS_SUCCESS) {
-               /*
-                * Either the GuC explicitly returned an error (which
-                * we convert to -EIO here) or no response at all was
-                * received within the timeout limit (-ETIMEDOUT)
-                */
-               if (ret != -ETIMEDOUT)
-                       ret = -EIO;
-
-               DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
-                        " ret=%d status=0x%08X response=0x%08X\n",
-                        action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
-       }
-
-       intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
-       mutex_unlock(&guc->send_mutex);
-
-       return ret;
-}
-
-int intel_guc_sample_forcewake(struct intel_guc *guc)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       u32 action[2];
-
-       action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
-       /* WaRsDisableCoarsePowerGating:skl,bxt */
-       if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
-               action[1] = 0;
-       else
-               /* bit 0 and 1 are for Render and Media domain separately */
-               action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
-
-       return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
index 6966349ed73732f1619b65058c25a0772c29f2fe..e18d3bb020887430bbe66960a0a9144be7cea26f 100644 (file)
 #ifndef _INTEL_UC_H_
 #define _INTEL_UC_H_
 
-#include "intel_guc_fwif.h"
-#include "i915_guc_reg.h"
-#include "intel_ringbuffer.h"
-#include "intel_guc_ct.h"
-#include "i915_vma.h"
+#include "intel_guc.h"
+#include "intel_huc.h"
 
-struct drm_i915_gem_request;
-
-/*
- * This structure primarily describes the GEM object shared with the GuC.
- * The specs sometimes refer to this object as a "GuC context", but we use
- * the term "client" to avoid confusion with hardware contexts. This
- * GEM object is held for the entire lifetime of our interaction with
- * the GuC, being allocated before the GuC is loaded with its firmware.
- * Because there's no way to update the address used by the GuC after
- * initialisation, the shared object must stay pinned into the GGTT as
- * long as the GuC is in use. We also keep the first page (only) mapped
- * into kernel address space, as it includes shared data that must be
- * updated on every request submission.
- *
- * The single GEM object described here is actually made up of several
- * separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process descriptor" which holds sequence data for
- * the doorbell, and one cacheline which actually *is* the doorbell; a
- * write to this will "ring the doorbell" (i.e. send an interrupt to the
- * GuC). The subsequent  pages of the client object constitute the work
- * queue (a circular array of work items), again described in the process
- * descriptor. Work queue pages are mapped momentarily as required.
- */
-struct i915_guc_client {
-       struct i915_vma *vma;
-       void *vaddr;
-       struct i915_gem_context *owner;
-       struct intel_guc *guc;
-
-       uint32_t engines;               /* bitmap of (host) engine ids  */
-       uint32_t priority;
-       u32 stage_id;
-       uint32_t proc_desc_offset;
-
-       u16 doorbell_id;
-       unsigned long doorbell_offset;
-
-       spinlock_t wq_lock;
-       /* Per-engine counts of GuC submissions */
-       uint64_t submissions[I915_NUM_ENGINES];
-};
-
-enum intel_uc_fw_status {
-       INTEL_UC_FIRMWARE_FAIL = -1,
-       INTEL_UC_FIRMWARE_NONE = 0,
-       INTEL_UC_FIRMWARE_PENDING,
-       INTEL_UC_FIRMWARE_SUCCESS
-};
-
-/* User-friendly representation of an enum */
-static inline
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
-{
-       switch (status) {
-       case INTEL_UC_FIRMWARE_FAIL:
-               return "FAIL";
-       case INTEL_UC_FIRMWARE_NONE:
-               return "NONE";
-       case INTEL_UC_FIRMWARE_PENDING:
-               return "PENDING";
-       case INTEL_UC_FIRMWARE_SUCCESS:
-               return "SUCCESS";
-       }
-       return "<invalid>";
-}
-
-enum intel_uc_fw_type {
-       INTEL_UC_FW_TYPE_GUC,
-       INTEL_UC_FW_TYPE_HUC
-};
-
-/* User-friendly representation of an enum */
-static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
-{
-       switch (type) {
-       case INTEL_UC_FW_TYPE_GUC:
-               return "GuC";
-       case INTEL_UC_FW_TYPE_HUC:
-               return "HuC";
-       }
-       return "uC";
-}
-
-/*
- * This structure encapsulates all the data needed during the process
- * of fetching, caching, and loading the firmware image into the GuC.
- */
-struct intel_uc_fw {
-       const char *path;
-       size_t size;
-       struct drm_i915_gem_object *obj;
-       enum intel_uc_fw_status fetch_status;
-       enum intel_uc_fw_status load_status;
-
-       uint16_t major_ver_wanted;
-       uint16_t minor_ver_wanted;
-       uint16_t major_ver_found;
-       uint16_t minor_ver_found;
-
-       enum intel_uc_fw_type type;
-       uint32_t header_size;
-       uint32_t header_offset;
-       uint32_t rsa_size;
-       uint32_t rsa_offset;
-       uint32_t ucode_size;
-       uint32_t ucode_offset;
-};
-
-struct intel_guc_log {
-       uint32_t flags;
-       struct i915_vma *vma;
-       /* The runtime stuff gets created only when GuC logging gets enabled */
-       struct {
-               void *buf_addr;
-               struct workqueue_struct *flush_wq;
-               struct work_struct flush_work;
-               struct rchan *relay_chan;
-       } runtime;
-       /* logging related stats */
-       u32 capture_miss_count;
-       u32 flush_interrupt_count;
-       u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
-       u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
-       u32 flush_count[GUC_MAX_LOG_BUFFER];
-};
-
-struct intel_guc {
-       struct intel_uc_fw fw;
-       struct intel_guc_log log;
-       struct intel_guc_ct ct;
-
-       /* Log snapshot if GuC errors during load */
-       struct drm_i915_gem_object *load_err_log;
-
-       /* intel_guc_recv interrupt related state */
-       bool interrupts_enabled;
-
-       struct i915_vma *ads_vma;
-       struct i915_vma *stage_desc_pool;
-       void *stage_desc_pool_vaddr;
-       struct ida stage_ids;
-
-       struct i915_guc_client *execbuf_client;
-
-       DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
-       uint32_t db_cacheline;          /* Cyclic counter mod pagesize  */
-
-       /* GuC's FW specific registers used in MMIO send */
-       struct {
-               u32 base;
-               unsigned int count;
-               enum forcewake_domains fw_domains;
-       } send_regs;
-
-       /* To serialize the intel_guc_send actions */
-       struct mutex send_mutex;
-
-       /* GuC's FW specific send function */
-       int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
-
-       /* GuC's FW specific notify function */
-       void (*notify)(struct intel_guc *guc);
-};
-
-struct intel_huc {
-       /* Generic uC firmware management */
-       struct intel_uc_fw fw;
-
-       /* HuC-specific additions */
-};
-
-/* intel_uc.c */
 void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
+void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
 void intel_uc_init_fw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
 int intel_uc_init_hw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
-int intel_guc_sample_forcewake(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
-int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
-
-static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
-{
-       return guc->send(guc, action, len);
-}
-
-static inline void intel_guc_notify(struct intel_guc *guc)
-{
-       guc->notify(guc);
-}
-
-/* intel_guc_loader.c */
-int intel_guc_select_fw(struct intel_guc *guc);
-int intel_guc_init_hw(struct intel_guc *guc);
-int intel_guc_suspend(struct drm_i915_private *dev_priv);
-int intel_guc_resume(struct drm_i915_private *dev_priv);
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
-
-/* i915_guc_submission.c */
-int i915_guc_submission_init(struct drm_i915_private *dev_priv);
-int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
-struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-
-/* intel_guc_log.c */
-int intel_guc_log_create(struct intel_guc *guc);
-void intel_guc_log_destroy(struct intel_guc *guc);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
-void i915_guc_log_register(struct drm_i915_private *dev_priv);
-void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
-
-static inline u32 guc_ggtt_offset(struct i915_vma *vma)
-{
-       u32 offset = i915_ggtt_offset(vma);
-       GEM_BUG_ON(offset < GUC_WOPCM_TOP);
-       GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
-       return offset;
-}
-
-/* intel_huc.c */
-void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_init_hw(struct intel_huc *huc);
-void intel_huc_auth(struct intel_huc *huc);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
new file mode 100644 (file)
index 0000000..766b1cb
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "intel_uc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ *
+ * @dev_priv: device private
+ * @uc_fw: uC firmware
+ *
+ * Fetch uC firmware into GEM obj.
+ */
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+                      struct intel_uc_fw *uc_fw)
+{
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct drm_i915_gem_object *obj;
+       const struct firmware *fw = NULL;
+       struct uc_css_header *css;
+       size_t size;
+       int err;
+
+       if (!uc_fw->path)
+               return;
+
+       uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+       DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+                        intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+       err = request_firmware(&fw, uc_fw->path, &pdev->dev);
+       if (err)
+               goto fail;
+       if (!fw)
+               goto fail;
+
+       DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+                        uc_fw->path, fw);
+
+       /* Check the size of the blob before examining buffer contents */
+       if (fw->size < sizeof(struct uc_css_header)) {
+               DRM_NOTE("Firmware header is missing\n");
+               goto fail;
+       }
+
+       css = (struct uc_css_header *)fw->data;
+
+       /* Firmware bits always start from header */
+       uc_fw->header_offset = 0;
+       uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+                             css->key_size_dw - css->exponent_size_dw) *
+                            sizeof(u32);
+
+       if (uc_fw->header_size != sizeof(struct uc_css_header)) {
+               DRM_NOTE("CSS header definition mismatch\n");
+               goto fail;
+       }
+
+       /* then, uCode */
+       uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+       uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+       /* now RSA */
+       if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+               DRM_NOTE("RSA key size is bad\n");
+               goto fail;
+       }
+       uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+       uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+       /* At least, it should have header, uCode and RSA. Size of all three. */
+       size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
+       if (fw->size < size) {
+               DRM_NOTE("Missing firmware components\n");
+               goto fail;
+       }
+
+       /*
+        * The GuC firmware image has the version number embedded at a
+        * well-known offset within the firmware blob; note that major / minor
+        * version are TWO bytes each (i.e. u16), although all pointers and
+        * offsets are defined in terms of bytes (u8).
+        */
+       switch (uc_fw->type) {
+       case INTEL_UC_FW_TYPE_GUC:
+               /* Header and uCode will be loaded to WOPCM. Size of the two. */
+               size = uc_fw->header_size + uc_fw->ucode_size;
+
+               /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+               if (size > intel_guc_wopcm_size(dev_priv)) {
+                       DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+                       goto fail;
+               }
+               uc_fw->major_ver_found = css->guc.sw_version >> 16;
+               uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+               break;
+
+       case INTEL_UC_FW_TYPE_HUC:
+               uc_fw->major_ver_found = css->huc.sw_version >> 16;
+               uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+               break;
+
+       default:
+               DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
+               err = -ENOEXEC;
+               goto fail;
+       }
+
+       if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+               DRM_NOTE("Skipping %s firmware version check\n",
+                        intel_uc_fw_type_repr(uc_fw->type));
+       } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+                  uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+               DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
+                        intel_uc_fw_type_repr(uc_fw->type),
+                        uc_fw->major_ver_found, uc_fw->minor_ver_found,
+                        uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+               err = -ENOEXEC;
+               goto fail;
+       }
+
+       DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
+                        uc_fw->major_ver_found, uc_fw->minor_ver_found,
+                        uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+       obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto fail;
+       }
+
+       uc_fw->obj = obj;
+       uc_fw->size = fw->size;
+
+       DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+                        uc_fw->obj);
+
+       release_firmware(fw);
+       uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+       return;
+
+fail:
+       DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+                uc_fw->path, err);
+       DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+                        err, fw, uc_fw->obj);
+
+       release_firmware(fw);           /* OK even if fw is NULL */
+       uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+}
+
+/**
+ * intel_uc_fw_fini - cleanup uC firmware
+ *
+ * @uc_fw: uC firmware
+ *
+ * Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+       struct drm_i915_gem_object *obj;
+
+       obj = fetch_and_zero(&uc_fw->obj);
+       if (obj)
+               i915_gem_object_put(obj);
+
+       uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
new file mode 100644 (file)
index 0000000..c3e9af4
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_UC_FW_H_
+#define _INTEL_UC_FW_H_
+
+struct drm_i915_private;
+
+enum intel_uc_fw_status {
+       INTEL_UC_FIRMWARE_FAIL = -1,
+       INTEL_UC_FIRMWARE_NONE = 0,
+       INTEL_UC_FIRMWARE_PENDING,
+       INTEL_UC_FIRMWARE_SUCCESS
+};
+
+enum intel_uc_fw_type {
+       INTEL_UC_FW_TYPE_GUC,
+       INTEL_UC_FW_TYPE_HUC
+};
+
+/*
+ * This structure encapsulates all the data needed during the process
+ * of fetching, caching, and loading the firmware image into the uC.
+ */
+struct intel_uc_fw {
+       const char *path;
+       size_t size;
+       struct drm_i915_gem_object *obj;
+       enum intel_uc_fw_status fetch_status;
+       enum intel_uc_fw_status load_status;
+
+       u16 major_ver_wanted;
+       u16 minor_ver_wanted;
+       u16 major_ver_found;
+       u16 minor_ver_found;
+
+       enum intel_uc_fw_type type;
+       u32 header_size;
+       u32 header_offset;
+       u32 rsa_size;
+       u32 rsa_offset;
+       u32 ucode_size;
+       u32 ucode_offset;
+};
+
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+       switch (status) {
+       case INTEL_UC_FIRMWARE_FAIL:
+               return "FAIL";
+       case INTEL_UC_FIRMWARE_NONE:
+               return "NONE";
+       case INTEL_UC_FIRMWARE_PENDING:
+               return "PENDING";
+       case INTEL_UC_FIRMWARE_SUCCESS:
+               return "SUCCESS";
+       }
+       return "<invalid>";
+}
+
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+       switch (type) {
+       case INTEL_UC_FW_TYPE_GUC:
+               return "GuC";
+       case INTEL_UC_FW_TYPE_HUC:
+               return "HuC";
+       }
+       return "uC";
+}
+
+static inline
+void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
+{
+       uc_fw->path = NULL;
+       uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+       uc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
+       uc_fw->type = type;
+}
+
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+                      struct intel_uc_fw *uc_fw);
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+
+#endif
index b3c3f94fc7e46ae196c89522036fcbd27058eded..983617b5b338d3da3a9ae1a69a812a6de4068a36 100644 (file)
@@ -626,7 +626,23 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;
 
-       WARN_ON(dev_priv->uncore.fw_domains_active);
+       WARN(dev_priv->uncore.fw_domains_active,
+            "Expected all fw_domains to be inactive, but %08x are still on\n",
+            dev_priv->uncore.fw_domains_active);
+}
+
+void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+                             enum forcewake_domains fw_domains)
+{
+       if (!dev_priv->uncore.funcs.force_wake_get)
+               return;
+
+       assert_rpm_wakelock_held(dev_priv);
+
+       fw_domains &= dev_priv->uncore.fw_domains;
+       WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
+            "Expected %08x fw_domains to be active, but %08x are off\n",
+            fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
 }
 
 /* We give fast paths for the really cool registers */
index 03786f931905d02aca825780094120d49b7a24ac..582771251b57a28122f98a8092bfd1a3211c0590 100644 (file)
 #ifndef __INTEL_UNCORE_H__
 #define __INTEL_UNCORE_H__
 
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/hrtimer.h>
+
+#include "i915_reg.h"
+
 struct drm_i915_private;
 
 enum forcewake_domain_id {
@@ -131,6 +137,8 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+                             enum forcewake_domains fw_domains);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
 
 enum forcewake_domains
index c5c7e8efbdd3420a0da450044993e04d9a4f834b..a2632df3917353b246a1316f3322811476c24418 100644 (file)
@@ -37,8 +37,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
        kfree(pages);
 }
 
-static struct sg_table *
-huge_get_pages(struct drm_i915_gem_object *obj)
+static int huge_get_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
        const unsigned long nreal = obj->scratch / PAGE_SIZE;
@@ -49,11 +48,11 @@ huge_get_pages(struct drm_i915_gem_object *obj)
 
        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        if (sg_alloc_table(pages, npages, GFP)) {
                kfree(pages);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        sg = pages->sgl;
@@ -81,11 +80,14 @@ huge_get_pages(struct drm_i915_gem_object *obj)
        if (i915_gem_gtt_prepare_pages(obj, pages))
                goto err;
 
-       return pages;
+       __i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
+
+       return 0;
 
 err:
        huge_free_pages(obj, pages);
-       return ERR_PTR(-ENOMEM);
+
+       return -ENOMEM;
 #undef GFP
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
new file mode 100644 (file)
index 0000000..c53f847
--- /dev/null
@@ -0,0 +1,1734 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include <linux/prime_numbers.h>
+
+#include "mock_drm.h"
+
+static const unsigned int page_sizes[] = {
+       I915_GTT_PAGE_SIZE_2M,
+       I915_GTT_PAGE_SIZE_64K,
+       I915_GTT_PAGE_SIZE_4K,
+};
+
+static unsigned int get_largest_page_size(struct drm_i915_private *i915,
+                                         u64 rem)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+               unsigned int page_size = page_sizes[i];
+
+               if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
+                       return page_size;
+       }
+
+       return 0;
+}
+
+static void huge_pages_free_pages(struct sg_table *st)
+{
+       struct scatterlist *sg;
+
+       for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+               if (sg_page(sg))
+                       __free_pages(sg_page(sg), get_order(sg->length));
+       }
+
+       sg_free_table(st);
+       kfree(st);
+}
+
+static int get_huge_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+       unsigned int page_mask = obj->mm.page_mask;
+       struct sg_table *st;
+       struct scatterlist *sg;
+       unsigned int sg_page_sizes;
+       u64 rem;
+
+       st = kmalloc(sizeof(*st), GFP);
+       if (!st)
+               return -ENOMEM;
+
+       if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
+               kfree(st);
+               return -ENOMEM;
+       }
+
+       rem = obj->base.size;
+       sg = st->sgl;
+       st->nents = 0;
+       sg_page_sizes = 0;
+
+       /*
+        * Our goal here is simple, we want to greedily fill the object from
+        * largest to smallest page-size, while ensuring that we use *every*
+        * page-size as per the given page-mask.
+        */
+       do {
+               unsigned int bit = ilog2(page_mask);
+               unsigned int page_size = BIT(bit);
+               int order = get_order(page_size);
+
+               do {
+                       struct page *page;
+
+                       GEM_BUG_ON(order >= MAX_ORDER);
+                       page = alloc_pages(GFP | __GFP_ZERO, order);
+                       if (!page)
+                               goto err;
+
+                       sg_set_page(sg, page, page_size, 0);
+                       sg_page_sizes |= page_size;
+                       st->nents++;
+
+                       rem -= page_size;
+                       if (!rem) {
+                               sg_mark_end(sg);
+                               break;
+                       }
+
+                       sg = __sg_next(sg);
+               } while ((rem - ((page_size-1) & page_mask)) >= page_size);
+
+               page_mask &= (page_size-1);
+       } while (page_mask);
+
+       if (i915_gem_gtt_prepare_pages(obj, st))
+               goto err;
+
+       obj->mm.madv = I915_MADV_DONTNEED;
+
+       GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+       __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+       return 0;
+
+err:
+       sg_set_page(sg, NULL, 0, 0);
+       sg_mark_end(sg);
+       huge_pages_free_pages(st);
+
+       return -ENOMEM;
+}
+
+static void put_huge_pages(struct drm_i915_gem_object *obj,
+                          struct sg_table *pages)
+{
+       i915_gem_gtt_finish_pages(obj, pages);
+       huge_pages_free_pages(pages);
+
+       obj->mm.dirty = false;
+       obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops huge_page_ops = {
+       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+                I915_GEM_OBJECT_IS_SHRINKABLE,
+       .get_pages = get_huge_pages,
+       .put_pages = put_huge_pages,
+};
+
+static struct drm_i915_gem_object *
+huge_pages_object(struct drm_i915_private *i915,
+                 u64 size,
+                 unsigned int page_mask)
+{
+       struct drm_i915_gem_object *obj;
+
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
+
+       if (size >> PAGE_SHIFT > INT_MAX)
+               return ERR_PTR(-E2BIG);
+
+       if (overflows_type(size, obj->base.size))
+               return ERR_PTR(-E2BIG);
+
+       obj = i915_gem_object_alloc(i915);
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       drm_gem_private_object_init(&i915->drm, &obj->base, size);
+       i915_gem_object_init(obj, &huge_page_ops);
+
+       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->cache_level = I915_CACHE_NONE;
+
+       obj->mm.page_mask = page_mask;
+
+       return obj;
+}
+
+/*
+ * Fake get_pages: build an sg_table for the object without allocating any
+ * real pages.  The object is filled greedily with the largest page size
+ * that still fits the remaining length, and each chunk's dma address is
+ * (ab)used to record the page size it represents for later inspection.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       const u64 max_len = rounddown_pow_of_two(UINT_MAX);
+       struct sg_table *st;
+       struct scatterlist *sg;
+       unsigned int sg_page_sizes;
+       u64 rem;
+
+       st = kmalloc(sizeof(*st), GFP);
+       if (!st)
+               return -ENOMEM;
+
+       if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
+               kfree(st);
+               return -ENOMEM;
+       }
+
+       /* Use optimal page sized chunks to fill in the sg table */
+       rem = obj->base.size;
+       sg = st->sgl;
+       st->nents = 0;
+       sg_page_sizes = 0;
+       do {
+               unsigned int page_size = get_largest_page_size(i915, rem);
+               /* sg->length is unsigned int, so clamp each chunk to max_len */
+               unsigned int len = min(page_size * div_u64(rem, page_size),
+                                      max_len);
+
+               GEM_BUG_ON(!page_size);
+
+               sg->offset = 0;
+               sg->length = len;
+               sg_dma_len(sg) = len;
+               /* No real backing: stash the page size in the dma address */
+               sg_dma_address(sg) = page_size;
+
+               sg_page_sizes |= len;
+
+               st->nents++;
+
+               rem -= len;
+               if (!rem) {
+                       sg_mark_end(sg);
+                       break;
+               }
+
+               sg = sg_next(sg);
+       } while (1);
+
+       /* DONTNEED lets the shrinker discard these fake pages at will */
+       obj->mm.madv = I915_MADV_DONTNEED;
+
+       __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+       return 0;
+}
+
+/*
+ * Single-chunk variant of fake_get_huge_pages: the whole object is one sg
+ * entry whose page size is the largest that fits obj->base.size.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct sg_table *st;
+       struct scatterlist *sg;
+       unsigned int page_size;
+
+       st = kmalloc(sizeof(*st), GFP);
+       if (!st)
+               return -ENOMEM;
+
+       if (sg_alloc_table(st, 1, GFP)) {
+               kfree(st);
+               return -ENOMEM;
+       }
+
+       sg = st->sgl;
+       st->nents = 1;
+
+       page_size = get_largest_page_size(i915, obj->base.size);
+       GEM_BUG_ON(!page_size);
+
+       sg->offset = 0;
+       sg->length = obj->base.size;
+       sg_dma_len(sg) = obj->base.size;
+       /* No real backing: stash the page size in the dma address */
+       sg_dma_address(sg) = page_size;
+
+       obj->mm.madv = I915_MADV_DONTNEED;
+
+       __i915_gem_object_set_pages(obj, st, sg->length);
+
+       return 0;
+/* GFP was only needed by the fake get_pages helpers above */
+#undef GFP
+}
+
+/* Release an sg_table built by the fake get_pages helpers: there are no
+ * real pages to put, only the table itself needs freeing.
+ */
+static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
+                                struct sg_table *pages)
+{
+       sg_free_table(pages);
+       kfree(pages);
+}
+
+/* put_pages for the fake-backed objects: reset the object's page state
+ * and drop the sg_table.
+ */
+static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
+                               struct sg_table *pages)
+{
+       obj->mm.dirty = false;
+       obj->mm.madv = I915_MADV_WILLNEED;
+       fake_free_huge_pages(obj, pages);
+}
+
+/* Ops for fake (unbacked) objects filled with mixed page-size chunks. */
+static const struct drm_i915_gem_object_ops fake_ops = {
+       .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+       .get_pages = fake_get_huge_pages,
+       .put_pages = fake_put_huge_pages,
+};
+
+/* Ops for fake (unbacked) objects backed by a single sg chunk. */
+static const struct drm_i915_gem_object_ops fake_ops_single = {
+       .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+       .get_pages = fake_get_huge_pages_single,
+       .put_pages = fake_put_huge_pages,
+};
+
+/*
+ * Create a GEM object with fake (unbacked) huge-page storage.  With
+ * @single the whole object is one sg chunk of the largest supported page
+ * size; otherwise it is filled greedily from largest to smallest page
+ * size.  Returns the object or an ERR_PTR on failure.
+ */
+static struct drm_i915_gem_object *
+fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
+{
+       const struct drm_i915_gem_object_ops *ops =
+               single ? &fake_ops_single : &fake_ops;
+       struct drm_i915_gem_object *obj;
+
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+       /* Reject sizes whose page count or byte size cannot be represented */
+       if (size >> PAGE_SHIFT > UINT_MAX ||
+           overflows_type(size, obj->base.size))
+               return ERR_PTR(-E2BIG);
+
+       obj = i915_gem_object_alloc(i915);
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       drm_gem_private_object_init(&i915->drm, &obj->base, size);
+       i915_gem_object_init(obj, ops);
+
+       obj->cache_level = I915_CACHE_NONE;
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+       return obj;
+}
+
+/*
+ * Consistency checks on the page-size tracking of a bound vma: every
+ * recorded size must be supported by the device, the vma must agree with
+ * its object, and obj->mm.page_sizes.gtt must never be set.  All failing
+ * checks are reported; returns -EINVAL if any check failed, else 0.
+ */
+static int igt_check_page_sizes(struct i915_vma *vma)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       unsigned int supported = INTEL_INFO(i915)->page_sizes;
+       int err = 0;
+
+       if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
+               pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
+                      vma->page_sizes.sg & ~supported, supported);
+               err = -EINVAL;
+       }
+
+       if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
+               pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
+                      vma->page_sizes.gtt & ~supported, supported);
+               err = -EINVAL;
+       }
+
+       if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
+               pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
+                      vma->page_sizes.phys, obj->mm.page_sizes.phys);
+               err = -EINVAL;
+       }
+
+       if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
+               pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
+                      vma->page_sizes.sg, obj->mm.page_sizes.sg);
+               err = -EINVAL;
+       }
+
+       /* Only the vma tracks a gtt page size; the object never does */
+       if (obj->mm.page_sizes.gtt) {
+               pr_err("obj->page_sizes.gtt(%u) should never be set\n",
+                      obj->mm.page_sizes.gtt);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+/*
+ * For every non-empty combination of the mock device's page sizes,
+ * advertise exactly that combination, create a fake object of matching
+ * size and verify the bound vma reports the same sg page sizes.  The
+ * device's real page-size mask is restored before returning.
+ */
+static int igt_mock_exhaust_device_supported_pages(void *arg)
+{
+       struct i915_hw_ppgtt *ppgtt = arg;
+       struct drm_i915_private *i915 = ppgtt->base.i915;
+       unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int i, j, single;
+       int err;
+
+       /*
+        * Sanity check creating objects with every valid page support
+        * combination for our mock device.
+        */
+
+       for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
+               unsigned int combination = 0;
+
+               for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
+                       if (i & BIT(j))
+                               combination |= page_sizes[j];
+               }
+
+               /* Pretend the device supports exactly this combination */
+               mkwrite_device_info(i915)->page_sizes = combination;
+
+               for (single = 0; single <= 1; ++single) {
+                       obj = fake_huge_pages_object(i915, combination, !!single);
+                       if (IS_ERR(obj)) {
+                               err = PTR_ERR(obj);
+                               goto out_device;
+                       }
+
+                       if (obj->base.size != combination) {
+                               pr_err("obj->base.size=%zu, expected=%u\n",
+                                      obj->base.size, combination);
+                               err = -EINVAL;
+                               goto out_put;
+                       }
+
+                       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+                       if (IS_ERR(vma)) {
+                               err = PTR_ERR(vma);
+                               goto out_put;
+                       }
+
+                       err = i915_vma_pin(vma, 0, 0, PIN_USER);
+                       if (err)
+                               goto out_close;
+
+                       err = igt_check_page_sizes(vma);
+
+                       if (vma->page_sizes.sg != combination) {
+                               pr_err("page_sizes.sg=%u, expected=%u\n",
+                                      vma->page_sizes.sg, combination);
+                               err = -EINVAL;
+                       }
+
+                       i915_vma_unpin(vma);
+                       i915_vma_close(vma);
+
+                       i915_gem_object_put(obj);
+
+                       if (err)
+                               goto out_device;
+               }
+       }
+
+       goto out_device;
+
+out_close:
+       i915_vma_close(vma);
+out_put:
+       i915_gem_object_put(obj);
+out_device:
+       /* Restore the real page-size mask on every exit path */
+       mkwrite_device_info(i915)->page_sizes = saved_mask;
+
+       return err;
+}
+
+/*
+ * For each supported huge page size (64K and up), bind a fake object at a
+ * properly aligned offset and confirm the gtt uses that page size, then
+ * walk all misaligned 4K offsets up to the page-size boundary and confirm
+ * the binding falls back to 4K pages each time.
+ */
+static int igt_mock_ppgtt_misaligned_dma(void *arg)
+{
+       struct i915_hw_ppgtt *ppgtt = arg;
+       struct drm_i915_private *i915 = ppgtt->base.i915;
+       unsigned long supported = INTEL_INFO(i915)->page_sizes;
+       struct drm_i915_gem_object *obj;
+       int bit;
+       int err;
+
+       /*
+        * Sanity check dma misalignment for huge pages -- the dma addresses we
+        * insert into the paging structures need to always respect the page
+        * size alignment.
+        */
+
+       bit = ilog2(I915_GTT_PAGE_SIZE_64K);
+
+       for_each_set_bit_from(bit, &supported,
+                             ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+               IGT_TIMEOUT(end_time);
+               unsigned int page_size = BIT(bit);
+               unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+               unsigned int offset;
+               /* Big enough to hold one aligned chunk of this page size */
+               unsigned int size =
+                       round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
+               struct i915_vma *vma;
+
+               obj = fake_huge_pages_object(i915, size, true);
+               if (IS_ERR(obj))
+                       return PTR_ERR(obj);
+
+               if (obj->base.size != size) {
+                       pr_err("obj->base.size=%zu, expected=%u\n",
+                              obj->base.size, size);
+                       err = -EINVAL;
+                       goto out_put;
+               }
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto out_put;
+
+               /* Force the page size for this object */
+               obj->mm.page_sizes.sg = page_size;
+
+               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               if (IS_ERR(vma)) {
+                       err = PTR_ERR(vma);
+                       goto out_unpin;
+               }
+
+               err = i915_vma_pin(vma, 0, 0, flags);
+               if (err) {
+                       i915_vma_close(vma);
+                       goto out_unpin;
+               }
+
+
+               err = igt_check_page_sizes(vma);
+
+               /* At offset 0 the gtt must use the full page size */
+               if (vma->page_sizes.gtt != page_size) {
+                       pr_err("page_sizes.gtt=%u, expected %u\n",
+                              vma->page_sizes.gtt, page_size);
+                       err = -EINVAL;
+               }
+
+               i915_vma_unpin(vma);
+
+               if (err) {
+                       i915_vma_close(vma);
+                       goto out_unpin;
+               }
+
+               /*
+                * Try all the other valid offsets until the next
+                * boundary -- should always fall back to using 4K
+                * pages.
+                */
+               for (offset = 4096; offset < page_size; offset += 4096) {
+                       err = i915_vma_unbind(vma);
+                       if (err) {
+                               i915_vma_close(vma);
+                               goto out_unpin;
+                       }
+
+                       err = i915_vma_pin(vma, 0, 0, flags | offset);
+                       if (err) {
+                               i915_vma_close(vma);
+                               goto out_unpin;
+                       }
+
+                       err = igt_check_page_sizes(vma);
+
+                       if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
+                               pr_err("page_sizes.gtt=%u, expected %lu\n",
+                                      vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
+                               err = -EINVAL;
+                       }
+
+                       i915_vma_unpin(vma);
+
+                       if (err) {
+                               i915_vma_close(vma);
+                               goto out_unpin;
+                       }
+
+                       if (igt_timeout(end_time,
+                                       "%s timed out at offset %x with page-size %x\n",
+                                       __func__, offset, page_size))
+                               break;
+                       }
+
+               i915_vma_close(vma);
+
+               i915_gem_object_unpin_pages(obj);
+               i915_gem_object_put(obj);
+       }
+
+       return 0;
+
+/* Error exits: the current object still holds its page pin */
+out_unpin:
+       i915_gem_object_unpin_pages(obj);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+/* Tear down every object on @objects: close its vma in @ppgtt (if one
+ * exists), drop the page pin taken at creation and release the object.
+ */
+static void close_object_list(struct list_head *objects,
+                             struct i915_hw_ppgtt *ppgtt)
+{
+       struct drm_i915_gem_object *obj, *next;
+
+       list_for_each_entry_safe(obj, next, objects, st_link) {
+               struct i915_vma *vma =
+                       i915_vma_instance(obj, &ppgtt->base, NULL);
+
+               if (!IS_ERR(vma))
+                       i915_vma_close(vma);
+
+               list_del(&obj->st_link);
+               i915_gem_object_unpin_pages(obj);
+               i915_gem_object_put(obj);
+       }
+}
+
+/*
+ * Fill the mock ppgtt with fake objects of prime page counts, alternating
+ * between single-chunk and multi-chunk backing, and verify that the gtt
+ * page sizes chosen for each binding match what the device supports and
+ * that 64K-backed vmas honour the 2M alignment/padding rules.
+ *
+ * Returns 0 on success; running out of memory or address space is not
+ * treated as a failure.
+ */
+static int igt_mock_ppgtt_huge_fill(void *arg)
+{
+       struct i915_hw_ppgtt *ppgtt = arg;
+       struct drm_i915_private *i915 = ppgtt->base.i915;
+       unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+       unsigned long page_num;
+       bool single = false;
+       LIST_HEAD(objects);
+       IGT_TIMEOUT(end_time);
+       int err = 0; /* don't read err uninitialized if the loop never runs */
+
+       for_each_prime_number_from(page_num, 1, max_pages) {
+               struct drm_i915_gem_object *obj;
+               u64 size = page_num << PAGE_SHIFT;
+               struct i915_vma *vma;
+               unsigned int expected_gtt = 0;
+               int i;
+
+               obj = fake_huge_pages_object(i915, size, single);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       break;
+               }
+
+               if (obj->base.size != size) {
+                       pr_err("obj->base.size=%zd, expected=%llu\n",
+                              obj->base.size, size);
+                       i915_gem_object_put(obj);
+                       err = -EINVAL;
+                       break;
+               }
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err) {
+                       i915_gem_object_put(obj);
+                       break;
+               }
+
+               /* From here the object is cleaned up via close_object_list() */
+               list_add(&obj->st_link, &objects);
+
+               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               if (IS_ERR(vma)) {
+                       err = PTR_ERR(vma);
+                       break;
+               }
+
+               err = i915_vma_pin(vma, 0, 0, PIN_USER);
+               if (err)
+                       break;
+
+               err = igt_check_page_sizes(vma);
+               if (err) {
+                       i915_vma_unpin(vma);
+                       break;
+               }
+
+               /*
+                * Figure out the expected gtt page size knowing that we go from
+                * largest to smallest page size sg chunks, and that we align to
+                * the largest page size.
+                */
+               for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+                       unsigned int page_size = page_sizes[i];
+
+                       if (HAS_PAGE_SIZES(i915, page_size) &&
+                           size >= page_size) {
+                               expected_gtt |= page_size;
+                               size &= page_size-1;
+                       }
+               }
+
+               GEM_BUG_ON(!expected_gtt);
+               GEM_BUG_ON(size);
+
+               /* Any 4K tail forces the 64K chunks down to 4K as well */
+               if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
+                       expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
+
+               i915_vma_unpin(vma);
+
+               if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+                       if (!IS_ALIGNED(vma->node.start,
+                                       I915_GTT_PAGE_SIZE_2M)) {
+                               pr_err("node.start(%llx) not aligned to 2M\n",
+                                      vma->node.start);
+                               err = -EINVAL;
+                               break;
+                       }
+
+                       if (!IS_ALIGNED(vma->node.size,
+                                       I915_GTT_PAGE_SIZE_2M)) {
+                               pr_err("node.size(%llx) not aligned to 2M\n",
+                                      vma->node.size);
+                               err = -EINVAL;
+                               break;
+                       }
+               }
+
+               if (vma->page_sizes.gtt != expected_gtt) {
+                       pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
+                              vma->page_sizes.gtt, expected_gtt,
+                              obj->base.size, yesno(!!single));
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (igt_timeout(end_time,
+                               "%s timed out at size %zd\n",
+                               __func__, obj->base.size))
+                       break;
+
+               single = !single;
+       }
+
+       close_object_list(&objects, ppgtt);
+
+       /* Running out of memory or address space is not a test failure */
+       if (err == -ENOMEM || err == -ENOSPC)
+               err = 0;
+
+       return err;
+}
+
+/*
+ * Exercise the 64K page-size rules: a 2M page-table block may only be
+ * marked as 64K when it is fully covered and properly aligned, otherwise
+ * the binding must fall back to 4K pages.  Each entry in @objects encodes
+ * a size/offset and the gtt page size the binding is expected to report.
+ */
+static int igt_mock_ppgtt_64K(void *arg)
+{
+       struct i915_hw_ppgtt *ppgtt = arg;
+       struct drm_i915_private *i915 = ppgtt->base.i915;
+       struct drm_i915_gem_object *obj;
+       const struct object_info {
+               unsigned int size;
+               unsigned int gtt;
+               unsigned int offset;
+       } objects[] = {
+               /* Cases with forced padding/alignment */
+               {
+                       .size = SZ_64K,
+                       .gtt = I915_GTT_PAGE_SIZE_64K,
+                       .offset = 0,
+               },
+               {
+                       .size = SZ_64K + SZ_4K,
+                       .gtt = I915_GTT_PAGE_SIZE_4K,
+                       .offset = 0,
+               },
+               {
+                       .size = SZ_64K - SZ_4K,
+                       .gtt = I915_GTT_PAGE_SIZE_4K,
+                       .offset = 0,
+               },
+               {
+                       .size = SZ_2M,
+                       .gtt = I915_GTT_PAGE_SIZE_64K,
+                       .offset = 0,
+               },
+               {
+                       .size = SZ_2M - SZ_4K,
+                       .gtt = I915_GTT_PAGE_SIZE_4K,
+                       .offset = 0,
+               },
+               {
+                       .size = SZ_2M + SZ_4K,
+                       .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
+                       .offset = 0,
+               },
+               {
+                       .size = SZ_2M + SZ_64K,
+                       .gtt = I915_GTT_PAGE_SIZE_64K,
+                       .offset = 0,
+               },
+               {
+                       .size = SZ_2M - SZ_64K,
+                       .gtt = I915_GTT_PAGE_SIZE_64K,
+                       .offset = 0,
+               },
+               /* Try without any forced padding/alignment */
+               {
+                       .size = SZ_64K,
+                       .offset = SZ_2M,
+                       .gtt = I915_GTT_PAGE_SIZE_4K,
+               },
+               {
+                       .size = SZ_128K,
+                       .offset = SZ_2M - SZ_64K,
+                       .gtt = I915_GTT_PAGE_SIZE_4K,
+               },
+       };
+       struct i915_vma *vma;
+       int i, single;
+       int err;
+
+       /*
+        * Sanity check some of the trickiness with 64K pages -- either we can
+        * safely mark the whole page-table(2M block) as 64K, or we have to
+        * always fallback to 4K.
+        */
+
+       if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(objects); ++i) {
+               unsigned int size = objects[i].size;
+               unsigned int expected_gtt = objects[i].gtt;
+               unsigned int offset = objects[i].offset;
+               unsigned int flags = PIN_USER;
+
+               /*
+                * NOTE(review): flags is shared across both 'single'
+                * iterations, so PIN_OFFSET_FIXED | offset sticks after the
+                * first pass -- harmless while offset is loop-invariant, but
+                * confirm if offsets ever vary within an entry.
+                */
+               for (single = 0; single <= 1; single++) {
+                       obj = fake_huge_pages_object(i915, size, !!single);
+                       if (IS_ERR(obj))
+                               return PTR_ERR(obj);
+
+                       err = i915_gem_object_pin_pages(obj);
+                       if (err)
+                               goto out_object_put;
+
+                       /*
+                        * Disable 2M pages -- We only want to use 64K/4K pages
+                        * for this test.
+                        */
+                       obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
+
+                       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+                       if (IS_ERR(vma)) {
+                               err = PTR_ERR(vma);
+                               goto out_object_unpin;
+                       }
+
+                       if (offset)
+                               flags |= PIN_OFFSET_FIXED | offset;
+
+                       err = i915_vma_pin(vma, 0, 0, flags);
+                       if (err)
+                               goto out_vma_close;
+
+                       err = igt_check_page_sizes(vma);
+                       if (err)
+                               goto out_vma_unpin;
+
+                       /* 64K use must imply full 2M alignment and padding */
+                       if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+                               if (!IS_ALIGNED(vma->node.start,
+                                               I915_GTT_PAGE_SIZE_2M)) {
+                                       pr_err("node.start(%llx) not aligned to 2M\n",
+                                              vma->node.start);
+                                       err = -EINVAL;
+                                       goto out_vma_unpin;
+                               }
+
+                               if (!IS_ALIGNED(vma->node.size,
+                                               I915_GTT_PAGE_SIZE_2M)) {
+                                       pr_err("node.size(%llx) not aligned to 2M\n",
+                                              vma->node.size);
+                                       err = -EINVAL;
+                                       goto out_vma_unpin;
+                               }
+                       }
+
+                       if (vma->page_sizes.gtt != expected_gtt) {
+                               pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
+                                      vma->page_sizes.gtt, expected_gtt, i,
+                                      yesno(!!single));
+                               err = -EINVAL;
+                               goto out_vma_unpin;
+                       }
+
+                       i915_vma_unpin(vma);
+                       i915_vma_close(vma);
+
+                       i915_gem_object_unpin_pages(obj);
+                       i915_gem_object_put(obj);
+               }
+       }
+
+       return 0;
+
+/* Error exits: unwind exactly what the failing iteration acquired */
+out_vma_unpin:
+       i915_vma_unpin(vma);
+out_vma_close:
+       i915_vma_close(vma);
+out_object_unpin:
+       i915_gem_object_unpin_pages(obj);
+out_object_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+/*
+ * Build and pin a batch buffer that stores @val at byte offset @offset
+ * within each page spanned by @vma, using the MI_STORE_DWORD_IMM variant
+ * appropriate for the device generation.  Returns the pinned batch vma,
+ * or an ERR_PTR on failure (the batch object is released on error).
+ */
+static struct i915_vma *
+gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
+{
+       struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+       const int gen = INTEL_GEN(vma->vm->i915);
+       unsigned int count = vma->size >> PAGE_SHIFT;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *batch;
+       unsigned int size;
+       u32 *cmd;
+       int n;
+       int err;
+
+       /* Worst case: four dwords per store plus the final BB_END */
+       size = (1 + 4 * count) * sizeof(u32);
+       size = round_up(size, PAGE_SIZE);
+       obj = i915_gem_object_create_internal(i915, size);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(cmd)) {
+               err = PTR_ERR(cmd);
+               goto err;
+       }
+
+       /* Store addresses are relative to the vma's binding */
+       offset += vma->node.start;
+
+       for (n = 0; n < count; n++) {
+               if (gen >= 8) {
+                       /* gen8+: 64b address split across two dwords */
+                       *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+                       *cmd++ = lower_32_bits(offset);
+                       *cmd++ = upper_32_bits(offset);
+                       *cmd++ = val;
+               } else if (gen >= 4) {
+                       *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+                               (gen < 6 ? 1 << 22 : 0);
+                       *cmd++ = 0;
+                       *cmd++ = offset;
+                       *cmd++ = val;
+               } else {
+                       *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+                       *cmd++ = offset;
+                       *cmd++ = val;
+               }
+
+               offset += PAGE_SIZE;
+       }
+
+       *cmd = MI_BATCH_BUFFER_END;
+
+       i915_gem_object_unpin_map(obj);
+
+       err = i915_gem_object_set_to_gtt_domain(obj, false);
+       if (err)
+               goto err;
+
+       batch = i915_vma_instance(obj, vma->vm, NULL);
+       if (IS_ERR(batch)) {
+               err = PTR_ERR(batch);
+               goto err;
+       }
+
+       err = i915_vma_pin(batch, 0, 0, PIN_USER);
+       if (err)
+               goto err;
+
+       return batch;
+
+err:
+       i915_gem_object_put(obj);
+
+       return ERR_PTR(err);
+}
+
+/*
+ * Submit a request on @engine that writes @value into dword index @dword
+ * of every page of @vma, using a batch built by gpu_write_dw().  The
+ * request is always added (even on error) so it is never leaked.
+ */
+static int gpu_write(struct i915_vma *vma,
+                    struct i915_gem_context *ctx,
+                    struct intel_engine_cs *engine,
+                    u32 dword,
+                    u32 value)
+{
+       struct drm_i915_gem_request *rq;
+       struct i915_vma *batch;
+       int flags = 0;
+       int err;
+
+       GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+
+       err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+       if (err)
+               return err;
+
+       rq = i915_gem_request_alloc(engine, ctx);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       batch = gpu_write_dw(vma, dword * sizeof(u32), value);
+       if (IS_ERR(batch)) {
+               err = PTR_ERR(batch);
+               goto err_request;
+       }
+
+       /* The request now owns the batch; it is freed once the rq retires */
+       i915_vma_move_to_active(batch, rq, 0);
+       i915_gem_object_set_active_reference(batch->obj);
+       i915_vma_unpin(batch);
+       i915_vma_close(batch);
+
+       err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+       if (err)
+               goto err_request;
+
+       err = i915_switch_context(rq);
+       if (err)
+               goto err_request;
+
+       err = rq->engine->emit_bb_start(rq,
+                                       batch->node.start, batch->node.size,
+                                       flags);
+       if (err)
+               goto err_request;
+
+       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
+       /* Serialise future access against this GPU write */
+       reservation_object_lock(vma->resv, NULL);
+       reservation_object_add_excl_fence(vma->resv, &rq->fence);
+       reservation_object_unlock(vma->resv);
+
+err_request:
+       /* Commit the request on all paths; err != 0 flags it as failed */
+       __i915_add_request(rq, err == 0);
+
+       return err;
+}
+
+/*
+ * Read back dword index @dword from every page of @obj through the CPU
+ * and verify each one equals @val.  Returns 0 on success, -EINVAL on the
+ * first mismatch, or the error from preparing CPU access.
+ */
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+       unsigned long npages = obj->base.size >> PAGE_SHIFT;
+       unsigned int needs_flush;
+       unsigned long n;
+       int err;
+
+       err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+       if (err)
+               return err;
+
+       for (n = 0; n < npages; ++n) {
+               u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+               u32 found;
+
+               if (needs_flush & CLFLUSH_BEFORE)
+                       drm_clflush_virt_range(ptr, PAGE_SIZE);
+
+               /* Copy out the value before dropping the atomic mapping */
+               found = ptr[dword];
+               kunmap_atomic(ptr);
+
+               if (found != val) {
+                       pr_err("n=%lu ptr[%u]=%u, val=%u\n",
+                              n, dword, found, val);
+                       err = -EINVAL;
+                       break;
+               }
+       }
+
+       i915_gem_obj_finish_shmem_access(obj);
+
+       return err;
+}
+
+/*
+ * Bind @obj at a series of prime-derived fixed offsets and, on every
+ * engine that can store a dword, write a per-iteration value from the GPU
+ * and read it back through the CPU, checking the page-size bookkeeping at
+ * each binding.  @obj must already have its pages pinned.
+ */
+static int igt_write_huge(struct i915_gem_context *ctx,
+                         struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct intel_engine_cs *engine;
+       struct i915_vma *vma;
+       unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+       unsigned int max_page_size;
+       unsigned int id;
+       u64 max;
+       u64 num;
+       u64 size;
+       int err = 0;
+
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+       /* 64K-backed vmas may be padded out to 2M; reserve room for that */
+       size = obj->base.size;
+       if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+               size = round_up(size, I915_GTT_PAGE_SIZE_2M)
+
+       max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+       max = div_u64((vm->total - size), max_page_size);
+
+       vma = i915_vma_instance(obj, vm, NULL);
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
+
+       for_each_engine(engine, i915, id) {
+               IGT_TIMEOUT(end_time);
+
+               if (!intel_engine_can_store_dword(engine)) {
+                       pr_info("store-dword-imm not supported on engine=%u\n",
+                               id);
+                       continue;
+               }
+
+               /*
+                * Try various offsets until we timeout -- we want to avoid
+                * issues hidden by effectively always using offset = 0.
+                */
+               for_each_prime_number_from(num, 0, max) {
+                       u64 offset = num * max_page_size;
+                       u32 dword;
+
+                       err = i915_vma_unbind(vma);
+                       if (err)
+                               goto out_vma_close;
+
+                       err = i915_vma_pin(vma, size, max_page_size, flags | offset);
+                       if (err) {
+                               /*
+                                * The ggtt may have some pages reserved so
+                                * refrain from erroring out.
+                                */
+                               if (err == -ENOSPC && i915_is_ggtt(vm)) {
+                                       err = 0;
+                                       continue;
+                               }
+
+                               goto out_vma_close;
+                       }
+
+                       err = igt_check_page_sizes(vma);
+                       if (err)
+                               goto out_vma_unpin;
+
+                       dword = offset_in_page(num) / 4;
+
+                       err = gpu_write(vma, ctx, engine, dword, num + 1);
+                       if (err) {
+                               pr_err("gpu-write failed at offset=%llx", offset);
+                               goto out_vma_unpin;
+                       }
+
+                       err = cpu_check(obj, dword, num + 1);
+                       if (err) {
+                               pr_err("cpu-check failed at offset=%llx", offset);
+                               goto out_vma_unpin;
+                       }
+
+                       i915_vma_unpin(vma);
+
+                       if (num > 0 &&
+                           igt_timeout(end_time,
+                                       "%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
+                                       __func__, id, offset, max_page_size))
+                               break;
+               }
+       }
+
+/* We may arrive here with the vma pinned (error path) or not (success) */
+out_vma_unpin:
+       if (i915_vma_is_pinned(vma))
+               i915_vma_unpin(vma);
+out_vma_close:
+       i915_vma_close(vma);
+
+       return err;
+}
+
+static int igt_ppgtt_exhaust_huge(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       unsigned long supported = INTEL_INFO(i915)->page_sizes;
+       static unsigned int pages[ARRAY_SIZE(page_sizes)];
+       struct drm_i915_gem_object *obj;
+       unsigned int size_mask;
+       unsigned int page_mask;
+       int n, i;
+       int err;
+
+       /*
+        * Sanity check creating objects with a varying mix of page sizes --
+        * ensuring that our writes lands in the right place.
+        */
+
+       n = 0;
+       for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
+               pages[n++] = BIT(i);
+
+       for (size_mask = 2; size_mask < BIT(n); size_mask++) {
+               unsigned int size = 0;
+
+               for (i = 0; i < n; i++) {
+                       if (size_mask & BIT(i))
+                               size |= pages[i];
+               }
+
+               /*
+                * For our page mask we want to enumerate all the page-size
+                * combinations which will fit into our chosen object size.
+                */
+               for (page_mask = 2; page_mask <= size_mask; page_mask++) {
+                       unsigned int page_sizes = 0;
+
+                       for (i = 0; i < n; i++) {
+                               if (page_mask & BIT(i))
+                                       page_sizes |= pages[i];
+                       }
+
+                       /*
+                        * Ensure that we can actually fill the given object
+                        * with our chosen page mask.
+                        */
+                       if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
+                               continue;
+
+                       obj = huge_pages_object(i915, size, page_sizes);
+                       if (IS_ERR(obj)) {
+                               err = PTR_ERR(obj);
+                               goto out_device;
+                       }
+
+                       err = i915_gem_object_pin_pages(obj);
+                       if (err) {
+                               i915_gem_object_put(obj);
+
+                               if (err == -ENOMEM) {
+                                       pr_info("unable to get pages, size=%u, pages=%u\n",
+                                               size, page_sizes);
+                                       err = 0;
+                                       break;
+                               }
+
+                               pr_err("pin_pages failed, size=%u, pages=%u\n",
+                                      size_mask, page_mask);
+
+                               goto out_device;
+                       }
+
+                       /* Force the page-size for the gtt insertion */
+                       obj->mm.page_sizes.sg = page_sizes;
+
+                       err = igt_write_huge(ctx, obj);
+                       if (err) {
+                               pr_err("exhaust write-huge failed with size=%u\n",
+                                      size);
+                               goto out_unpin;
+                       }
+
+                       i915_gem_object_unpin_pages(obj);
+                       i915_gem_object_put(obj);
+               }
+       }
+
+       goto out_device;
+
+out_unpin:
+       i915_gem_object_unpin_pages(obj);
+       i915_gem_object_put(obj);
+out_device:
+       mkwrite_device_info(i915)->page_sizes = supported;
+
+       return err;
+}
+
+static int igt_ppgtt_internal_huge(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_object *obj;
+       static const unsigned int sizes[] = {
+               SZ_64K,
+               SZ_128K,
+               SZ_256K,
+               SZ_512K,
+               SZ_1M,
+               SZ_2M,
+       };
+       int i;
+       int err;
+
+       /*
+        * Sanity check that the HW uses huge pages correctly through internal
+        * -- ensure that our writes land in the right place.
+        */
+
+       for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+               unsigned int size = sizes[i];
+
+               obj = i915_gem_object_create_internal(i915, size);
+               if (IS_ERR(obj))
+                       return PTR_ERR(obj);
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto out_put;
+
+               if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+                       pr_info("internal unable to allocate huge-page(s) with size=%u\n",
+                               size);
+                       goto out_unpin;
+               }
+
+               err = igt_write_huge(ctx, obj);
+               if (err) {
+                       pr_err("internal write-huge failed with size=%u\n",
+                              size);
+                       goto out_unpin;
+               }
+
+               i915_gem_object_unpin_pages(obj);
+               i915_gem_object_put(obj);
+       }
+
+       return 0;
+
+out_unpin:
+       i915_gem_object_unpin_pages(obj);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
+{
+       return i915->mm.gemfs && has_transparent_hugepage();
+}
+
+static int igt_ppgtt_gemfs_huge(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_object *obj;
+       static const unsigned int sizes[] = {
+               SZ_2M,
+               SZ_4M,
+               SZ_8M,
+               SZ_16M,
+               SZ_32M,
+       };
+       int i;
+       int err;
+
+       /*
+        * Sanity check that the HW uses huge pages correctly through gemfs --
+        * ensure that our writes land in the right place.
+        */
+
+       if (!igt_can_allocate_thp(i915)) {
+               pr_info("missing THP support, skipping\n");
+               return 0;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+               unsigned int size = sizes[i];
+
+               obj = i915_gem_object_create(i915, size);
+               if (IS_ERR(obj))
+                       return PTR_ERR(obj);
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto out_put;
+
+               if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+                       pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
+                               size);
+                       goto out_unpin;
+               }
+
+               err = igt_write_huge(ctx, obj);
+               if (err) {
+                       pr_err("gemfs write-huge failed with size=%u\n",
+                              size);
+                       goto out_unpin;
+               }
+
+               i915_gem_object_unpin_pages(obj);
+               i915_gem_object_put(obj);
+       }
+
+       return 0;
+
+out_unpin:
+       i915_gem_object_unpin_pages(obj);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+static int igt_ppgtt_pin_update(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *dev_priv = ctx->i915;
+       unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
+       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+       int first, last;
+       int err;
+
+       /*
+        * Make sure there's no funny business when doing a PIN_UPDATE -- in the
+        * past we had a subtle issue with being able to incorrectly do multiple
+        * alloc va ranges on the same object when doing a PIN_UPDATE, which
+        * resulted in some pretty nasty bugs, though only when using
+        * huge-gtt-pages.
+        */
+
+       if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+               pr_info("48b PPGTT not supported, skipping\n");
+               return 0;
+       }
+
+       first = ilog2(I915_GTT_PAGE_SIZE_64K);
+       last = ilog2(I915_GTT_PAGE_SIZE_2M);
+
+       for_each_set_bit_from(first, &supported, last + 1) {
+               unsigned int page_size = BIT(first);
+
+               obj = i915_gem_object_create_internal(dev_priv, page_size);
+               if (IS_ERR(obj))
+                       return PTR_ERR(obj);
+
+               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               if (IS_ERR(vma)) {
+                       err = PTR_ERR(vma);
+                       goto out_put;
+               }
+
+               err = i915_vma_pin(vma, SZ_2M, 0, flags);
+               if (err)
+                       goto out_close;
+
+               if (vma->page_sizes.sg < page_size) {
+                       pr_info("Unable to allocate page-size %x, finishing test early\n",
+                               page_size);
+                       goto out_unpin;
+               }
+
+               err = igt_check_page_sizes(vma);
+               if (err)
+                       goto out_unpin;
+
+               if (vma->page_sizes.gtt != page_size) {
+                       dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);
+
+                       /*
+                        * The only valid reason for this to ever fail would be
+                        * if the dma-mapper screwed us over when we did the
+                        * dma_map_sg(), since it has the final say over the dma
+                        * address.
+                        */
+                       if (IS_ALIGNED(addr, page_size)) {
+                               pr_err("page_sizes.gtt=%u, expected=%u\n",
+                                      vma->page_sizes.gtt, page_size);
+                               err = -EINVAL;
+                       } else {
+                               pr_info("dma address misaligned, finishing test early\n");
+                       }
+
+                       goto out_unpin;
+               }
+
+               err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+               if (err)
+                       goto out_unpin;
+
+               i915_vma_unpin(vma);
+               i915_vma_close(vma);
+
+               i915_gem_object_put(obj);
+       }
+
+       obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto out_put;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, flags);
+       if (err)
+               goto out_close;
+
+       /*
+        * Make sure we don't end up with something like where the pde is still
+        * pointing to the 2M page, and the pt we just filled-in is dangling --
+        * we can check this by writing to the first page where it would then
+        * land in the now stale 2M page.
+        */
+
+       err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+       if (err)
+               goto out_unpin;
+
+       err = cpu_check(obj, 0, 0xdeadbeaf);
+
+out_unpin:
+       i915_vma_unpin(vma);
+out_close:
+       i915_vma_close(vma);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+static int igt_tmpfs_fallback(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct vfsmount *gemfs = i915->mm.gemfs;
+       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       u32 *vaddr;
+       int err = 0;
+
+       /*
+        * Make sure that we don't burst into a ball of flames upon falling back
+        * to tmpfs, which we rely on if on the off-chance we encounter a failure
+        * when setting up gemfs.
+        */
+
+       i915->mm.gemfs = NULL;
+
+       obj = i915_gem_object_create(i915, PAGE_SIZE);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto out_restore;
+       }
+
+       vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(vaddr)) {
+               err = PTR_ERR(vaddr);
+               goto out_put;
+       }
+       *vaddr = 0xdeadbeaf;
+
+       i915_gem_object_unpin_map(obj);
+
+       vma = i915_vma_instance(obj, vm, NULL);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto out_put;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, PIN_USER);
+       if (err)
+               goto out_close;
+
+       err = igt_check_page_sizes(vma);
+
+       i915_vma_unpin(vma);
+out_close:
+       i915_vma_close(vma);
+out_put:
+       i915_gem_object_put(obj);
+out_restore:
+       i915->mm.gemfs = gemfs;
+
+       return err;
+}
+
+static int igt_shrink_thp(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       unsigned int flags = PIN_USER;
+       int err;
+
+       /*
+        * Sanity check shrinking huge-paged object -- make sure nothing blows
+        * up.
+        */
+
+       if (!igt_can_allocate_thp(i915)) {
+               pr_info("missing THP support, skipping\n");
+               return 0;
+       }
+
+       obj = i915_gem_object_create(i915, SZ_2M);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       vma = i915_vma_instance(obj, vm, NULL);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto out_put;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, flags);
+       if (err)
+               goto out_close;
+
+       if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+               pr_info("failed to allocate THP, finishing test early\n");
+               goto out_unpin;
+       }
+
+       err = igt_check_page_sizes(vma);
+       if (err)
+               goto out_unpin;
+
+       err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+       if (err)
+               goto out_unpin;
+
+       i915_vma_unpin(vma);
+
+       /*
+        * Now that the pages are *unpinned* shrink-all should invoke
+        * shmem to truncate our pages.
+        */
+       i915_gem_shrink_all(i915);
+       if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+               pr_err("shrink-all didn't truncate the pages\n");
+               err = -EINVAL;
+               goto out_close;
+       }
+
+       if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
+               pr_err("residual page-size bits left\n");
+               err = -EINVAL;
+               goto out_close;
+       }
+
+       err = i915_vma_pin(vma, 0, 0, flags);
+       if (err)
+               goto out_close;
+
+       err = cpu_check(obj, 0, 0xdeadbeaf);
+
+out_unpin:
+       i915_vma_unpin(vma);
+out_close:
+       i915_vma_close(vma);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+int i915_gem_huge_page_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_mock_exhaust_device_supported_pages),
+               SUBTEST(igt_mock_ppgtt_misaligned_dma),
+               SUBTEST(igt_mock_ppgtt_huge_fill),
+               SUBTEST(igt_mock_ppgtt_64K),
+       };
+       int saved_ppgtt = i915_modparams.enable_ppgtt;
+       struct drm_i915_private *dev_priv;
+       struct pci_dev *pdev;
+       struct i915_hw_ppgtt *ppgtt;
+       int err;
+
+       dev_priv = mock_gem_device();
+       if (!dev_priv)
+               return -ENOMEM;
+
+       /* Pretend to be a device which supports the 48b PPGTT */
+       i915_modparams.enable_ppgtt = 3;
+
+       pdev = dev_priv->drm.pdev;
+       dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+       if (IS_ERR(ppgtt)) {
+               err = PTR_ERR(ppgtt);
+               goto out_unlock;
+       }
+
+       if (!i915_vm_is_48bit(&ppgtt->base)) {
+               pr_err("failed to create 48b PPGTT\n");
+               err = -EINVAL;
+               goto out_close;
+       }
+
+       /* If we ever hit this then it's time to mock the 64K scratch */
+       if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+               pr_err("PPGTT missing 64K scratch page\n");
+               err = -EINVAL;
+               goto out_close;
+       }
+
+       err = i915_subtests(tests, ppgtt);
+
+out_close:
+       i915_ppgtt_close(&ppgtt->base);
+       i915_ppgtt_put(ppgtt);
+
+out_unlock:
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       i915_modparams.enable_ppgtt = saved_ppgtt;
+
+       drm_dev_unref(&dev_priv->drm);
+
+       return err;
+}
+
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_shrink_thp),
+               SUBTEST(igt_ppgtt_pin_update),
+               SUBTEST(igt_tmpfs_fallback),
+               SUBTEST(igt_ppgtt_exhaust_huge),
+               SUBTEST(igt_ppgtt_gemfs_huge),
+               SUBTEST(igt_ppgtt_internal_huge),
+       };
+       struct drm_file *file;
+       struct i915_gem_context *ctx;
+       int err;
+
+       if (!USES_PPGTT(dev_priv)) {
+               pr_info("PPGTT not supported, skipping live-selftests\n");
+               return 0;
+       }
+
+       file = mock_file(dev_priv);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+
+       ctx = live_context(dev_priv, file);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto out_unlock;
+       }
+
+       err = i915_subtests(tests, ctx);
+
+out_unlock:
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       mock_file_free(dev_priv, file);
+
+       return err;
+}
index 6b132caffa184d8689a495613857bba1475022ef..9da0c9f999167a62821a6c05ff7cdf831204426e 100644 (file)
@@ -39,25 +39,26 @@ static void fake_free_pages(struct drm_i915_gem_object *obj,
        kfree(pages);
 }
 
-static struct sg_table *
-fake_get_pages(struct drm_i915_gem_object *obj)
+static int fake_get_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
 #define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
+       unsigned int sg_page_sizes;
        typeof(obj->base.size) rem;
 
        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
+       sg_page_sizes = 0;
        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));
@@ -66,13 +67,17 @@ fake_get_pages(struct drm_i915_gem_object *obj)
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;
+               sg_page_sizes |= len;
 
                rem -= len;
        }
        GEM_BUG_ON(rem);
 
        obj->mm.madv = I915_MADV_DONTNEED;
-       return pages;
+
+       __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+       return 0;
 #undef GFP
 }
 
index 8f011c447e4103464253d1cd1490065703f01bfb..1b8774a42e4872e307367600f69bf81a0d3cb682 100644 (file)
@@ -251,14 +251,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
                        return PTR_ERR(io);
                }
 
-               err = i915_vma_get_fence(vma);
-               if (err) {
-                       pr_err("Failed to get fence for partial view: offset=%lu\n",
-                              page);
-                       i915_vma_unpin_iomap(vma);
-                       return err;
-               }
-
                iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
                i915_vma_unpin_iomap(vma);
 
index 6664cb2eb0b8dd3cd12152b361004c19416f426a..a999161e8db1ea576ee00b3447aa8e424a405b1c 100644 (file)
@@ -215,7 +215,9 @@ static int igt_request_rewind(void *arg)
        }
        i915_gem_request_get(vip);
        i915_add_request(vip);
+       rcu_read_lock();
        request->engine->submit_request(request);
+       rcu_read_unlock();
 
        mutex_unlock(&i915->drm.struct_mutex);
 
@@ -418,7 +420,10 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
                err = PTR_ERR(cmd);
                goto err;
        }
+
        *cmd = MI_BATCH_BUFFER_END;
+       i915_gem_chipset_flush(i915);
+
        i915_gem_object_unpin_map(obj);
 
        err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -605,8 +610,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
                *cmd++ = lower_32_bits(vma->node.start);
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+       i915_gem_chipset_flush(i915);
 
-       wmb();
        i915_gem_object_unpin_map(obj);
 
        return vma;
@@ -625,7 +630,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
                return PTR_ERR(cmd);
 
        *cmd = MI_BATCH_BUFFER_END;
-       wmb();
+       i915_gem_chipset_flush(batch->vm->i915);
 
        i915_gem_object_unpin_map(batch->obj);
 
@@ -858,7 +863,8 @@ out_request:
                                              I915_MAP_WC);
                if (!IS_ERR(cmd)) {
                        *cmd = MI_BATCH_BUFFER_END;
-                       wmb();
+                       i915_gem_chipset_flush(i915);
+
                        i915_gem_object_unpin_map(request[id]->batch->obj);
                }
 
index 18b174d855ca7079956951368a8284804275d44b..64acd7eccc5c67213f436a5d0f2c53caa9a64002 100644 (file)
@@ -15,5 +15,6 @@ selftest(objects, i915_gem_object_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
 selftest(coherency, i915_gem_coherency_live_selftests)
 selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(contexts, i915_gem_context_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
index fc74687501ba923c97f5afe14a13deba7ee6c893..9961b44f76ed4e3166761137bda500fa076f79cf 100644 (file)
@@ -21,3 +21,4 @@ selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
 selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
 selftest(gtt, i915_gem_gtt_mock_selftests)
+selftest(hugepages, i915_gem_huge_page_mock_selftests)
index 377c1de766ce3c53d8eba827f0f34f312fd3240d..71ce06680d66ad0db52c194d163315faaaa7ef7d 100644 (file)
@@ -165,6 +165,7 @@ static int emit_recurse_batch(struct hang *h,
                *batch++ = lower_32_bits(vma->node.start);
        }
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+       i915_gem_chipset_flush(h->i915);
 
        flags = 0;
        if (INTEL_GEN(vm->i915) <= 5)
@@ -231,7 +232,7 @@ static u32 hws_seqno(const struct hang *h,
 static void hang_fini(struct hang *h)
 {
        *h->batch = MI_BATCH_BUFFER_END;
-       wmb();
+       i915_gem_chipset_flush(h->i915);
 
        i915_gem_object_unpin_map(h->obj);
        i915_gem_object_put(h->obj);
@@ -275,6 +276,8 @@ static int igt_hang_sanitycheck(void *arg)
                i915_gem_request_get(rq);
 
                *h.batch = MI_BATCH_BUFFER_END;
+               i915_gem_chipset_flush(i915);
+
                __i915_add_request(rq, true);
 
                timeout = i915_wait_request(rq,
@@ -621,8 +624,11 @@ static int igt_wait_reset(void *arg)
        __i915_add_request(rq, true);
 
        if (!wait_for_hang(&h, rq)) {
+               struct drm_printer p = drm_info_printer(i915->drm.dev);
+
                pr_err("Failed to start request %x, at %x\n",
                       rq->fence.seqno, hws_seqno(&h, rq));
+               intel_engine_dump(rq->engine, &p);
 
                i915_reset(i915, 0);
                i915_gem_set_wedged(i915);
@@ -713,8 +719,12 @@ static int igt_reset_queue(void *arg)
                        __i915_add_request(rq, true);
 
                        if (!wait_for_hang(&h, prev)) {
+                               struct drm_printer p = drm_info_printer(i915->drm.dev);
+
                                pr_err("Failed to start request %x, at %x\n",
                                       prev->fence.seqno, hws_seqno(&h, prev));
+                               intel_engine_dump(rq->engine, &p);
+
                                i915_gem_request_put(rq);
                                i915_gem_request_put(prev);
 
@@ -765,7 +775,7 @@ static int igt_reset_queue(void *arg)
                pr_info("%s: Completed %d resets\n", engine->name, count);
 
                *h.batch = MI_BATCH_BUFFER_END;
-               wmb();
+               i915_gem_chipset_flush(i915);
 
                i915_gem_request_put(prev);
        }
@@ -815,8 +825,11 @@ static int igt_handle_error(void *arg)
        __i915_add_request(rq, true);
 
        if (!wait_for_hang(&h, rq)) {
+               struct drm_printer p = drm_info_printer(i915->drm.dev);
+
                pr_err("Failed to start request %x, at %x\n",
                       rq->fence.seqno, hws_seqno(&h, rq));
+               intel_engine_dump(rq->engine, &p);
 
                i915_reset(i915, 0);
                i915_gem_set_wedged(i915);
@@ -865,9 +878,16 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_reset_queue),
                SUBTEST(igt_handle_error),
        };
+       int err;
 
        if (!intel_has_gpu_reset(i915))
                return 0;
 
-       return i915_subtests(tests, i915);
+       intel_runtime_pm_get(i915);
+
+       err = i915_subtests(tests, i915);
+
+       intel_runtime_pm_put(i915);
+
+       return err;
 }
index 2388424a14dabb68adedba50e97d6f849d4d48a0..04eb9362f4f87c881184f4f53accd38de8527123 100644 (file)
@@ -83,6 +83,8 @@ static void mock_device_release(struct drm_device *dev)
        kmem_cache_destroy(i915->vmas);
        kmem_cache_destroy(i915->objects);
 
+       i915_gemfs_fini(i915);
+
        drm_dev_fini(&i915->drm);
        put_device(&i915->drm.pdev->dev);
 }
@@ -146,7 +148,7 @@ struct drm_i915_private *mock_gem_device(void)
        dev_set_name(&pdev->dev, "mock");
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 
-#if IS_ENABLED(CONFIG_IOMMU_API)
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
        /* hack to disable iommu for the fake device; force identity mapping */
        pdev->dev.archdata.iommu = (void *)-1;
 #endif
@@ -172,6 +174,11 @@ struct drm_i915_private *mock_gem_device(void)
 
        mkwrite_device_info(i915)->gen = -1;
 
+       mkwrite_device_info(i915)->page_sizes =
+               I915_GTT_PAGE_SIZE_4K |
+               I915_GTT_PAGE_SIZE_64K |
+               I915_GTT_PAGE_SIZE_2M;
+
        spin_lock_init(&i915->mm.object_stat_lock);
        mock_uncore_init(i915);
 
@@ -239,8 +246,16 @@ struct drm_i915_private *mock_gem_device(void)
        if (!i915->kernel_context)
                goto err_engine;
 
+       i915->preempt_context = mock_context(i915, NULL);
+       if (!i915->preempt_context)
+               goto err_kernel_context;
+
+       WARN_ON(i915_gemfs_init(i915));
+
        return i915;
 
+err_kernel_context:
+       i915_gem_context_put(i915->kernel_context);
 err_engine:
        for_each_engine(engine, i915, id)
                mock_engine_free(engine);
index f2118cf535a053926254a523474be87dc5789142..336e1afb250f68147da22be96e8d9b87a3e76595 100644 (file)
@@ -43,7 +43,6 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
                           u32 flags)
 {
        GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
-       vma->pages = vma->obj->mm.pages;
        vma->flags |= I915_VMA_LOCAL_BIND;
        return 0;
 }
@@ -84,6 +83,8 @@ mock_ppgtt(struct drm_i915_private *i915,
        ppgtt->base.insert_entries = mock_insert_entries;
        ppgtt->base.bind_vma = mock_bind_ppgtt;
        ppgtt->base.unbind_vma = mock_unbind_ppgtt;
+       ppgtt->base.set_pages = ppgtt_set_pages;
+       ppgtt->base.clear_pages = clear_pages;
        ppgtt->base.cleanup = mock_cleanup;
 
        return ppgtt;
@@ -93,12 +94,6 @@ static int mock_bind_ggtt(struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 flags)
 {
-       int err;
-
-       err = i915_get_ggtt_vma_pages(vma);
-       if (err)
-               return err;
-
        vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
        return 0;
 }
@@ -124,6 +119,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
        ggtt->base.insert_entries = mock_insert_entries;
        ggtt->base.bind_vma = mock_bind_ggtt;
        ggtt->base.unbind_vma = mock_unbind_ggtt;
+       ggtt->base.set_pages = ggtt_set_pages;
+       ggtt->base.clear_pages = clear_pages;
        ggtt->base.cleanup = mock_cleanup;
 
        i915_address_space_init(&ggtt->base, i915, "global");
index 1cc5d2931753a159afd9b65dd31fc0594ff93692..cd6d2a16071faf60b8999992dd375aadc1138c31 100644 (file)
@@ -189,6 +189,20 @@ static unsigned int random(unsigned long n,
        return 1 + (prandom_u32_state(rnd) % 1024);
 }
 
+static unsigned int random_page_size_pages(unsigned long n,
+                                          unsigned long count,
+                                          struct rnd_state *rnd)
+{
+       /* 4K, 64K, 2M */
+       static unsigned int page_count[] = {
+               BIT(12) >> PAGE_SHIFT,
+               BIT(16) >> PAGE_SHIFT,
+               BIT(21) >> PAGE_SHIFT,
+       };
+
+       return page_count[(prandom_u32_state(rnd) % 3)];
+}
+
 static inline bool page_contiguous(struct page *first,
                                   struct page *last,
                                   unsigned long npages)
@@ -252,6 +266,7 @@ static const npages_fn_t npages_funcs[] = {
        grow,
        shrink,
        random,
+       random_page_size_pages,
        NULL,
 };
 
index e11fd76e06f43d2365eb07a17066927675ff302e..4d688c8d7853e4bba377cc9cebffbb23a8103d96 100644 (file)
@@ -95,7 +95,7 @@ struct ttm_pool_opts {
        unsigned        small;
 };
 
-#define NUM_POOLS 4
+#define NUM_POOLS 6
 
 /**
  * struct ttm_pool_manager - Holds memory pools for fst allocation
@@ -122,6 +122,8 @@ struct ttm_pool_manager {
                        struct ttm_page_pool    uc_pool;
                        struct ttm_page_pool    wc_pool_dma32;
                        struct ttm_page_pool    uc_pool_dma32;
+                       struct ttm_page_pool    wc_pool_huge;
+                       struct ttm_page_pool    uc_pool_huge;
                } ;
        };
 };
@@ -256,8 +258,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray)
 
 /**
  * Select the right pool or requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags,
-               enum ttm_caching_state cstate)
+static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
+                                         enum ttm_caching_state cstate)
 {
        int pool_index;
 
@@ -269,9 +271,15 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
        else
                pool_index = 0x1;
 
-       if (flags & TTM_PAGE_FLAG_DMA32)
+       if (flags & TTM_PAGE_FLAG_DMA32) {
+               if (huge)
+                       return NULL;
                pool_index |= 0x2;
 
+       } else if (huge) {
+               pool_index |= 0x4;
+       }
+
        return &_manager->pools[pool_index];
 }
 
@@ -494,12 +502,14 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
  * pages returned in pages array.
  */
 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
-               int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+                              int ttm_flags, enum ttm_caching_state cstate,
+                              unsigned count, unsigned order)
 {
        struct page **caching_array;
        struct page *p;
        int r = 0;
-       unsigned i, cpages;
+       unsigned i, j, cpages;
+       unsigned npages = 1 << order;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(struct page *)));
 
@@ -512,7 +522,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
        }
 
        for (i = 0, cpages = 0; i < count; ++i) {
-               p = alloc_page(gfp_flags);
+               p = alloc_pages(gfp_flags, order);
 
                if (!p) {
                        pr_err("Unable to get page %u\n", i);
@@ -531,14 +541,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                        goto out;
                }
 
+               list_add(&p->lru, pages);
+
 #ifdef CONFIG_HIGHMEM
                /* gfp flags of highmem page should never be dma32 so we
                 * we should be fine in such case
                 */
-               if (!PageHighMem(p))
+               if (PageHighMem(p))
+                       continue;
+
 #endif
-               {
-                       caching_array[cpages++] = p;
+               for (j = 0; j < npages; ++j) {
+                       caching_array[cpages++] = p++;
                        if (cpages == max_cpages) {
 
                                r = ttm_set_pages_caching(caching_array,
@@ -552,8 +566,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
                                cpages = 0;
                        }
                }
-
-               list_add(&p->lru, pages);
        }
 
        if (cpages) {
@@ -573,9 +585,9 @@ out:
  * Fill the given pool if there aren't enough pages and the requested number of
  * pages is small.
  */
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
-               int ttm_flags, enum ttm_caching_state cstate, unsigned count,
-               unsigned long *irq_flags)
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
+                                     enum ttm_caching_state cstate,
+                                     unsigned count, unsigned long *irq_flags)
 {
        struct page *p;
        int r;
@@ -605,7 +617,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
                INIT_LIST_HEAD(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
-                               cstate, alloc_size);
+                                       cstate, alloc_size, 0);
                spin_lock_irqsave(&pool->lock, *irq_flags);
 
                if (!r) {
@@ -627,22 +639,25 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 }
 
 /**
- * Cut 'count' number of pages from the pool and put them on the return list.
+ * Allocate pages from the pool and put them on the return list.
  *
- * @return count of pages still required to fulfill the request.
+ * @return zero for success or negative error code.
  */
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-                                       struct list_head *pages,
-                                       int ttm_flags,
-                                       enum ttm_caching_state cstate,
-                                       unsigned count)
+static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+                                  struct list_head *pages,
+                                  int ttm_flags,
+                                  enum ttm_caching_state cstate,
+                                  unsigned count, unsigned order)
 {
        unsigned long irq_flags;
        struct list_head *p;
        unsigned i;
+       int r = 0;
 
        spin_lock_irqsave(&pool->lock, irq_flags);
-       ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+       if (!order)
+               ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
+                                         &irq_flags);
 
        if (count >= pool->npages) {
                /* take all pages from the pool */
@@ -672,32 +687,126 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
        count = 0;
 out:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
-       return count;
+
+       /* clear the pages coming from the pool if requested */
+       if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+               struct page *page;
+
+               list_for_each_entry(page, pages, lru) {
+                       if (PageHighMem(page))
+                               clear_highpage(page);
+                       else
+                               clear_page(page_address(page));
+               }
+       }
+
+       /* If pool didn't have enough pages allocate new one. */
+       if (count) {
+               gfp_t gfp_flags = pool->gfp_flags;
+
+               /* set zero flag for page allocation if required */
+               if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+                       gfp_flags |= __GFP_ZERO;
+
+               /* ttm_alloc_new_pages doesn't reference pool so we can run
+                * multiple requests in parallel.
+                **/
+               r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
+                                       count, order);
+       }
+
+       return r;
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
 {
+       struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
        unsigned long irq_flags;
-       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;
 
        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
-               for (i = 0; i < npages; i++) {
-                       if (pages[i]) {
-                               if (page_count(pages[i]) != 1)
-                                       pr_err("Erroneous page count. Leaking pages.\n");
-                               __free_page(pages[i]);
-                               pages[i] = NULL;
+               i = 0;
+               while (i < npages) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                       struct page *p = pages[i];
+#endif
+                       unsigned order = 0, j;
+
+                       if (!pages[i]) {
+                               ++i;
+                               continue;
+                       }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                       for (j = 0; j < HPAGE_PMD_NR; ++j)
+                               if (p++ != pages[i + j])
+                                   break;
+
+                       if (j == HPAGE_PMD_NR)
+                               order = HPAGE_PMD_ORDER;
+#endif
+
+                       if (page_count(pages[i]) != 1)
+                               pr_err("Erroneous page count. Leaking pages.\n");
+                       __free_pages(pages[i], order);
+
+                       j = 1 << order;
+                       while (j) {
+                               pages[i++] = NULL;
+                               --j;
                        }
                }
                return;
        }
 
+       i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       if (huge) {
+               unsigned max_size, n2free;
+
+               spin_lock_irqsave(&huge->lock, irq_flags);
+               while (i < npages) {
+                       struct page *p = pages[i];
+                       unsigned j;
+
+                       if (!p)
+                               break;
+
+                       for (j = 0; j < HPAGE_PMD_NR; ++j)
+                               if (p++ != pages[i + j])
+                                   break;
+
+                       if (j != HPAGE_PMD_NR)
+                               break;
+
+                       list_add_tail(&pages[i]->lru, &huge->list);
+
+                       for (j = 0; j < HPAGE_PMD_NR; ++j)
+                               pages[i++] = NULL;
+                       huge->npages++;
+               }
+
+               /* Check that we don't go over the pool limit */
+               max_size = _manager->options.max_size;
+               max_size /= HPAGE_PMD_NR;
+               if (huge->npages > max_size)
+                       n2free = huge->npages - max_size;
+               else
+                       n2free = 0;
+               spin_unlock_irqrestore(&huge->lock, irq_flags);
+               if (n2free)
+                       ttm_page_pool_free(huge, n2free, false);
+       }
+#endif
+
        spin_lock_irqsave(&pool->lock, irq_flags);
-       for (i = 0; i < npages; i++) {
+       while (i < npages) {
                if (pages[i]) {
                        if (page_count(pages[i]) != 1)
                                pr_err("Erroneous page count. Leaking pages.\n");
@@ -705,6 +814,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                        pages[i] = NULL;
                        pool->npages++;
                }
+               ++i;
        }
        /* Check that we don't go over the pool limit */
        npages = 0;
@@ -727,25 +837,52 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
 {
-       struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+       struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
        struct list_head plist;
        struct page *p = NULL;
-       gfp_t gfp_flags = GFP_USER;
        unsigned count;
        int r;
 
-       /* set zero flag for page allocation if required */
-       if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-               gfp_flags |= __GFP_ZERO;
-
        /* No pool for cached pages */
        if (pool == NULL) {
+               gfp_t gfp_flags = GFP_USER;
+               unsigned i;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               unsigned j;
+#endif
+
+               /* set zero flag for page allocation if required */
+               if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+                       gfp_flags |= __GFP_ZERO;
+
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
                        gfp_flags |= GFP_HIGHUSER;
 
-               for (r = 0; r < npages; ++r) {
+               i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               while (npages >= HPAGE_PMD_NR) {
+                       gfp_t huge_flags = gfp_flags;
+
+                       huge_flags |= GFP_TRANSHUGE;
+                       huge_flags &= ~__GFP_MOVABLE;
+                       huge_flags &= ~__GFP_COMP;
+                       p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+                       if (!p)
+                               break;
+
+                       for (j = 0; j < HPAGE_PMD_NR; ++j)
+                               pages[i++] = p++;
+
+                       npages -= HPAGE_PMD_NR;
+               }
+#endif
+
+               while (npages) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
 
@@ -753,49 +890,44 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                                return -ENOMEM;
                        }
 
-                       pages[r] = p;
+                       pages[i++] = p;
+                       --npages;
                }
                return 0;
        }
 
-       /* combine zero flag to pool flags */
-       gfp_flags |= pool->gfp_flags;
-
-       /* First we take pages from the pool */
-       INIT_LIST_HEAD(&plist);
-       npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        count = 0;
-       list_for_each_entry(p, &plist, lru) {
-               pages[count++] = p;
-       }
 
-       /* clear the pages coming from the pool if requested */
-       if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       if (huge && npages >= HPAGE_PMD_NR) {
+               INIT_LIST_HEAD(&plist);
+               ttm_page_pool_get_pages(huge, &plist, flags, cstate,
+                                       npages / HPAGE_PMD_NR,
+                                       HPAGE_PMD_ORDER);
+
                list_for_each_entry(p, &plist, lru) {
-                       if (PageHighMem(p))
-                               clear_highpage(p);
-                       else
-                               clear_page(page_address(p));
+                       unsigned j;
+
+                       for (j = 0; j < HPAGE_PMD_NR; ++j)
+                               pages[count++] = &p[j];
                }
        }
+#endif
 
-       /* If pool didn't have enough pages allocate new one. */
-       if (npages > 0) {
-               /* ttm_alloc_new_pages doesn't reference pool so we can run
-                * multiple requests in parallel.
-                **/
-               INIT_LIST_HEAD(&plist);
-               r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
-               list_for_each_entry(p, &plist, lru) {
-                       pages[count++] = p;
-               }
-               if (r) {
-                       /* If there is any pages in the list put them back to
-                        * the pool. */
-                       pr_err("Failed to allocate extra pages for large request\n");
-                       ttm_put_pages(pages, count, flags, cstate);
-                       return r;
-               }
+       INIT_LIST_HEAD(&plist);
+       r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
+                                   npages - count, 0);
+
+       list_for_each_entry(p, &plist, lru)
+               pages[count++] = p;
+
+       if (r) {
+               /* If there is any pages in the list put them back to
+                * the pool.
+                */
+               pr_err("Failed to allocate extra pages for large request\n");
+               ttm_put_pages(pages, count, flags, cstate);
+               return r;
        }
 
        return 0;
@@ -832,6 +964,14 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "uc dma");
 
+       ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+                                 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+                                 "wc huge");
+
+       ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+                                 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+                                 , "uc huge");
+
        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
@@ -873,15 +1013,14 @@ int ttm_pool_populate(struct ttm_tt *ttm)
        if (ttm->state != tt_unpopulated)
                return 0;
 
-       for (i = 0; i < ttm->num_pages; ++i) {
-               ret = ttm_get_pages(&ttm->pages[i], 1,
-                                   ttm->page_flags,
-                                   ttm->caching_state);
-               if (ret != 0) {
-                       ttm_pool_unpopulate(ttm);
-                       return -ENOMEM;
-               }
+       ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+                           ttm->caching_state);
+       if (unlikely(ret != 0)) {
+               ttm_pool_unpopulate(ttm);
+               return ret;
+       }
 
+       for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                PAGE_SIZE);
                if (unlikely(ret != 0)) {
@@ -908,14 +1047,14 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
        unsigned i;
 
        for (i = 0; i < ttm->num_pages; ++i) {
-               if (ttm->pages[i]) {
-                       ttm_mem_global_free_page(ttm->glob->mem_glob,
-                                                ttm->pages[i], PAGE_SIZE);
-                       ttm_put_pages(&ttm->pages[i], 1,
-                                     ttm->page_flags,
-                                     ttm->caching_state);
-               }
+               if (!ttm->pages[i])
+                       continue;
+
+               ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
+                                        PAGE_SIZE);
        }
+       ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+                     ttm->caching_state);
        ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
@@ -923,16 +1062,26 @@ EXPORT_SYMBOL(ttm_pool_unpopulate);
 #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
-       unsigned i;
+       unsigned i, j;
        int r;
 
        r = ttm_pool_populate(&tt->ttm);
        if (r)
                return r;
 
-       for (i = 0; i < tt->ttm.num_pages; i++) {
+       for (i = 0; i < tt->ttm.num_pages; ++i) {
+               struct page *p = tt->ttm.pages[i];
+               size_t num_pages = 1;
+
+               for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+                       if (++p != tt->ttm.pages[j])
+                               break;
+
+                       ++num_pages;
+               }
+
                tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
-                                                 0, PAGE_SIZE,
+                                                 0, num_pages * PAGE_SIZE,
                                                  DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, tt->dma_address[i])) {
                        while (i--) {
@@ -943,6 +1092,11 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
                        ttm_pool_unpopulate(&tt->ttm);
                        return -EFAULT;
                }
+
+               for (j = 1; j < num_pages; ++j) {
+                       tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
+                       ++i;
+               }
        }
        return 0;
 }
@@ -950,13 +1104,28 @@ EXPORT_SYMBOL(ttm_populate_and_map_pages);
 
 void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
 {
-       unsigned i;
-       
-       for (i = 0; i < tt->ttm.num_pages; i++) {
-               if (tt->dma_address[i]) {
-                       dma_unmap_page(dev, tt->dma_address[i],
-                                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+       unsigned i, j;
+
+       for (i = 0; i < tt->ttm.num_pages;) {
+               struct page *p = tt->ttm.pages[i];
+               size_t num_pages = 1;
+
+               if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+                       ++i;
+                       continue;
                }
+
+               for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+                       if (++p != tt->ttm.pages[j])
+                               break;
+
+                       ++num_pages;
+               }
+
+               dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
+                              DMA_BIDIRECTIONAL);
+
+               i += num_pages;
        }
        ttm_pool_unpopulate(&tt->ttm);
 }
@@ -972,12 +1141,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
-       seq_printf(m, "%6s %12s %13s %8s\n",
+       seq_printf(m, "%7s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];
 
-               seq_printf(m, "%6s %12ld %13ld %8d\n",
+               seq_printf(m, "%7s %12ld %13ld %8d\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages);
        }
index e5ef10d347485987cd694a32be4f2442702c55c7..96ad12906621ca5ae499ae61fc200157608b4adc 100644 (file)
@@ -913,6 +913,7 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
        if (huge) {
                gfp_flags |= GFP_TRANSHUGE;
                gfp_flags &= ~__GFP_MOVABLE;
+               gfp_flags &= ~__GFP_COMP;
        }
 
        return gfp_flags;
index b6c3540e07bc31a5aafbf43c61b469f4eeb4fa96..0937d9a7d8fbc96cc1573a369e91ad7ce54ebb41 100644 (file)
@@ -53,6 +53,8 @@ extern struct file *shmem_file_setup(const char *name,
                                        loff_t size, unsigned long flags);
 extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
                                            unsigned long flags);
+extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
+               const char *name, loff_t size, unsigned long flags);
 extern int shmem_zero_setup(struct vm_area_struct *);
 extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags);
index 4c6e8c482ee498d564592fd063ba23229e0bed3d..ff0181829f3db6dfb4323bf4032875ad04516d2e 100644 (file)
@@ -53,6 +53,7 @@ extern "C" {
 #define DRM_AMDGPU_WAIT_FENCES         0x12
 #define DRM_AMDGPU_VM                  0x13
 #define DRM_AMDGPU_FENCE_TO_HANDLE     0x14
+#define DRM_AMDGPU_SCHED               0x15
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE    DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP      DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -69,6 +70,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_WAIT_FENCES   DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
 #define DRM_IOCTL_AMDGPU_VM            DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
 #define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
+#define DRM_IOCTL_AMDGPU_SCHED         DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
 
 #define AMDGPU_GEM_DOMAIN_CPU          0x1
 #define AMDGPU_GEM_DOMAIN_GTT          0x2
@@ -91,6 +93,8 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS      (1 << 5)
 /* Flag that BO is always valid in this VM */
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID      (1 << 6)
+/* Flag that BO sharing will be explicitly synchronized */
+#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC                (1 << 7)
 
 struct drm_amdgpu_gem_create_in  {
        /** the requested memory size */
@@ -166,13 +170,22 @@ union drm_amdgpu_bo_list {
 /* unknown cause */
 #define AMDGPU_CTX_UNKNOWN_RESET       3
 
+/* Context priority level */
+#define AMDGPU_CTX_PRIORITY_UNSET       -2048
+#define AMDGPU_CTX_PRIORITY_VERY_LOW    -1023
+#define AMDGPU_CTX_PRIORITY_LOW         -512
+#define AMDGPU_CTX_PRIORITY_NORMAL      0
+/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
+#define AMDGPU_CTX_PRIORITY_HIGH        512
+#define AMDGPU_CTX_PRIORITY_VERY_HIGH   1023
+
 struct drm_amdgpu_ctx_in {
        /** AMDGPU_CTX_OP_* */
        __u32   op;
        /** For future use, no flags defined so far */
        __u32   flags;
        __u32   ctx_id;
-       __u32   _pad;
+       __s32   priority;
 };
 
 union drm_amdgpu_ctx_out {
@@ -216,6 +229,21 @@ union drm_amdgpu_vm {
        struct drm_amdgpu_vm_out out;
 };
 
+/* sched ioctl */
+#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE      1
+
+struct drm_amdgpu_sched_in {
+       /* AMDGPU_SCHED_OP_* */
+       __u32   op;
+       __u32   fd;
+       __s32   priority;
+       __u32   flags;
+};
+
+union drm_amdgpu_sched {
+       struct drm_amdgpu_sched_in in;
+};
+
 /*
  * This is not a reliable API and you should expect it to fail for any
  * number of reasons and have fallback path that do not use userptr to
@@ -629,6 +657,7 @@ struct drm_amdgpu_cs_chunk_data {
        #define AMDGPU_INFO_SENSOR_VDDGFX               0x7
 /* Number of VRAM page faults on CPU access. */
 #define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS   0x1E
+#define AMDGPU_INFO_VRAM_LOST_COUNTER          0x1F
 
 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
 #define AMDGPU_INFO_MMR_SE_INDEX_MASK  0xff
index fe25a01c81f22661cb5c8a7d3db34a7851e579b3..125bde7d95045e80473a0e709ac7943a8cb9d5cd 100644 (file)
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_MIN_EU_IN_POOL       39
 #define I915_PARAM_MMAP_GTT_VERSION     40
 
-/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
  * priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask, nonzero implies that the scheduler
+ * is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
  */
 #define I915_PARAM_HAS_SCHEDULER        41
+#define   I915_SCHEDULER_CAP_ENABLED   (1ul << 0)
+#define   I915_SCHEDULER_CAP_PRIORITY  (1ul << 1)
+#define   I915_SCHEDULER_CAP_PREEMPTION        (1ul << 2)
+
 #define I915_PARAM_HUC_STATUS           42
 
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1308,7 +1318,7 @@ struct drm_i915_reg_read {
         * be specified
         */
        __u64 offset;
-#define I915_REG_READ_8B_WA BIT(0)
+#define I915_REG_READ_8B_WA (1ul << 0)
 
        __u64 val; /* Return value */
 };
@@ -1360,6 +1370,10 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_GTT_SIZE    0x3
 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE    0x4
 #define I915_CONTEXT_PARAM_BANNABLE    0x5
+#define I915_CONTEXT_PARAM_PRIORITY    0x6
+#define   I915_CONTEXT_MAX_USER_PRIORITY       1023 /* inclusive */
+#define   I915_CONTEXT_DEFAULT_PRIORITY                0
+#define   I915_CONTEXT_MIN_USER_PRIORITY       -1023 /* inclusive */
        __u64 value;
 };
 
index 07a1d22807beb7cfb5f85c3343a5eacdc28fb523..3229d27503ec79ba68f7dec597bb94df0426460d 100644 (file)
@@ -4183,7 +4183,7 @@ static const struct dentry_operations anon_ops = {
        .d_dname = simple_dname
 };
 
-static struct file *__shmem_file_setup(const char *name, loff_t size,
+static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
                                       unsigned long flags, unsigned int i_flags)
 {
        struct file *res;
@@ -4192,8 +4192,8 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
        struct super_block *sb;
        struct qstr this;
 
-       if (IS_ERR(shm_mnt))
-               return ERR_CAST(shm_mnt);
+       if (IS_ERR(mnt))
+               return ERR_CAST(mnt);
 
        if (size < 0 || size > MAX_LFS_FILESIZE)
                return ERR_PTR(-EINVAL);
@@ -4205,8 +4205,8 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
        this.name = name;
        this.len = strlen(name);
        this.hash = 0; /* will go */
-       sb = shm_mnt->mnt_sb;
-       path.mnt = mntget(shm_mnt);
+       sb = mnt->mnt_sb;
+       path.mnt = mntget(mnt);
        path.dentry = d_alloc_pseudo(sb, &this);
        if (!path.dentry)
                goto put_memory;
@@ -4251,7 +4251,7 @@ put_path:
  */
 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
 {
-       return __shmem_file_setup(name, size, flags, S_PRIVATE);
+       return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
 }
 
 /**
@@ -4262,10 +4262,24 @@ struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned lon
  */
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
 {
-       return __shmem_file_setup(name, size, flags, 0);
+       return __shmem_file_setup(shm_mnt, name, size, flags, 0);
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+/**
+ * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
+ * @mnt: the tmpfs mount where the file will be created
+ * @name: name for dentry (to be seen in /proc/<pid>/maps
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
+                                      loff_t size, unsigned long flags)
+{
+       return __shmem_file_setup(mnt, name, size, flags, 0);
+}
+EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -4281,7 +4295,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
         * accessible to the user through its mapping, use S_PRIVATE flag to
         * bypass file security, in the same way as shmem_kernel_file_setup().
         */
-       file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
+       file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
        if (IS_ERR(file))
                return PTR_ERR(file);