git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
UBUNTU: SAUCE: IPU6 driver release for kernel 5.13
author Wang Yating <yating.wang@intel.com>
Thu, 29 Jul 2021 06:48:32 +0000 (14:48 +0800)
committer Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Tue, 31 May 2022 16:26:27 +0000 (18:26 +0200)
BugLink: https://bugs.launchpad.net/bugs/1955383
Signed-off-by: Wang Yating <yating.wang@intel.com>
(cherry picked from commit d6b6959e1ba207eb1ae16ad296818ceae12879c4 github.com/intel/ipu6-drivers)
Signed-off-by: You-Sheng Yang <vicamo.yang@canonical.com>
Acked-by: Andrea Righi <andrea.righi@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
24 files changed:
drivers/media/i2c/hm11b1.c
drivers/media/i2c/ov01a1s.c
drivers/media/pci/intel/ipu-bus.c
drivers/media/pci/intel/ipu-buttress.c
drivers/media/pci/intel/ipu-dma.c
drivers/media/pci/intel/ipu-fw-isys.c
drivers/media/pci/intel/ipu-fw-isys.h
drivers/media/pci/intel/ipu-fw-psys.c
drivers/media/pci/intel/ipu-fw-psys.h
drivers/media/pci/intel/ipu-isys-csi2-be-soc.c
drivers/media/pci/intel/ipu-isys-video.c
drivers/media/pci/intel/ipu-mmu.c
drivers/media/pci/intel/ipu-mmu.h
drivers/media/pci/intel/ipu-pdata.h
drivers/media/pci/intel/ipu-psys.c
drivers/media/pci/intel/ipu-trace.c
drivers/media/pci/intel/ipu6/Makefile
drivers/media/pci/intel/ipu6/ipu-resources.c
drivers/media/pci/intel/ipu6/ipu6-fw-resources.c
drivers/media/pci/intel/ipu6/ipu6-platform-resources.h
drivers/media/pci/intel/ipu6/ipu6-ppg.c
drivers/media/pci/intel/ipu6/ipu6.c
drivers/media/pci/intel/ipu6/ipu6se-fw-resources.c
drivers/media/pci/intel/ipu6/ipu6se-platform-resources.h

index bf6e221150dee8f3d2d576938cabd0b9f8b02014..ecb2c19583ae9217623d733d3877d6e05da94308 100644 (file)
@@ -1305,7 +1305,7 @@ static int hm11b1_probe(struct i2c_client *client)
                goto probe_error_v4l2_ctrl_handler_free;
        }
 
-       ret = v4l2_async_register_subdev_sensor_common(&hm11b1->sd);
+       ret = v4l2_async_register_subdev_sensor(&hm11b1->sd);
        if (ret < 0) {
                dev_err(&client->dev, "failed to register V4L2 subdev: %d",
                        ret);
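
Note: the V4L2 core renamed v4l2_async_register_subdev_sensor_common() to v4l2_async_register_subdev_sensor() during the 5.13 development cycle, which is why both sensor drivers in this series need the one-line switch above. A hedged compatibility sketch for out-of-tree builds that must compile on both sides of the rename (the exact version cutoff here is an assumption):

    #include <linux/version.h>

    /* Assumed cutoff: the rename landed around v5.13; adjust if needed. */
    #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
    #define v4l2_async_register_subdev_sensor v4l2_async_register_subdev_sensor_common
    #endif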
index 8cd9e2dd4e7e85c0fed00b0b47df55bcf727054c..dd299204d9e9e5f94cadc0abeeb6b539b0f75177 100644 (file)
@@ -872,7 +872,7 @@ static int ov01a1s_probe(struct i2c_client *client)
                goto probe_error_v4l2_ctrl_handler_free;
        }
 
-       ret = v4l2_async_register_subdev_sensor_common(&ov01a1s->sd);
+       ret = v4l2_async_register_subdev_sensor(&ov01a1s->sd);
        if (ret < 0) {
                dev_err(&client->dev, "failed to register V4L2 subdev: %d",
                        ret);
index 1c671535fe27d2278e6a217490897e2e58e7530b..084aa5f946d0cdfd014361f292a2ff664e8680a5 100644 (file)
@@ -165,6 +165,7 @@ struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev,
                                      IPU_MMU_ADDRESS_BITS :
                                      IPU_MMU_ADDRESS_BITS_NON_SECURE);
        adev->dev.dma_mask = &adev->dma_mask;
+       adev->dev.dma_parms = pdev->dev.dma_parms;
        adev->dev.coherent_dma_mask = adev->dma_mask;
        adev->ctrl = ctrl;
        adev->pdata = pdata;
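
Pointing the child's dma_parms at the parent PCI device means DMA-segment limits configured on the PCI device also apply to mappings made through the IPU bus children; without this, dev->dma_parms on a child is NULL and segment-size setup fails. A minimal sketch of the pattern (placement in the parent's probe is illustrative):

    #include <linux/dma-mapping.h>

    /* In the PCI parent's probe, before child bus devices are added:
     * children sharing pdev->dev.dma_parms inherit this limit.
     */
    if (dma_set_max_seg_size(&pdev->dev, UINT_MAX))
            dev_warn(&pdev->dev, "dma_parms missing, segment size not set\n");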
index ee014a8bea661baea49ab84503995e7d6c8770d6..eac4a43ed7b19a4e6d8cfa0f128819cca5d4c1f7 100644 (file)
@@ -1112,7 +1112,7 @@ static int ipu_buttress_psys_force_freq_set(void *data, u64 val)
        return 0;
 }
 
-int ipu_buttress_isys_freq_get(void *data, u64 *val)
+static int ipu_buttress_isys_freq_get(void *data, u64 *val)
 {
        struct ipu_device *isp = data;
        u32 reg_val;
@@ -1135,7 +1135,7 @@ int ipu_buttress_isys_freq_get(void *data, u64 *val)
        return 0;
 }
 
-int ipu_buttress_isys_freq_set(void *data, u64 val)
+static int ipu_buttress_isys_freq_set(void *data, u64 val)
 {
        struct ipu_device *isp = data;
        int rval;
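
Both accessors lose external linkage because they are only referenced through debugfs fops inside this file. A hedged sketch of how such get/set pairs are typically wired up (the fops and file names are illustrative, not necessarily the ones ipu-buttress.c uses):

    #include <linux/debugfs.h>

    DEFINE_SIMPLE_ATTRIBUTE(isys_freq_fops, ipu_buttress_isys_freq_get,
                            ipu_buttress_isys_freq_set, "%llu\n");

    /* in the driver's debugfs init, with dir and isp from the caller: */
    debugfs_create_file("isys_freq", 0600, dir, isp, &isys_freq_fops);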
index 2e844dd16e6121cc9716499702cc9a2ed0905360..a661257a30de4211df433e8f9c722fed3579a63d 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2013 - 2020 Intel Corporation
+// Copyright (C) 2013 - 2021 Intel Corporation
 
 #include <asm/cacheflush.h>
 
@@ -162,10 +162,8 @@ static void *ipu_dma_alloc(struct device *dev, size_t size,
 
        iova = alloc_iova(&mmu->dmap->iovad, count,
                          dma_get_mask(dev) >> PAGE_SHIFT, 0);
-       if (!iova) {
-               kfree(info);
-               return NULL;
-       }
+       if (!iova)
+               goto out_kfree;
 
        pages = __dma_alloc_buffer(dev, size, gfp, attrs);
        if (!pages)
@@ -185,8 +183,6 @@ static void *ipu_dma_alloc(struct device *dev, size_t size,
 
        *dma_handle = iova->pfn_lo << PAGE_SHIFT;
 
-       mmu->tlb_invalidate(mmu);
-
        info->pages = pages;
        info->size = size;
        list_add(&info->list, &mmu->vma_list);
@@ -202,6 +198,7 @@ out_unmap:
 
 out_free_iova:
        __free_iova(&mmu->dmap->iovad, iova);
+out_kfree:
        kfree(info);
 
        return NULL;
@@ -243,10 +240,10 @@ static void ipu_dma_free(struct device *dev, size_t size, void *vaddr,
 
        __dma_free_buffer(dev, pages, size, attrs);
 
-       __free_iova(&mmu->dmap->iovad, iova);
-
        mmu->tlb_invalidate(mmu);
 
+       __free_iova(&mmu->dmap->iovad, iova);
+
        kfree(info);
 }
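
The reordering in ipu_dma_free() matters: the TLB must be invalidated while the IOVA range is still reserved, otherwise a concurrent allocation could hand the same range to a new mapping while stale translations sit in the TLB. Condensed, the resulting teardown order is (a sketch of the sequence above, not new code):

    ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT, size);
    __dma_free_buffer(dev, pages, size, attrs);
    mmu->tlb_invalidate(mmu);              /* flush while range is held */
    __free_iova(&mmu->dmap->iovad, iova);  /* only now recycle the IOVA */
    kfree(info);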
 
index ee064faaa0133392eec64991d585ef15a70dd1bd..fb03a9183025d75846a5fd76390b139de5e69467 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2013 - 2020 Intel Corporation
+// Copyright (C) 2013 - 2021 Intel Corporation
 
 #include <asm/cacheflush.h>
 
@@ -293,8 +293,6 @@ static int ipu6_isys_fwcom_cfg_init(struct ipu_isys *isys,
        struct ipu_fw_syscom_queue_config *input_queue_cfg;
        struct ipu_fw_syscom_queue_config *output_queue_cfg;
        struct ipu6_fw_isys_fw_config *isys_fw_cfg;
-       int num_in_message_queues = clamp_t(unsigned int, num_streams, 1,
-                                           IPU6_ISYS_NUM_STREAMS);
        int num_out_message_queues = 1;
        int type_proxy = IPU_FW_ISYS_QUEUE_TYPE_PROXY;
        int type_dev = IPU_FW_ISYS_QUEUE_TYPE_DEV;
@@ -302,119 +300,23 @@ static int ipu6_isys_fwcom_cfg_init(struct ipu_isys *isys,
        int base_dev_send = IPU_BASE_DEV_SEND_QUEUES;
        int base_msg_send = IPU_BASE_MSG_SEND_QUEUES;
        int base_msg_recv = IPU_BASE_MSG_RECV_QUEUES;
-
-       isys_fw_cfg = devm_kzalloc(&isys->adev->dev, sizeof(*isys_fw_cfg),
-                                  GFP_KERNEL);
-       if (!isys_fw_cfg)
-               return -ENOMEM;
-
-       isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] =
-               IPU_N_MAX_PROXY_SEND_QUEUES;
-       isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] =
-               IPU_N_MAX_DEV_SEND_QUEUES;
-       isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
-               num_in_message_queues;
-       isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] =
-               IPU_N_MAX_PROXY_RECV_QUEUES;
-       /* Common msg/dev return queue */
-       isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] = 0;
-       isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
-               num_out_message_queues;
-
-       size = sizeof(*input_queue_cfg) * IPU6_N_MAX_SEND_QUEUES;
-       input_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
-       if (!input_queue_cfg)
-               return -ENOMEM;
-
-       size = sizeof(*output_queue_cfg) * IPU_N_MAX_RECV_QUEUES;
-       output_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
-       if (!output_queue_cfg)
-               return -ENOMEM;
-
-       fwcom->input = input_queue_cfg;
-       fwcom->output = output_queue_cfg;
-
-       fwcom->num_input_queues =
-               isys_fw_cfg->num_send_queues[type_proxy] +
-               isys_fw_cfg->num_send_queues[type_dev] +
-               isys_fw_cfg->num_send_queues[type_msg];
-
-       fwcom->num_output_queues =
-               isys_fw_cfg->num_recv_queues[type_proxy] +
-               isys_fw_cfg->num_recv_queues[type_dev] +
-               isys_fw_cfg->num_recv_queues[type_msg];
-
-       /* SRAM partitioning. Equal partitioning is set. */
-       for (i = 0; i < IPU6_NOF_SRAM_BLOCKS_MAX; i++) {
-               if (i < num_in_message_queues)
-                       isys_fw_cfg->buffer_partition.num_gda_pages[i] =
-                               (IPU_DEVICE_GDA_NR_PAGES *
-                                IPU_DEVICE_GDA_VIRT_FACTOR) /
-                               num_in_message_queues;
-               else
-                       isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0;
-       }
-
-       /* FW assumes proxy interface at fwcom queue 0 */
-       for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) {
-               input_queue_cfg[i].token_size =
-                       sizeof(struct ipu_fw_proxy_send_queue_token);
-               input_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_SEND_QUEUE;
-       }
-
-       for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) {
-               input_queue_cfg[base_dev_send + i].token_size =
-                       sizeof(struct ipu_fw_send_queue_token);
-               input_queue_cfg[base_dev_send + i].queue_size =
-                       IPU6_DEV_SEND_QUEUE_SIZE;
-       }
-
-       for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) {
-               input_queue_cfg[base_msg_send + i].token_size =
-                       sizeof(struct ipu_fw_send_queue_token);
-               input_queue_cfg[base_msg_send + i].queue_size =
-                       IPU_ISYS_SIZE_SEND_QUEUE;
-       }
-
-       for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) {
-               output_queue_cfg[i].token_size =
-                       sizeof(struct ipu_fw_proxy_resp_queue_token);
-               output_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_RECV_QUEUE;
-       }
-       /* There is no recv DEV queue */
-       for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) {
-               output_queue_cfg[base_msg_recv + i].token_size =
-                       sizeof(struct ipu_fw_resp_queue_token);
-               output_queue_cfg[base_msg_recv + i].queue_size =
-                       IPU_ISYS_SIZE_RECV_QUEUE;
+       int num_in_message_queues;
+       unsigned int max_streams;
+       unsigned int max_send_queues, max_sram_blocks, max_devq_size;
+
+       max_streams = IPU6_ISYS_NUM_STREAMS;
+       max_send_queues = IPU6_N_MAX_SEND_QUEUES;
+       max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
+       max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
+       if (ipu_ver == IPU_VER_6SE) {
+               max_streams = IPU6SE_ISYS_NUM_STREAMS;
+               max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
+               max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
+               max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
        }
 
-       fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset;
-       fwcom->specific_addr = isys_fw_cfg;
-       fwcom->specific_size = sizeof(*isys_fw_cfg);
-
-       return 0;
-}
-
-static int ipu6se_isys_fwcom_cfg_init(struct ipu_isys *isys,
-                                     struct ipu_fw_com_cfg *fwcom,
-                                     unsigned int num_streams)
-{
-       int i;
-       unsigned int size;
-       struct ipu_fw_syscom_queue_config *input_queue_cfg;
-       struct ipu_fw_syscom_queue_config *output_queue_cfg;
-       struct ipu6se_fw_isys_fw_config *isys_fw_cfg;
-       int num_in_message_queues = clamp_t(unsigned int, num_streams, 1,
-                                           IPU6SE_ISYS_NUM_STREAMS);
-       int num_out_message_queues = 1;
-       int type_proxy = IPU_FW_ISYS_QUEUE_TYPE_PROXY;
-       int type_dev = IPU_FW_ISYS_QUEUE_TYPE_DEV;
-       int type_msg = IPU_FW_ISYS_QUEUE_TYPE_MSG;
-       int base_dev_send = IPU_BASE_DEV_SEND_QUEUES;
-       int base_msg_send = IPU_BASE_MSG_SEND_QUEUES;
-       int base_msg_recv = IPU_BASE_MSG_RECV_QUEUES;
-
+       num_in_message_queues = clamp_t(unsigned int, num_streams, 1,
+                                       max_streams);
        isys_fw_cfg = devm_kzalloc(&isys->adev->dev, sizeof(*isys_fw_cfg),
                                   GFP_KERNEL);
        if (!isys_fw_cfg)
@@ -433,7 +335,7 @@ static int ipu6se_isys_fwcom_cfg_init(struct ipu_isys *isys,
        isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
                num_out_message_queues;
 
-       size = sizeof(*input_queue_cfg) * IPU6SE_N_MAX_SEND_QUEUES;
+       size = sizeof(*input_queue_cfg) * max_send_queues;
        input_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
        if (!input_queue_cfg)
                return -ENOMEM;
@@ -457,7 +359,7 @@ static int ipu6se_isys_fwcom_cfg_init(struct ipu_isys *isys,
                isys_fw_cfg->num_recv_queues[type_msg];
 
        /* SRAM partitioning. Equal partitioning is set. */
-       for (i = 0; i < IPU6SE_NOF_SRAM_BLOCKS_MAX; i++) {
+       for (i = 0; i < max_sram_blocks; i++) {
                if (i < num_in_message_queues)
                        isys_fw_cfg->buffer_partition.num_gda_pages[i] =
                                (IPU_DEVICE_GDA_NR_PAGES *
@@ -477,8 +379,7 @@ static int ipu6se_isys_fwcom_cfg_init(struct ipu_isys *isys,
        for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) {
                input_queue_cfg[base_dev_send + i].token_size =
                        sizeof(struct ipu_fw_send_queue_token);
-               input_queue_cfg[base_dev_send + i].queue_size =
-                       IPU6SE_DEV_SEND_QUEUE_SIZE;
+               input_queue_cfg[base_dev_send + i].queue_size = max_devq_size;
        }
 
        for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) {
@@ -521,14 +422,7 @@ int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams)
        struct device *dev = &isys->adev->dev;
        int rval;
 
-       if (ipu_ver == IPU_VER_6SE) {
-               ipu6se_isys_fwcom_cfg_init(isys, &fwcom, num_streams);
-       } else if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) {
-               ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams);
-       } else {
-               dev_err(dev, "unsupported ipu_ver %d\n", ipu_ver);
-               return -EINVAL;
-       }
+       ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams);
 
        isys->fwcom = ipu_fw_com_prepare(&fwcom, isys->adev, isys->pdata->base);
        if (!isys->fwcom) {
@@ -585,6 +479,12 @@ void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg)
                    N_IPU_FW_ISYS_MIPI_DATA_TYPE;
                stream_cfg->input_pins[i].mipi_decompression =
                    IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION;
+               /*
+                * CSI BE can be used to crop and change bayer order.
+                * NOTE: currently it only crops first and last lines in height.
+                */
+               if (stream_cfg->crop.top_offset & 1)
+                       stream_cfg->input_pins[i].crop_first_and_last_lines = 1;
                stream_cfg->input_pins[i].capture_mode =
                        IPU_FW_ISYS_CAPTURE_MODE_REGULAR;
        }
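
The two near-identical per-variant init functions collapse into one that keeps the IPU6 queue limits as defaults and overrides them at run time when ipu_ver == IPU_VER_6SE, then clamps the stream count once. For reference, clamp_t() bounds a value into [lo, hi] with an explicit type; illustrative values, assuming max_streams >= 8:

    clamp_t(unsigned int, 0, 1, max_streams)   == 1           /* floor    */
    clamp_t(unsigned int, 8, 1, max_streams)   == 8           /* in range */
    clamp_t(unsigned int, ~0u, 1, max_streams) == max_streams /* ceiling  */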
index ec4c4c310fec1a745e22f5cf2916a30a94ac2834..0dc320474b77a12e98bf0a94135e88ef403212da 100644 (file)
@@ -505,16 +505,6 @@ struct ipu6_fw_isys_fw_config {
        u32 num_recv_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
 };
 
-struct ipu6se_fw_isys_buffer_partition_abi {
-       u32 num_gda_pages[IPU6SE_STREAM_ID_MAX];
-};
-
-struct ipu6se_fw_isys_fw_config {
-       struct ipu6se_fw_isys_buffer_partition_abi buffer_partition;
-       u32 num_send_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
-       u32 num_recv_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
-};
-
 /**
  * struct ipu_fw_isys_resolution_abi: Generic resolution structure.
  * @Width
index f7639fd1e72bd388446d210751327093a84d853f..68da73fa5c7a17dd4f569658ae0ec666d6c67015 100644 (file)
@@ -185,10 +185,7 @@ int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal,
 void ipu_fw_psys_pg_dump(struct ipu_psys *psys,
                         struct ipu_psys_kcmd *kcmd, const char *note)
 {
-       if (ipu_ver == IPU_VER_6SE)
-               ipu6se_fw_psys_pg_dump(psys, kcmd, note);
-       else
-               ipu6_fw_psys_pg_dump(psys, kcmd, note);
+       ipu6_fw_psys_pg_dump(psys, kcmd, note);
 }
 
 int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd)
index f1dcc9dd946ccacde297b8a945118f31922cca02..912a0986c060d5fe65e41a6716fd359a4306abf4 100644 (file)
@@ -376,20 +376,7 @@ int ipu6_fw_psys_get_program_manifest_by_process(
        struct ipu_fw_generic_program_manifest *gen_pm,
        const struct ipu_fw_psys_program_group_manifest *pg_manifest,
        struct ipu_fw_psys_process *process);
-int ipu6se_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
-                                   u16 value);
-int ipu6se_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
-                                      u16 id, u32 bitmap,
-                                      u32 active_bitmap);
-int ipu6se_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
-                                      u16 type_id, u16 mem_id, u16 offset);
-int ipu6se_fw_psys_get_program_manifest_by_process(
-       struct ipu_fw_generic_program_manifest *gen_pm,
-       const struct ipu_fw_psys_program_group_manifest *pg_manifest,
-       struct ipu_fw_psys_process *process);
 void ipu6_fw_psys_pg_dump(struct ipu_psys *psys,
                          struct ipu_psys_kcmd *kcmd, const char *note);
-void ipu6se_fw_psys_pg_dump(struct ipu_psys *psys,
-                           struct ipu_psys_kcmd *kcmd, const char *note);
 void ipu6_psys_hw_res_variant_init(void);
 #endif /* IPU_FW_PSYS_H */
index 501d5620e7e15b67d04d6c908bd76c8d048191d5..daaed97d1b137f5c8c0e6864e9736b9f7d51c806 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2014 - 2020 Intel Corporation
+// Copyright (C) 2014 - 2021 Intel Corporation
 
 #include <linux/device.h>
 #include <linux/module.h>
@@ -40,6 +40,37 @@ static const u32 csi2_be_soc_supported_codes_pad[] = {
        0,
 };
 
+/*
+ * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes.
+ * Otherwise pixel order calculation below WILL BREAK!
+ */
+static const u32 csi2_be_soc_supported_raw_bayer_codes_pad[] = {
+       MEDIA_BUS_FMT_SBGGR12_1X12,
+       MEDIA_BUS_FMT_SGBRG12_1X12,
+       MEDIA_BUS_FMT_SGRBG12_1X12,
+       MEDIA_BUS_FMT_SRGGB12_1X12,
+       MEDIA_BUS_FMT_SBGGR10_1X10,
+       MEDIA_BUS_FMT_SGBRG10_1X10,
+       MEDIA_BUS_FMT_SGRBG10_1X10,
+       MEDIA_BUS_FMT_SRGGB10_1X10,
+       MEDIA_BUS_FMT_SBGGR8_1X8,
+       MEDIA_BUS_FMT_SGBRG8_1X8,
+       MEDIA_BUS_FMT_SGRBG8_1X8,
+       MEDIA_BUS_FMT_SRGGB8_1X8,
+       0,
+};
+
+static int get_supported_code_index(u32 code)
+{
+       int i;
+
+       for (i = 0; csi2_be_soc_supported_raw_bayer_codes_pad[i]; i++) {
+               if (csi2_be_soc_supported_raw_bayer_codes_pad[i] == code)
+                       return i;
+       }
+       return -EINVAL;
+}
+
 static const u32 *csi2_be_soc_supported_codes[NR_OF_CSI2_BE_SOC_PADS];
 
 static struct v4l2_subdev_internal_ops csi2_be_soc_sd_internal_ops = {
@@ -94,21 +125,22 @@ ipu_isys_csi2_be_soc_set_sel(struct v4l2_subdev *sd,
        if (sel->target == V4L2_SEL_TGT_CROP &&
            pad->flags & MEDIA_PAD_FL_SOURCE &&
            asd->valid_tgts[sel->pad].crop) {
-               struct v4l2_rect *r;
                enum isys_subdev_prop_tgt tgt =
                    IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP;
+               struct v4l2_mbus_framefmt *ffmt =
+                       __ipu_isys_get_ffmt(sd, cfg, sel->pad, sel->which);
 
-               r = __ipu_isys_get_selection(sd, cfg, sel->target,
-                                            0, sel->which);
+               if (get_supported_code_index(ffmt->code) < 0) {
+                       /* Non-bayer formats can't be cropped on odd lines */
+                       sel->r.left &= ~1;
+                       sel->r.top &= ~1;
+               }
 
-               /* Cropping is not supported by SoC BE.
-                * Only horizontal padding is allowed.
-                */
-               sel->r.top = r->top;
-               sel->r.left = r->left;
-               sel->r.width = clamp(sel->r.width, r->width,
+               sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH,
                                     IPU_ISYS_MAX_WIDTH);
-               sel->r.height = r->height;
+
+               sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT,
+                                     IPU_ISYS_MAX_HEIGHT);
 
                *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad,
                                          sel->which) = sel->r;
@@ -157,15 +189,31 @@ static void csi2_be_soc_set_ffmt(struct v4l2_subdev *sd,
                                              fmt->pad, fmt->which);
        } else if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) {
                struct v4l2_mbus_framefmt *sink_ffmt;
-               struct v4l2_rect *r;
+               struct v4l2_rect *r = __ipu_isys_get_selection(sd, cfg,
+                       V4L2_SEL_TGT_CROP, fmt->pad, fmt->which);
+               struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+               u32 code;
+               int idx;
 
                sink_ffmt = __ipu_isys_get_ffmt(sd, cfg, 0, fmt->which);
-               r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP,
-                                            fmt->pad, fmt->which);
+               code = sink_ffmt->code;
+               idx = get_supported_code_index(code);
+
+               if (asd->valid_tgts[fmt->pad].crop && idx >= 0) {
+                       int crop_info = 0;
+
+                       /* Only crop the odd line at the top side. */
+                       if (r->top & 1)
+                               crop_info |= CSI2_BE_CROP_VER;
+
+                       code = csi2_be_soc_supported_raw_bayer_codes_pad
+                               [((idx & CSI2_BE_CROP_MASK) ^ crop_info)
+                               + (idx & ~CSI2_BE_CROP_MASK)];
 
+               }
+               ffmt->code = code;
                ffmt->width = r->width;
                ffmt->height = r->height;
-               ffmt->code = sink_ffmt->code;
                ffmt->field = sink_ffmt->field;
 
        }
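
Keeping the raw bayer codes in groups of four is what makes the XOR in csi2_be_soc_set_ffmt() work: within a group, the in-group index encodes the bayer phase, and cropping an odd top line flips the vertical phase. A worked example (CSI2_BE_CROP_VER's value lives in a header outside this diff; 2 is assumed here):

    /* SBGGR tile:          after dropping the odd top line:
     *   B G                  G R
     *   G R                  B G
     * SBGGR (in-group index 0) becomes SGRBG (index 2), and indeed
     * 0 ^ CSI2_BE_CROP_VER == 2 under the assumption above. The same
     * XOR swaps SGBRG (1) <-> SRGGB (3).
     */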
index 4353ef2a84e29e6f57694bb0ee60054f7ba14217..a1689294857577784cb3674786118588e11ea8ac 100644 (file)
@@ -720,10 +720,8 @@ static int short_packet_queue_setup(struct ipu_isys_pipeline *ip)
                buf->ip = ip;
                buf->ib.type = IPU_ISYS_SHORT_PACKET_BUFFER;
                buf->bytesused = buf_size;
-               buf->buffer = dma_alloc_coherent(&av->isys->adev->dev,
-                                                buf_size,
-                                                &buf->dma_addr,
-                                                GFP_KERNEL);
+               buf->buffer = dma_alloc_coherent(&av->isys->adev->dev, buf_size,
+                                                &buf->dma_addr, GFP_KERNEL);
                if (!buf->buffer) {
                        short_packet_queue_destroy(ip);
                        return -ENOMEM;
index baa9826f950092cd50b33a7b23b86e0f96dfa15d..ad84c7b6344143b7129239260ea42bc93aa75181 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2013 - 2020 Intel Corporation
+// Copyright (C) 2013 - 2021 Intel Corporation
 
 #include <asm/cacheflush.h>
 
 /* The range of stream ID i in L2 cache is from 0 to 15 */
 #define MMUV2_REG_L2_STREAMID(i)       (0x4c + ((i) * 4))
 
-/* ZLW Enable for each stream in L1 MMU AT where i : 0..15 */
-#define MMUV2_AT_REG_L1_ZLW_EN_SID(i)          (0x100 + ((i) * 0x20))
-
-/* ZLW 1D mode Enable for each stream in L1 MMU AT where i : 0..15 */
-#define MMUV2_AT_REG_L1_ZLW_1DMODE_SID(i)      (0x100 + ((i) * 0x20) + 0x0004)
-
-/* Set ZLW insertion N pages ahead per stream 1D where i : 0..15 */
-#define MMUV2_AT_REG_L1_ZLW_INS_N_AHEAD_SID(i) (0x100 + ((i) * 0x20) + 0x0008)
-
-/* ZLW 2D mode Enable for each stream in L1 MMU AT where i : 0..15 */
-#define MMUV2_AT_REG_L1_ZLW_2DMODE_SID(i)      (0x100 + ((i) * 0x20) + 0x0010)
-
-/* ZLW Insertion for each stream in L1 MMU AT where i : 0..15 */
-#define MMUV2_AT_REG_L1_ZLW_INSERTION(i)       (0x100 + ((i) * 0x20) + 0x000c)
-
-#define MMUV2_AT_REG_L1_FW_ZLW_FIFO            (0x100 + \
-                       (IPU_MMU_MAX_TLB_L1_STREAMS * 0x20) + 0x003c)
-
-/* FW ZLW has prioty - needed for ZLW invalidations */
-#define MMUV2_AT_REG_L1_FW_ZLW_PRIO            (0x100 + \
-                       (IPU_MMU_MAX_TLB_L1_STREAMS * 0x20))
-
 #define TBL_PHYS_ADDR(a)       ((phys_addr_t)(a) << ISP_PADDR_SHIFT)
-#define TBL_VIRT_ADDR(a)       phys_to_virt(TBL_PHYS_ADDR(a))
-
-static void zlw_invalidate(struct ipu_mmu *mmu, struct ipu_mmu_hw *mmu_hw)
-{
-       unsigned int retry = 0;
-       unsigned int i, j;
-       int ret;
-
-       for (i = 0; i < mmu_hw->nr_l1streams; i++) {
-               /* We need to invalidate only the zlw enabled stream IDs */
-               if (mmu_hw->l1_zlw_en[i]) {
-                       /*
-                        * Maximum 16 blocks per L1 stream
-                        * Write trash buffer iova offset to the FW_ZLW
-                        * register. This will trigger pre-fetching of next 16
-                        * pages from the page table. So we need to increment
-                        * iova address by 16 * 4K to trigger the next 16 pages.
-                        * Once this loop is completed, the L1 cache will be
-                        * filled with trash buffer translation.
-                        *
-                        * TODO: Instead of maximum 16 blocks, use the allocated
-                        * block size
-                        */
-                       for (j = 0; j < mmu_hw->l1_block_sz[i]; j++)
-                               writel(mmu->iova_addr_trash +
-                                          j * MMUV2_TRASH_L1_BLOCK_OFFSET,
-                                          mmu_hw->base +
-                                          MMUV2_AT_REG_L1_ZLW_INSERTION(i));
-
-                       /*
-                        * Now we need to fill the L2 cache entry. L2 cache
-                        * entries will be automatically updated, based on the
-                        * L1 entry. The above loop for L1 will update only one
-                        * of the two entries in L2 as the L1 is under 4MB
-                        * range. To force the other entry in L2 to update, we
-                        * just need to trigger another pre-fetch which is
-                        * outside the above 4MB range.
-                        */
-                       writel(mmu->iova_addr_trash +
-                                  MMUV2_TRASH_L2_BLOCK_OFFSET,
-                                  mmu_hw->base +
-                                  MMUV2_AT_REG_L1_ZLW_INSERTION(0));
-               }
-       }
-
-       /*
-        * Wait until AT is ready. FIFO read should return 2 when AT is ready.
-        * Retry value of 1000 is just by guess work to avoid the forever loop.
-        */
-       do {
-               if (retry > 1000) {
-                       dev_err(mmu->dev, "zlw invalidation failed\n");
-                       return;
-               }
-               ret = readl(mmu_hw->base + MMUV2_AT_REG_L1_FW_ZLW_FIFO);
-               retry++;
-       } while (ret != 2);
-}
 
 static void tlb_invalidate(struct ipu_mmu *mmu)
 {
@@ -139,22 +59,21 @@ static void tlb_invalidate(struct ipu_mmu *mmu)
                 * MMUs on successive invalidate calls, we need to first do a
                 * read to the page table base before writing the invalidate
                 * register. MMUs which need to implement this WA, will have
-                * the insert_read_before_invalidate flasg set as true.
+                * the insert_read_before_invalidate flag set to true.
                 * Disregard the return value of the read.
                 */
                if (mmu->mmu_hw[i].insert_read_before_invalidate)
                        readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
 
-               /* Normal invalidate or zlw invalidate */
-               if (mmu->mmu_hw[i].zlw_invalidate) {
-                       /* trash buffer must be mapped by now, just in case! */
-                       WARN_ON(!mmu->iova_addr_trash);
-
-                       zlw_invalidate(mmu, &mmu->mmu_hw[i]);
-               } else {
-                       writel(0xffffffff, mmu->mmu_hw[i].base +
-                                  REG_TLB_INVALIDATE);
-               }
+               writel(0xffffffff, mmu->mmu_hw[i].base +
+                      REG_TLB_INVALIDATE);
+               /*
+                * The TLB invalidation is a "single cycle" operation (in
+                * IOMMU clock cycles). wmb() makes sure the MMIO write has
+                * reached the IPU TLB Invalidate register before the CPU
+                * can go on to update the IOMMU page table.
+                */
+               wmb();
        }
        spin_unlock_irqrestore(&mmu->ready_lock, flags);
 }
@@ -164,47 +83,162 @@ static void page_table_dump(struct ipu_mmu_info *mmu_info)
 {
        u32 l1_idx;
 
-       pr_debug("begin IOMMU page table dump\n");
+       dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");
 
        for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
                u32 l2_idx;
                u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
 
-               if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl)
+               if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
                        continue;
-               pr_debug("l1 entry %u; iovas 0x%8.8x--0x%8.8x, at %p\n",
-                        l1_idx, iova, iova + ISP_PAGE_SIZE,
-                        (void *)TBL_PHYS_ADDR(mmu_info->pgtbl[l1_idx]));
+               dev_dbg(mmu_info->dev,
+                       "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %p\n",
+                       l1_idx, iova, iova + ISP_PAGE_SIZE,
+                       (void *)TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]));
 
                for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
-                       u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
+                       u32 *l2_pt = mmu_info->l2_pts[l1_idx];
                        u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);
 
-                       if (l2_pt[l2_idx] == mmu_info->dummy_page)
+                       if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
                                continue;
 
-                       pr_debug("\tl2 entry %u; iova 0x%8.8x, phys %p\n",
-                                l2_idx, iova2,
-                                (void *)TBL_PHYS_ADDR(l2_pt[l2_idx]));
+                       dev_dbg(mmu_info->dev,
+                               "\tl2 entry %u; iova 0x%8.8x, phys %p\n",
+                               l2_idx, iova2,
+                               (void *)TBL_PHYS_ADDR(l2_pt[l2_idx]));
                }
        }
 
-       pr_debug("end IOMMU page table dump\n");
+       dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
 }
 #endif /* DEBUG */
 
-static u32 *alloc_page_table(struct ipu_mmu_info *mmu_info, bool l1)
+static dma_addr_t map_single(struct ipu_mmu_info *mmu_info, void *ptr)
+{
+       dma_addr_t dma;
+
+       dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(mmu_info->dev, dma))
+               return 0;
+
+       return dma;
+}
+
+static int get_dummy_page(struct ipu_mmu_info *mmu_info)
+{
+       dma_addr_t dma;
+       void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+
+       if (!pt)
+               return -ENOMEM;
+
+       dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);
+
+       dma = map_single(mmu_info, pt);
+       if (!dma) {
+               dev_err(mmu_info->dev, "Failed to map dummy page\n");
+               goto err_free_page;
+       }
+
+       mmu_info->dummy_page = pt;
+       mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;
+
+       return 0;
+
+err_free_page:
+       free_page((unsigned long)pt);
+       return -ENOMEM;
+}
+
+static void free_dummy_page(struct ipu_mmu_info *mmu_info)
+{
+       dma_unmap_single(mmu_info->dev,
+                        TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
+                        PAGE_SIZE, DMA_BIDIRECTIONAL);
+       free_page((unsigned long)mmu_info->dummy_page);
+}
+
+static int alloc_dummy_l2_pt(struct ipu_mmu_info *mmu_info)
+{
+       dma_addr_t dma;
+       u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+       int i;
+
+       if (!pt)
+               return -ENOMEM;
+
+       dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);
+
+       dma = map_single(mmu_info, pt);
+       if (!dma) {
+               dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+               goto err_free_page;
+       }
+
+       for (i = 0; i < ISP_L2PT_PTES; i++)
+               pt[i] = mmu_info->dummy_page_pteval;
+
+       mmu_info->dummy_l2_pt = pt;
+       mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;
+
+       return 0;
+
+err_free_page:
+       free_page((unsigned long)pt);
+       return -ENOMEM;
+}
+
+static void free_dummy_l2_pt(struct ipu_mmu_info *mmu_info)
+{
+       dma_unmap_single(mmu_info->dev,
+                        TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
+                        PAGE_SIZE, DMA_BIDIRECTIONAL);
+       free_page((unsigned long)mmu_info->dummy_l2_pt);
+}
+
+static u32 *alloc_l1_pt(struct ipu_mmu_info *mmu_info)
 {
+       dma_addr_t dma;
        u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
        int i;
 
        if (!pt)
                return NULL;
 
-       pr_debug("get_zeroed_page() == %p\n", pt);
+       dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);
 
        for (i = 0; i < ISP_L1PT_PTES; i++)
-               pt[i] = l1 ? mmu_info->dummy_l2_tbl : mmu_info->dummy_page;
+               pt[i] = mmu_info->dummy_l2_pteval;
+
+       dma = map_single(mmu_info, pt);
+       if (!dma) {
+               dev_err(mmu_info->dev, "Failed to map l1pt page\n");
+               goto err_free_page;
+       }
+
+       mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
+       dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma);
+
+       return pt;
+
+err_free_page:
+       free_page((unsigned long)pt);
+       return NULL;
+}
+
+static u32 *alloc_l2_pt(struct ipu_mmu_info *mmu_info)
+{
+       u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+       int i;
+
+       if (!pt)
+               return NULL;
+
+       dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);
+
+       for (i = 0; i < ISP_L1PT_PTES; i++)
+               pt[i] = mmu_info->dummy_page_pteval;
 
        return pt;
 }
@@ -213,60 +247,69 @@ static int l2_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
                  phys_addr_t paddr, size_t size)
 {
        u32 l1_idx = iova >> ISP_L1PT_SHIFT;
-       u32 l1_entry = mmu_info->pgtbl[l1_idx];
-       u32 *l2_pt;
+       u32 l1_entry;
+       u32 *l2_pt, *l2_virt;
        u32 iova_start = iova;
        unsigned int l2_idx;
        unsigned long flags;
+       dma_addr_t dma;
 
-       pr_debug("mapping l2 page table for l1 index %u (iova %8.8x)\n",
-                l1_idx, (u32)iova);
+       dev_dbg(mmu_info->dev,
+               "mapping l2 page table for l1 index %u (iova %8.8x)\n",
+               l1_idx, (u32)iova);
 
        spin_lock_irqsave(&mmu_info->lock, flags);
-       if (l1_entry == mmu_info->dummy_l2_tbl) {
-               u32 *l2_virt = alloc_page_table(mmu_info, false);
+       l1_entry = mmu_info->l1_pt[l1_idx];
+       if (l1_entry == mmu_info->dummy_l2_pteval) {
+               l2_virt = mmu_info->l2_pts[l1_idx];
+               if (likely(!l2_virt)) {
+                       l2_virt = alloc_l2_pt(mmu_info);
+                       if (!l2_virt) {
+                               spin_unlock_irqrestore(&mmu_info->lock, flags);
+                               return -ENOMEM;
+                       }
+               }
 
-               if (!l2_virt) {
+               dma = map_single(mmu_info, l2_virt);
+               if (!dma) {
+                       dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+                       free_page((unsigned long)l2_virt);
                        spin_unlock_irqrestore(&mmu_info->lock, flags);
-                       return -ENOMEM;
+                       return -EINVAL;
                }
 
-               l1_entry = virt_to_phys(l2_virt) >> ISP_PADDR_SHIFT;
-               pr_debug("allocated page for l1_idx %u\n", l1_idx);
+               l1_entry = dma >> ISP_PADDR_SHIFT;
 
-               if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl) {
-                       mmu_info->pgtbl[l1_idx] = l1_entry;
-#ifdef CONFIG_X86
-                       clflush_cache_range(&mmu_info->pgtbl[l1_idx],
-                                           sizeof(mmu_info->pgtbl[l1_idx]));
-#endif /* CONFIG_X86 */
-               } else {
-                       free_page((unsigned long)TBL_VIRT_ADDR(l1_entry));
-               }
+               dev_dbg(mmu_info->dev, "page for l1_idx %u %p allocated\n",
+                       l1_idx, l2_virt);
+               mmu_info->l1_pt[l1_idx] = l1_entry;
+               mmu_info->l2_pts[l1_idx] = l2_virt;
+               clflush_cache_range(&mmu_info->l1_pt[l1_idx],
+                                   sizeof(mmu_info->l1_pt[l1_idx]));
        }
 
-       l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
+       l2_pt = mmu_info->l2_pts[l1_idx];
 
-       pr_debug("l2_pt at %p\n", l2_pt);
+       dev_dbg(mmu_info->dev, "l2_pt at %p with dma 0x%x\n", l2_pt, l1_entry);
 
        paddr = ALIGN(paddr, ISP_PAGE_SIZE);
 
        l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
 
-       pr_debug("l2_idx %u, phys 0x%8.8x\n", l2_idx, l2_pt[l2_idx]);
-       if (l2_pt[l2_idx] != mmu_info->dummy_page) {
+       dev_dbg(mmu_info->dev, "l2_idx %u, phys 0x%8.8x\n", l2_idx,
+               l2_pt[l2_idx]);
+       if (l2_pt[l2_idx] != mmu_info->dummy_page_pteval) {
                spin_unlock_irqrestore(&mmu_info->lock, flags);
-               return -EBUSY;
+               return -EINVAL;
        }
 
        l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;
 
-#ifdef CONFIG_X86
        clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
-#endif /* CONFIG_X86 */
        spin_unlock_irqrestore(&mmu_info->lock, flags);
 
-       pr_debug("l2 index %u mapped as 0x%8.8x\n", l2_idx, l2_pt[l2_idx]);
+       dev_dbg(mmu_info->dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
+               l2_pt[l2_idx]);
 
        return 0;
 }
@@ -277,9 +320,9 @@ static int __ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
        u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
        u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
 
-       pr_debug
-           ("mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
-            iova_start, iova_end, size, paddr);
+       dev_dbg(mmu_info->dev,
+               "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
+               iova_start, iova_end, size, paddr);
 
        return l2_map(mmu_info, iova_start, paddr, size);
 }
@@ -288,34 +331,37 @@ static size_t l2_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
                       phys_addr_t dummy, size_t size)
 {
        u32 l1_idx = iova >> ISP_L1PT_SHIFT;
-       u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]);
+       u32 *l2_pt;
        u32 iova_start = iova;
        unsigned int l2_idx;
        size_t unmapped = 0;
+       unsigned long flags;
 
-       pr_debug("unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
-                l1_idx, iova);
-
-       if (mmu_info->pgtbl[l1_idx] == mmu_info->dummy_l2_tbl)
-               return -EINVAL;
+       dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
+               l1_idx, iova);
 
-       pr_debug("l2_pt at %p\n", l2_pt);
+       spin_lock_irqsave(&mmu_info->lock, flags);
+       if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+               spin_unlock_irqrestore(&mmu_info->lock, flags);
+               dev_err(mmu_info->dev,
+                       "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
+                       iova, l1_idx);
+               return 0;
+       }
 
        for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
             (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
             < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
-               unsigned long flags;
+               l2_pt = mmu_info->l2_pts[l1_idx];
+               dev_dbg(mmu_info->dev,
+                       "unmap l2 index %u with pteval 0x%10.10llx\n",
+                       l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+               l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
 
-               pr_debug("l2 index %u unmapped, was 0x%10.10llx\n",
-                        l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
-               spin_lock_irqsave(&mmu_info->lock, flags);
-               l2_pt[l2_idx] = mmu_info->dummy_page;
-               spin_unlock_irqrestore(&mmu_info->lock, flags);
-#ifdef CONFIG_X86
                clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
-#endif /* CONFIG_X86 */
                unmapped++;
        }
+       spin_unlock_irqrestore(&mmu_info->lock, flags);
 
        return unmapped << ISP_PAGE_SHIFT;
 }
@@ -332,6 +378,7 @@ static int allocate_trash_buffer(struct ipu_mmu *mmu)
        struct iova *iova;
        u32 iova_addr;
        unsigned int i;
+       dma_addr_t dma;
        int ret;
 
        /* Allocate 8MB in iova range */
@@ -342,6 +389,16 @@ static int allocate_trash_buffer(struct ipu_mmu *mmu)
                return -ENOMEM;
        }
 
+       dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
+                          PAGE_SIZE, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
+               dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
+               ret = -ENOMEM;
+               goto out_free_iova;
+       }
+
+       mmu->pci_trash_page = dma;
+
        /*
         * Map the 8MB iova address range to the same physical trash page
         * mmu->trash_page which is already reserved at the probe
@@ -349,7 +406,7 @@ static int allocate_trash_buffer(struct ipu_mmu *mmu)
        iova_addr = iova->pfn_lo;
        for (i = 0; i < n_pages; i++) {
                ret = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
-                                 page_to_phys(mmu->trash_page), PAGE_SIZE);
+                                 mmu->pci_trash_page, PAGE_SIZE);
                if (ret) {
                        dev_err(mmu->dev,
                                "mapping trash buffer range failed\n");
@@ -359,15 +416,17 @@ static int allocate_trash_buffer(struct ipu_mmu *mmu)
                iova_addr++;
        }
 
-       /* save the address for the ZLW invalidation */
-       mmu->iova_addr_trash = iova->pfn_lo << PAGE_SHIFT;
+       mmu->iova_trash_page = iova->pfn_lo << PAGE_SHIFT;
        dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
-               mmu->mmid, (unsigned int)mmu->iova_addr_trash);
+               mmu->mmid, (unsigned int)mmu->iova_trash_page);
        return 0;
 
 out_unmap:
        ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
                      (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+       dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
+                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+out_free_iova:
        __free_iova(&mmu->dmap->iovad, iova);
        return ret;
 }
@@ -389,9 +448,8 @@ int ipu_mmu_hw_init(struct ipu_mmu *mmu)
                u16 block_addr;
 
                /* Write page table address per MMU */
-               writel((phys_addr_t)virt_to_phys(mmu_info->pgtbl)
-                          >> ISP_PADDR_SHIFT,
-                          mmu->mmu_hw[i].base + REG_L1_PHYS);
+               writel((phys_addr_t)mmu_info->l1_pt_dma,
+                      mmu->mmu_hw[i].base + REG_L1_PHYS);
 
                /* Set info bits per MMU */
                writel(mmu->mmu_hw[i].info_bits,
@@ -423,20 +481,14 @@ int ipu_mmu_hw_init(struct ipu_mmu *mmu)
                }
        }
 
-       /*
-        * Allocate 1 page of physical memory for the trash buffer.
-        */
        if (!mmu->trash_page) {
+               int ret;
+
                mmu->trash_page = alloc_page(GFP_KERNEL);
                if (!mmu->trash_page) {
                        dev_err(mmu->dev, "insufficient memory for trash buffer\n");
                        return -ENOMEM;
                }
-       }
-
-       /* Allocate trash buffer, if not allocated. Only once per MMU */
-       if (!mmu->iova_addr_trash) {
-               int ret;
 
                ret = allocate_trash_buffer(mmu);
                if (ret) {
@@ -458,7 +510,7 @@ EXPORT_SYMBOL(ipu_mmu_hw_init);
 static struct ipu_mmu_info *ipu_mmu_alloc(struct ipu_device *isp)
 {
        struct ipu_mmu_info *mmu_info;
-       void *ptr;
+       int ret;
 
        mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
        if (!mmu_info)
@@ -466,40 +518,44 @@ static struct ipu_mmu_info *ipu_mmu_alloc(struct ipu_device *isp)
 
        mmu_info->aperture_start = 0;
        mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
-                                     IPU_MMU_ADDRESS_BITS :
-                                     IPU_MMU_ADDRESS_BITS_NON_SECURE);
+                                             IPU_MMU_ADDRESS_BITS :
+                                             IPU_MMU_ADDRESS_BITS_NON_SECURE);
        mmu_info->pgsize_bitmap = SZ_4K;
+       mmu_info->dev = &isp->pdev->dev;
 
-       ptr = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
-       if (!ptr)
-               goto err_mem;
-
-       mmu_info->dummy_page = virt_to_phys(ptr) >> ISP_PAGE_SHIFT;
+       ret = get_dummy_page(mmu_info);
+       if (ret)
+               goto err_free_info;
 
-       ptr = alloc_page_table(mmu_info, false);
-       if (!ptr)
-               goto err;
+       ret = alloc_dummy_l2_pt(mmu_info);
+       if (ret)
+               goto err_free_dummy_page;
 
-       mmu_info->dummy_l2_tbl = virt_to_phys(ptr) >> ISP_PAGE_SHIFT;
+       mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
+       if (!mmu_info->l2_pts)
+               goto err_free_dummy_l2_pt;
 
        /*
         * We always map the L1 page table (a single page as well as
         * the L2 page tables).
         */
-       mmu_info->pgtbl = alloc_page_table(mmu_info, true);
-       if (!mmu_info->pgtbl)
-               goto err;
+       mmu_info->l1_pt = alloc_l1_pt(mmu_info);
+       if (!mmu_info->l1_pt)
+               goto err_free_l2_pts;
 
        spin_lock_init(&mmu_info->lock);
 
-       pr_debug("domain initialised\n");
+       dev_dbg(mmu_info->dev, "domain initialised\n");
 
        return mmu_info;
 
-err:
-       free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_page));
-       free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_l2_tbl));
-err_mem:
+err_free_l2_pts:
+       vfree(mmu_info->l2_pts);
+err_free_dummy_l2_pt:
+       free_dummy_l2_pt(mmu_info);
+err_free_dummy_page:
+       free_dummy_page(mmu_info);
+err_free_info:
        kfree(mmu_info);
 
        return NULL;
@@ -535,7 +591,7 @@ static struct ipu_dma_mapping *alloc_dma_mapping(struct ipu_device *isp)
 
        kref_init(&dmap->ref);
 
-       pr_debug("alloc mapping\n");
+       dev_dbg(&isp->pdev->dev, "alloc mapping\n");
 
        iova_cache_get();
 
@@ -545,15 +601,22 @@ static struct ipu_dma_mapping *alloc_dma_mapping(struct ipu_device *isp)
 phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info,
                                 dma_addr_t iova)
 {
-       u32 *l2_pt = TBL_VIRT_ADDR(mmu_info->pgtbl[iova >> ISP_L1PT_SHIFT]);
+       unsigned long flags;
+       u32 *l2_pt;
+       phys_addr_t phy_addr;
 
-       return (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]
-           << ISP_PAGE_SHIFT;
+       spin_lock_irqsave(&mmu_info->lock, flags);
+       l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
+       phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
+       phy_addr <<= ISP_PAGE_SHIFT;
+       spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+       return phy_addr;
 }
 
 /*
  * The following four functions are implemented based on iommu.c
- * drivers/iommu/iommu.c/iommu_pgsize().
+ * drivers/iommu/iommu.c:iommu_pgsize().
  */
 static size_t ipu_mmu_pgsize(unsigned long pgsize_bitmap,
                             unsigned long addr_merge, size_t size)
@@ -588,7 +651,7 @@ static size_t ipu_mmu_pgsize(unsigned long pgsize_bitmap,
        return pgsize;
 }
 
-/* drivers/iommu/iommu.c/iommu_unmap() */
+/* drivers/iommu/iommu.c:iommu_unmap() */
 size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
                     size_t size)
 {
@@ -621,7 +684,7 @@ size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
                if (!unmapped_page)
                        break;
 
-               dev_dbg(NULL, "unmapped: iova 0x%lx size 0x%zx\n",
+               dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
                        iova, unmapped_page);
 
                iova += unmapped_page;
@@ -631,7 +694,7 @@ size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
        return unmapped;
 }
 
-/* drivers/iommu/iommu.c/iommu_map() */
+/* drivers/iommu/iommu.c:iommu_map() */
 int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
                phys_addr_t paddr, size_t size)
 {
@@ -652,19 +715,22 @@ int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-               pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
-                      iova, &paddr, size, min_pagesz);
+               dev_err(mmu_info->dev,
+                       "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }
 
-       pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
+       dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
+               iova, &paddr, size);
 
        while (size) {
                size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
                                               iova | paddr, size);
 
-               pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
-                        iova, &paddr, pgsize);
+               dev_dbg(mmu_info->dev,
+                       "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+                       iova, &paddr, pgsize);
 
                ret = __ipu_mmu_map(mmu_info, iova, paddr, pgsize);
                if (ret)
@@ -689,9 +755,9 @@ static void ipu_mmu_destroy(struct ipu_mmu *mmu)
        struct iova *iova;
        u32 l1_idx;
 
-       if (mmu->iova_addr_trash) {
+       if (mmu->iova_trash_page) {
                iova = find_iova(&dmap->iovad,
-                                mmu->iova_addr_trash >> PAGE_SHIFT);
+                                mmu->iova_trash_page >> PAGE_SHIFT);
                if (iova) {
                        /* unmap and free the trash buffer iova */
                        ipu_mmu_unmap(mmu_info, iova->pfn_lo << PAGE_SHIFT,
@@ -702,20 +768,27 @@ static void ipu_mmu_destroy(struct ipu_mmu *mmu)
                        dev_err(mmu->dev, "trash buffer iova not found.\n");
                }
 
-               mmu->iova_addr_trash = 0;
-       }
-
-       if (mmu->trash_page)
+               mmu->iova_trash_page = 0;
+               dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
+                              PAGE_SIZE, DMA_BIDIRECTIONAL);
+               mmu->pci_trash_page = 0;
                __free_page(mmu->trash_page);
+       }
 
-       for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++)
-               if (mmu_info->pgtbl[l1_idx] != mmu_info->dummy_l2_tbl)
-                       free_page((unsigned long)
-                                 TBL_VIRT_ADDR(mmu_info->pgtbl[l1_idx]));
+       for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+               if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
+                       dma_unmap_single(mmu_info->dev,
+                                        TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
+                                        PAGE_SIZE, DMA_BIDIRECTIONAL);
+                       free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
+               }
+       }
 
-       free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_page));
-       free_page((unsigned long)TBL_VIRT_ADDR(mmu_info->dummy_l2_tbl));
-       free_page((unsigned long)mmu_info->pgtbl);
+       free_dummy_page(mmu_info);
+       dma_unmap_single(mmu_info->dev, mmu_info->l1_pt_dma,
+                        PAGE_SIZE, DMA_BIDIRECTIONAL);
+       free_page((unsigned long)mmu_info->dummy_l2_pt);
+       free_page((unsigned long)mmu_info->l1_pt);
        kfree(mmu_info);
 }
 
index d810024d37fea11ab44a3aa6c896ba33acdec374..5f55d6b831fa185bc339b48037fffb7c1fbeb625 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2013 - 2020 Intel Corporation */
+/* Copyright (C) 2013 - 2021 Intel Corporation */
 
 #ifndef IPU_MMU_H
 #define IPU_MMU_H
  * @pgtbl: virtual address of the l1 page table (one page)
  */
 struct ipu_mmu_info {
-       u32 __iomem *pgtbl;
+       struct device *dev;
+
+       u32 __iomem *l1_pt;
+       u32 l1_pt_dma;
+       u32 **l2_pts;
+
+       u32 *dummy_l2_pt;
+       u32 dummy_l2_pteval;
+       void *dummy_page;
+       u32 dummy_page_pteval;
+
        dma_addr_t aperture_start;
        dma_addr_t aperture_end;
        unsigned long pgsize_bitmap;
 
        spinlock_t lock;        /* Serialize access to users */
        struct ipu_dma_mapping *dmap;
-       u32 dummy_l2_tbl;
-       u32 dummy_page;
 };
 
 /*
@@ -44,7 +52,8 @@ struct ipu_mmu {
        struct list_head vma_list;
 
        struct page *trash_page;
-       dma_addr_t iova_addr_trash;
+       dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */
+       dma_addr_t iova_trash_page; /* IOVA for IPU child nodes to use */
 
        bool ready;
        spinlock_t ready_lock;  /* Serialize access to bool ready */
index b342132965f55daf77ab712e42b3b29135d1420d..a8f21f81da6dfdd69066a45f1fc4262de73e1572 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2013 - 2020 Intel Corporation */
+/* Copyright (C) 2013 - 2021 Intel Corporation */
 
 #ifndef IPU_PDATA_H
 #define IPU_PDATA_H
@@ -186,8 +186,6 @@ struct ipu_mmu_hw {
        u8 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS];
        /* flag to track if WA is needed for successive invalidate HW bug */
        bool insert_read_before_invalidate;
-       /* flag to track if zlw based mmu invalidation is needed */
-       bool zlw_invalidate;
 };
 
 struct ipu_mmu_pdata {
index 37964b2965d904ea00506e28e261554a6690f457..d0c71c86928fa8918049b3b0496ed2337f169047 100644 (file)
@@ -349,25 +349,30 @@ static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
        return -ENOTTY;
 }
 
-static void *ipu_dma_buf_vmap(struct dma_buf *dmabuf)
+static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 {
        struct dma_buf_attachment *attach;
        struct ipu_dma_buf_attach *ipu_attach;
 
        if (list_empty(&dmabuf->attachments))
-               return NULL;
+               return -EINVAL;
 
        attach = list_last_entry(&dmabuf->attachments,
                                 struct dma_buf_attachment, node);
        ipu_attach = attach->priv;
 
        if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)
-               return NULL;
+               return -EINVAL;
+
+       map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0);
+       map->is_iomem = false;
+       if (!map->vaddr)
+               return -EINVAL;
 
-       return vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0);
+       return 0;
 }
 
-static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 {
        struct dma_buf_attachment *attach;
        struct ipu_dma_buf_attach *ipu_attach;
@@ -382,7 +387,7 @@ static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
        if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages))
                return;
 
-       vm_unmap_ram(vaddr, ipu_attach->npages);
+       vm_unmap_ram(map->vaddr, ipu_attach->npages);
 }
 
 struct dma_buf_ops ipu_dma_buf_ops = {
@@ -441,8 +446,12 @@ static inline void ipu_psys_kbuf_unmap(struct ipu_psys_kbuffer *kbuf)
                return;
 
        kbuf->valid = false;
-       if (kbuf->kaddr)
-               dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr);
+       if (kbuf->kaddr) {
+               struct dma_buf_map dmap;
+
+               dma_buf_map_set_vaddr(&dmap, kbuf->kaddr);
+               dma_buf_vunmap(kbuf->dbuf, &dmap);
+       }
        if (kbuf->sgt)
                dma_buf_unmap_attachment(kbuf->db_attach,
                                         kbuf->sgt,
@@ -564,6 +573,7 @@ int ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh,
 {
        struct ipu_psys *psys = fh->psys;
        struct dma_buf *dbuf;
+       struct dma_buf_map dmap;
        int ret;
 
        dbuf = dma_buf_get(fd);
@@ -635,12 +645,12 @@ int ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh,
 
        kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl);
 
-       kbuf->kaddr = dma_buf_vmap(kbuf->dbuf);
-       if (!kbuf->kaddr) {
-               ret = -EINVAL;
+       ret = dma_buf_vmap(kbuf->dbuf, &dmap);
+       if (ret) {
                dev_dbg(&psys->adev->dev, "dma buf vmap failed\n");
                goto kbuf_map_fail;
        }
+       kbuf->kaddr = dmap.vaddr;
 
        dev_dbg(&psys->adev->dev, "%s kbuf %p fd %d with len %llu mapped\n",
                __func__, kbuf, fd, kbuf->len);
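
All three hunks in this file adapt to the dma-buf vmap interface introduced in v5.11, where dma_buf_vmap() fills a caller-provided struct dma_buf_map and returns an error code instead of a bare pointer, and dma_buf_vunmap() takes the same descriptor back. A condensed sketch of that calling convention as the patch uses it; dma_buf_map_set_vaddr() rebuilds the descriptor from a stashed kernel address, mirroring the kbuf->kaddr handling above:

    #include <linux/dma-buf.h>
    #include <linux/dma-buf-map.h>

    /* Sketch of the v5.11+ convention; error handling kept minimal. */
    static void *example_vmap(struct dma_buf *dbuf)
    {
            struct dma_buf_map map;

            if (dma_buf_vmap(dbuf, &map))
                    return NULL;            /* mapping failed */
            return map.vaddr;               /* is_iomem is false here */
    }

    static void example_vunmap(struct dma_buf *dbuf, void *kaddr)
    {
            struct dma_buf_map map;

            dma_buf_map_set_vaddr(&map, kaddr);  /* rebuild the descriptor */
            dma_buf_vunmap(dbuf, &map);
    }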
index 99b209df3d1f040ad9a5bcfc851628a8a2910509..4e7ee02a423bf415fcabada73aba3fccc12c5bf4 100644 (file)
@@ -31,6 +31,8 @@ struct trace_register_range {
 #define MEMORY_RING_BUFFER_OVERREAD    MEMORY_RING_BUFFER_GUARD
 #define MAX_TRACE_REGISTERS            200
 #define TRACE_CONF_DUMP_BUFFER_SIZE    (MAX_TRACE_REGISTERS * 2 * 32)
+#define TRACE_CONF_DATA_MAX_LEN                (1024 * 4)
+#define WPT_TRACE_CONF_DATA_MAX_LEN    (1024 * 64)
 
 struct config_value {
        u32 reg;
@@ -112,11 +114,10 @@ static void __ipu_trace_restore(struct device *dev)
 
        if (!sys->memory.memory_buffer) {
                sys->memory.memory_buffer =
-                       dma_alloc_coherent(dev,
-                                          MEMORY_RING_BUFFER_SIZE +
-                                          MEMORY_RING_BUFFER_GUARD,
-                                          &sys->memory.dma_handle,
-                                          GFP_KERNEL);
+                   dma_alloc_coherent(dev, MEMORY_RING_BUFFER_SIZE +
+                                      MEMORY_RING_BUFFER_GUARD,
+                                      &sys->memory.dma_handle,
+                                      GFP_KERNEL);
        }
 
        if (!sys->memory.memory_buffer) {
@@ -405,7 +406,8 @@ static ssize_t traceconf_write(struct file *file, const char __user *buf,
        u32 ipu_trace_number = 0;
        struct config_value *cfg_buffer = NULL;
 
-       if ((*ppos < 0) || (len < sizeof(ipu_trace_number))) {
+       if ((*ppos < 0) || (len > TRACE_CONF_DATA_MAX_LEN) ||
+           (len < sizeof(ipu_trace_number))) {
                dev_info(&isp->pdev->dev,
                        "length is error, len:%ld, loff:%lld\n",
                        len, *ppos);
@@ -605,7 +607,8 @@ static ssize_t wptraceconf_write(struct file *file, const char __user *buf,
        struct config_value *wpt_buffer = NULL;
        struct ipu_subsystem_wptrace_config *wpt = &isp->trace->psys.wpt;
 
-       if ((*ppos < 0) || (len < sizeof(wp_node_number))) {
+       if ((*ppos < 0) || (len > WPT_TRACE_CONF_DATA_MAX_LEN) ||
+           (len < sizeof(wp_node_number))) {
                dev_info(&isp->pdev->dev,
                        "length is error, len:%ld, loff:%lld\n",
                        len, *ppos);
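
Both write handlers now bound len from above as well as below, so a malicious or buggy writer can no longer trigger an oversized allocation before the payload is inspected. A minimal sketch of the resulting validation pattern, using the TRACE_CONF_DATA_MAX_LEN limit defined earlier in this file (the function and local names are illustrative):

    #include <linux/uaccess.h>

    static ssize_t example_write(struct file *file, const char __user *buf,
                                 size_t len, loff_t *ppos)
    {
            u32 count;

            /* reject bad positions and over/undersized writes early */
            if (*ppos < 0 || len > TRACE_CONF_DATA_MAX_LEN ||
                len < sizeof(count))
                    return -EINVAL;

            if (copy_from_user(&count, buf, sizeof(count)))
                    return -EFAULT;

            /* ... validate count, then copy and apply the payload ... */
            return len;
    }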
index db20fcdd3c01960268fc8b1547cad51baa517375..11464fd482d5af729de56d1b243dd8e461c2940d 100644 (file)
@@ -5,7 +5,7 @@ ifneq ($(EXTERNAL_BUILD), 1)
 srcpath := $(srctree)
 endif
 
-ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=1 -DIPU_TPG_FRAME_SYNC -DIPU_PSYS_GPC \
+ccflags-y += -DIPU_TPG_FRAME_SYNC -DIPU_PSYS_GPC \
                -DIPU_ISYS_GPC
 
 intel-ipu6-objs                                += ../ipu.o \
index 41c634b0483745dde0c7922d634caff14caca7ae..b246fc037890fda06f72ebfc779b7d927e7b5c82 100644 (file)
@@ -18,33 +18,22 @@ void ipu6_psys_hw_res_variant_init(void)
        if (ipu_ver == IPU_VER_6SE) {
                hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
                hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID;
-               hw_var.set_proc_dev_chn = ipu6se_fw_psys_set_proc_dev_chn;
-               hw_var.set_proc_dfm_bitmap = ipu6se_fw_psys_set_proc_dfm_bitmap;
-               hw_var.set_proc_ext_mem = ipu6se_fw_psys_set_process_ext_mem;
-               hw_var.get_pgm_by_proc =
-                       ipu6se_fw_psys_get_program_manifest_by_process;
-               return;
        } else if (ipu_ver == IPU_VER_6) {
                hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
                hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID;
-               hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn;
-               hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap;
-               hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem;
-               hw_var.get_pgm_by_proc =
-                       ipu6_fw_psys_get_program_manifest_by_process;
-               return;
        } else if (ipu_ver == IPU_VER_6EP) {
                hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
                hw_var.cell_num = IPU6EP_FW_PSYS_N_CELL_ID;
-               hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn;
-               hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap;
-               hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem;
-               hw_var.get_pgm_by_proc =
-                       ipu6_fw_psys_get_program_manifest_by_process;
-               return;
+       } else {
+               WARN(1, "ipu6 psys res var is not initialised correctly.");
        }
 
-       WARN(1, "ipu6 psys res var is not initialised correctly.");
+       hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn;
+       hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap;
+       hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem;
+       hw_var.get_pgm_by_proc =
+               ipu6_fw_psys_get_program_manifest_by_process;
+       return;
 }
 
 static const struct ipu_fw_resource_definitions *get_res(void)
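
The hunk above deduplicates the variant setup: each ipu_ver branch now sets only the queue and cell counts that actually differ, and the shared ipu6 callback implementations are installed once on the common path (the 6SE-only copies are deleted from ipu6se-fw-resources.c later in this commit). Condensed, the resulting shape looks like the sketch below; the struct layout is hypothetical and only set_proc_dev_chn is shown:

    /* Hedged sketch of the post-refactor init shape. */
    struct example_hw_variant {
            unsigned int queue_num;
            unsigned int cell_num;
            int (*set_proc_dev_chn)(struct ipu_fw_psys_process *p,
                                    u16 offset, u16 value);
    };

    static struct example_hw_variant var;

    static void example_variant_init(unsigned int ver)
    {
            if (ver == IPU_VER_6SE) {
                    var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
                    var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID;
            } else {        /* IPU_VER_6 / IPU_VER_6EP, else WARN as above */
                    var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
                    var.cell_num = IPU6_FW_PSYS_N_CELL_ID;
            }
            /* one shared implementation now serves every variant */
            var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn;
    }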
@@ -703,6 +692,7 @@ int ipu_psys_allocate_resources(const struct device *dev,
                                                        bank, alloc);
                                if (ret)
                                        goto free_out;
+
                                /* no return value check here because fw api
                                 * will do some checks, and would return
                                 * non-zero except mem_type_id == 0.
index 4b646783704dbf9ce5de9d519e6762eb17b28cfe..9569a146e739140617e40b011f1d53c57e0f50f9 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2015 - 2020 Intel Corporation
+// Copyright (C) 2015 - 2021 Intel Corporation
 
 #include <linux/err.h>
 #include <linux/string.h>
@@ -547,6 +547,20 @@ void ipu6_fw_psys_pg_dump(struct ipu_psys *psys,
        u8 processes = pg->process_count;
        u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset);
        unsigned int p, chn, mem, mem_id;
+       unsigned int mem_type, max_mem_id, dev_chn;
+
+       if (ipu_ver == IPU_VER_6SE) {
+               mem_type = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID;
+               max_mem_id = IPU6SE_FW_PSYS_N_MEM_ID;
+               dev_chn = IPU6SE_FW_PSYS_N_DEV_CHN_ID;
+       } else if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) {
+               mem_type = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID;
+               max_mem_id = IPU6_FW_PSYS_N_MEM_ID;
+               dev_chn = IPU6_FW_PSYS_N_DEV_CHN_ID;
+       } else {
+               WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver);
+               return;
+       }
 
        dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes:\n",
                __func__, note, pgid, processes);
@@ -563,16 +577,15 @@ void ipu6_fw_psys_pg_dump(struct ipu_psys *psys,
                if (!process->process_extension_offset)
                        continue;
 
-               for (mem = 0; mem < IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID;
-                   mem++) {
+               for (mem = 0; mem < mem_type; mem++) {
                        mem_id = pm_ext->ext_mem_id[mem];
-                       if (mem_id != IPU6_FW_PSYS_N_MEM_ID)
+                       if (mem_id != max_mem_id)
                                dev_dbg(&psys->adev->dev,
                                        "\t mem type %u id %d offset=0x%x",
                                        mem, mem_id,
                                        pm_ext->ext_mem_offset[mem]);
                }
-               for (chn = 0; chn < IPU6_FW_PSYS_N_DEV_CHN_ID; chn++) {
+               for (chn = 0; chn < dev_chn; chn++) {
                        if (pm_ext->dev_chn_offset[chn] != (u16)(-1))
                                dev_dbg(&psys->adev->dev,
                                        "\t dev_chn[%u]=0x%x\n",
index 07d4b4e688fb44160beb0dfec37debc9d91f461e..329901ac3acb665be9429760fad7eff702ec5807 100644 (file)
@@ -191,7 +191,6 @@ struct ipu6_fw_psys_process_ext {
        u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
        u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID];
        u8 ext_mem_id[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
-       u8 padding[IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT];
 };
 
 #endif /* IPU6_PLATFORM_RESOURCES_H */
index a6860df5db180304c3aa8076ea2e908a30c9dd68..c6acf9cb70ef38b796ad4f11336e74e2a1415a15 100644 (file)
@@ -503,8 +503,11 @@ void ipu_psys_enter_power_gating(struct ipu_psys *psys)
 
                list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
                        mutex_lock(&kppg->mutex);
-                       /* kppg has already power down */
-                       if (kppg->state == PPG_STATE_STOPPED) {
+                       /*
+                        * Only SUSPENDED kppgs are handled here: STOPPED ones
+                        * have already powered down, and new kppgs may arrive.
+                        */
+                       if (kppg->state != PPG_STATE_SUSPENDED) {
                                mutex_unlock(&kppg->mutex);
                                continue;
                        }
@@ -539,9 +542,8 @@ void ipu_psys_exit_power_gating(struct ipu_psys *psys)
 
                list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
                        mutex_lock(&kppg->mutex);
-                       /* kppg is not started and power up */
-                       if (kppg->state == PPG_STATE_START ||
-                           kppg->state == PPG_STATE_STARTING) {
+                       /* Only for SUSPENDED kppgs */
+                       if (kppg->state != PPG_STATE_SUSPENDED) {
                                mutex_unlock(&kppg->mutex);
                                continue;
                        }
index 8054c4cb03453193909c1b95a5e5c4e5e82869f5..ad89abd8bd2ed4340c4db74354255d1bc780f174 100644 (file)
@@ -89,7 +89,6 @@ struct ipu_isys_internal_pdata isys_ipdata = {
                                                   2, 2, 2, 2, 2, 2
                                   },
                                   .insert_read_before_invalidate = false,
-                                  .zlw_invalidate = false,
                                   .l1_stream_id_reg_offset =
                                   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
                                   .l2_stream_id_reg_offset =
@@ -109,7 +108,6 @@ struct ipu_isys_internal_pdata isys_ipdata = {
                                                   2, 2, 2, 2, 2, 2
                                   },
                                   .insert_read_before_invalidate = false,
-                                  .zlw_invalidate = false,
                                   .l1_stream_id_reg_offset =
                                   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
                                   .l2_stream_id_reg_offset =
@@ -151,7 +149,6 @@ struct ipu_psys_internal_pdata psys_ipdata = {
                                                   2, 2, 2, 2, 2, 2
                                   },
                                   .insert_read_before_invalidate = false,
-                                  .zlw_invalidate = false,
                                   .l1_stream_id_reg_offset =
                                   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
                                   .l2_stream_id_reg_offset =
@@ -175,7 +172,6 @@ struct ipu_psys_internal_pdata psys_ipdata = {
                                                   2, 2, 2, 2, 2, 2
                                   },
                                   .insert_read_before_invalidate = false,
-                                  .zlw_invalidate = false,
                                   .l1_stream_id_reg_offset =
                                   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
                                   .l2_stream_id_reg_offset =
@@ -195,7 +191,6 @@ struct ipu_psys_internal_pdata psys_ipdata = {
                                                   2, 2, 2, 2, 2, 2
                                   },
                                   .insert_read_before_invalidate = false,
-                                  .zlw_invalidate = false,
                                   .l1_stream_id_reg_offset =
                                   IPU_MMU_L1_STREAM_ID_REG_OFFSET,
                                   .l2_stream_id_reg_offset =
index c0413fbddef6132dbb64ef36fb473e99e7d60045..f94df275b37c1b47431d0c1eff34c1d61a3ddc37 100644 (file)
@@ -192,161 +192,3 @@ static const struct ipu_fw_resource_definitions ipu6se_defs = {
 };
 
 const struct ipu_fw_resource_definitions *ipu6se_res_defs = &ipu6se_defs;
-
-int ipu6se_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
-                                   u16 value)
-{
-       struct ipu6se_fw_psys_process_ext *pm_ext;
-       u8 ps_ext_offset;
-
-       ps_ext_offset = ptr->process_extension_offset;
-       if (!ps_ext_offset)
-               return -EINVAL;
-
-       pm_ext = (struct ipu6se_fw_psys_process_ext *)((u8 *)ptr +
-                                                      ps_ext_offset);
-
-       pm_ext->dev_chn_offset[offset] = value;
-
-       return 0;
-}
-
-int ipu6se_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
-                                      u16 id, u32 bitmap,
-                                      u32 active_bitmap)
-{
-       struct ipu6se_fw_psys_process_ext *pm_ext;
-       u8 ps_ext_offset;
-
-       ps_ext_offset = ptr->process_extension_offset;
-       if (!ps_ext_offset)
-               return -EINVAL;
-
-       pm_ext = (struct ipu6se_fw_psys_process_ext *)((u8 *)ptr +
-                                                      ps_ext_offset);
-
-       pm_ext->dfm_port_bitmap[id] = bitmap;
-       pm_ext->dfm_active_port_bitmap[id] = active_bitmap;
-
-       return 0;
-}
-
-int ipu6se_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
-                                      u16 type_id, u16 mem_id, u16 offset)
-{
-       struct ipu6se_fw_psys_process_ext *pm_ext;
-       u8 ps_ext_offset;
-
-       ps_ext_offset = ptr->process_extension_offset;
-       if (!ps_ext_offset)
-               return -EINVAL;
-
-       pm_ext = (struct ipu6se_fw_psys_process_ext *)((u8 *)ptr +
-                                                      ps_ext_offset);
-
-       pm_ext->ext_mem_offset[type_id] = offset;
-       pm_ext->ext_mem_id[type_id] = mem_id;
-
-       return 0;
-}
-
-static struct ipu_fw_psys_program_manifest *
-get_program_manifest(const struct ipu_fw_psys_program_group_manifest *manifest,
-                    const unsigned int program_index)
-{
-       struct ipu_fw_psys_program_manifest *prg_manifest_base;
-       u8 *program_manifest = NULL;
-       u8 program_count;
-       unsigned int i;
-
-       program_count = manifest->program_count;
-
-       prg_manifest_base = (struct ipu_fw_psys_program_manifest *)
-               ((char *)manifest + manifest->program_manifest_offset);
-       if (program_index < program_count) {
-               program_manifest = (u8 *)prg_manifest_base;
-               for (i = 0; i < program_index; i++)
-                       program_manifest +=
-                               ((struct ipu_fw_psys_program_manifest *)
-                                program_manifest)->size;
-       }
-
-       return (struct ipu_fw_psys_program_manifest *)program_manifest;
-}
-
-int ipu6se_fw_psys_get_program_manifest_by_process(
-       struct ipu_fw_generic_program_manifest *gen_pm,
-       const struct ipu_fw_psys_program_group_manifest *pg_manifest,
-       struct ipu_fw_psys_process *process)
-{
-       u32 program_id = process->program_idx;
-       struct ipu_fw_psys_program_manifest *pm;
-       struct ipu6se_fw_psys_program_manifest_ext *pm_ext;
-
-       pm = get_program_manifest(pg_manifest, program_id);
-
-       if (!pm)
-               return -ENOENT;
-
-       if (pm->program_extension_offset) {
-               pm_ext = (struct ipu6se_fw_psys_program_manifest_ext *)
-                       ((u8 *)pm + pm->program_extension_offset);
-
-               gen_pm->dev_chn_size = pm_ext->dev_chn_size;
-               gen_pm->dev_chn_offset = pm_ext->dev_chn_offset;
-               gen_pm->ext_mem_size = pm_ext->ext_mem_size;
-               gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset;
-               gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable;
-               gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap;
-               gen_pm->dfm_active_port_bitmap =
-                       pm_ext->dfm_active_port_bitmap;
-       }
-
-       memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells));
-       gen_pm->cell_id = pm->cells[0];
-       gen_pm->cell_type_id = pm->cell_type_id;
-
-       return 0;
-}
-
-void ipu6se_fw_psys_pg_dump(struct ipu_psys *psys,
-                           struct ipu_psys_kcmd *kcmd, const char *note)
-{
-       struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg;
-       u32 pgid = pg->ID;
-       u8 processes = pg->process_count;
-       u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset);
-       unsigned int p, chn, mem, mem_id;
-
-       dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes:\n",
-               __func__, note, pgid, processes);
-
-       for (p = 0; p < processes; p++) {
-               struct ipu_fw_psys_process *process =
-                   (struct ipu_fw_psys_process *)
-                   ((char *)pg + process_offset_table[p]);
-               struct ipu6se_fw_psys_process_ext *pm_ext =
-                   (struct ipu6se_fw_psys_process_ext *)((u8 *)process
-                   + process->process_extension_offset);
-               dev_dbg(&psys->adev->dev, "\t process %i size=%u",
-                       p, process->size);
-               if (!process->process_extension_offset)
-                       continue;
-
-               for (mem = 0; mem < IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID;
-                   mem++) {
-                       mem_id = pm_ext->ext_mem_id[mem];
-                       if (mem_id != IPU6SE_FW_PSYS_N_MEM_ID)
-                               dev_dbg(&psys->adev->dev,
-                                       "\t mem type %u id %d offset=0x%x",
-                                       mem, mem_id,
-                                       pm_ext->ext_mem_offset[mem]);
-               }
-               for (chn = 0; chn < IPU6SE_FW_PSYS_N_DEV_CHN_ID; chn++) {
-                       if (pm_ext->dev_chn_offset[chn] != (u16)(-1))
-                               dev_dbg(&psys->adev->dev,
-                                       "\t dev_chn[%u]=0x%x\n",
-                                       chn, pm_ext->dev_chn_offset[chn]);
-               }
-       }
-}
index 5a28a975a8b849329939b30774c49c12363796e8..fcb52da3d65bfc5a0b31f98eb4e8b8cdd17a0a70 100644 (file)
@@ -100,28 +100,4 @@ enum {
 #define IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE              32
 #define IPU6SE_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE               32
 
-struct ipu6se_fw_psys_program_manifest_ext {
-       u32 dfm_port_bitmap[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
-       u32 dfm_active_port_bitmap[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
-       u16 ext_mem_size[IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID];
-       u16 ext_mem_offset[IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID];
-       u16 dev_chn_size[IPU6SE_FW_PSYS_N_DEV_CHN_ID];
-       u16 dev_chn_offset[IPU6SE_FW_PSYS_N_DEV_CHN_ID];
-       u8 is_dfm_relocatable[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
-       u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES];
-       u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES];
-       u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES];
-       u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES];
-       u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT];
-};
-
-struct ipu6se_fw_psys_process_ext {
-       u32 dfm_port_bitmap[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
-       u32 dfm_active_port_bitmap[IPU6SE_FW_PSYS_N_DEV_DFM_ID];
-       u16 ext_mem_offset[IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID];
-       u16 dev_chn_offset[IPU6SE_FW_PSYS_N_DEV_CHN_ID];
-       u8 ext_mem_id[IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID];
-       u8 padding[IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT];
-};
-
 #endif /* IPU6SE_PLATFORM_RESOURCES_H */