 	int set_sz;
 	int err;
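+	/*
+	 * Skip ODP capability setup when on-demand paging support is
+	 * compiled out or the device does not report the pg capability.
+	 */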
-	if (!MLX5_CAP_GEN(dev, pg))
+	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
+	    !MLX5_CAP_GEN(dev, pg))
 		return 0;
 	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
 	return err;
 }
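+/*
+ * Consolidate the individual HCA capability handlers behind a single
+ * helper so the probe path has one call site and one error message.
+ */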
+static int set_hca_cap(struct mlx5_core_dev *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err;
+
+	err = handle_hca_cap(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap failed\n");
+		goto out;
+	}
+
+	err = handle_hca_cap_atomic(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+		goto out;
+	}
+
+	err = handle_hca_cap_odp(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+		goto out;
+	}
+
+out:
+	return err;
+}
+
 static int set_hca_ctrl(struct mlx5_core_dev *dev)
 {
 	struct mlx5_reg_host_endianness he_in;
 		goto reclaim_boot_pages;
 	}
-	err = handle_hca_cap(dev);
-	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap failed\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = handle_hca_cap_atomic(dev);
-	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = handle_hca_cap_odp(dev);
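+	/* set_hca_cap() applies handle_hca_cap(), _atomic() and _odp(). */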
+	err = set_hca_cap(dev);
 	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+		dev_err(&pdev->dev, "set_hca_cap failed\n");
 		goto reclaim_boot_pages;
 	}