/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#include "ib_rep.h"

/*
 * Stage list used to bring up an IB device for an e-switch vport
 * representor.  Stages run in array order on load and in reverse order
 * on unload (see __mlx5_ib_add()/__mlx5_ib_remove()).  A NULL init or
 * cleanup hook means that stage has nothing to do for that phase.
 * Rep-specific stages (flow DB, non-default callbacks, RoCE) use the
 * mlx5_ib_stage_rep_* variants instead of the default ones.
 */
static const struct mlx5_ib_profile rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_rep_flow_db_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	/* UMR resources are torn down before, and set up after, IB
	 * registration -- hence the asymmetric NULL hooks around IB_REG. */
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
		     mlx5_ib_stage_class_attr_init,
		     NULL),
};
46 | ||
/*
 * Load callback for the NIC (vport 0) representor.  Nothing to do:
 * the IB device already exists and its priv pointer is installed at
 * registration time (see mlx5_ib_register_vport_reps()).
 */
static int
mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	return 0;
}
52 | ||
/*
 * Unload callback for the NIC (vport 0) representor.  Only detaches the
 * IB device from the rep; the device itself is owned and freed by the
 * main mlx5_ib probe/remove path, not by the rep interface.
 */
static void
mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
	rep->rep_if[REP_IB].priv = NULL;
}
58 | ||
59 | static int | |
60 | mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) | |
61 | { | |
b5ca15ad MB |
62 | struct mlx5_ib_dev *ibdev; |
63 | ||
64 | ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev)); | |
65 | if (!ibdev) | |
66 | return -ENOMEM; | |
67 | ||
68 | ibdev->rep = rep; | |
69 | ibdev->mdev = dev; | |
70 | ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports), | |
71 | MLX5_CAP_GEN(dev, num_vhca_ports)); | |
72 | if (!__mlx5_ib_add(ibdev, &rep_profile)) | |
73 | return -EINVAL; | |
74 | ||
75 | rep->rep_if[REP_IB].priv = ibdev; | |
76 | ||
fc385b7a MB |
77 | return 0; |
78 | } | |
79 | ||
80 | static void | |
81 | mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep) | |
82 | { | |
b5ca15ad MB |
83 | struct mlx5_ib_dev *dev; |
84 | ||
85 | if (!rep->rep_if[REP_IB].priv) | |
86 | return; | |
87 | ||
88 | dev = mlx5_ib_rep_to_dev(rep); | |
89 | __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); | |
90 | rep->rep_if[REP_IB].priv = NULL; | |
fc385b7a MB |
91 | } |
92 | ||
/* get_proto_dev callback: hand back the mlx5_ib_dev attached to @rep. */
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return mlx5_ib_rep_to_dev(rep);
}
97 | ||
98 | static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev) | |
99 | { | |
100 | struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; | |
101 | int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev); | |
102 | int vport; | |
103 | ||
104 | for (vport = 1; vport < total_vfs; vport++) { | |
105 | struct mlx5_eswitch_rep_if rep_if = {}; | |
106 | ||
107 | rep_if.load = mlx5_ib_vport_rep_load; | |
108 | rep_if.unload = mlx5_ib_vport_rep_unload; | |
109 | rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev; | |
110 | mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB); | |
111 | } | |
112 | } | |
113 | ||
114 | static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev) | |
115 | { | |
116 | struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; | |
117 | int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev); | |
118 | int vport; | |
119 | ||
120 | for (vport = 1; vport < total_vfs; vport++) | |
121 | mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB); | |
122 | } | |
123 | ||
124 | void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev) | |
125 | { | |
126 | struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; | |
127 | struct mlx5_eswitch_rep_if rep_if = {}; | |
128 | ||
129 | rep_if.load = mlx5_ib_nic_rep_load; | |
130 | rep_if.unload = mlx5_ib_nic_rep_unload; | |
131 | rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev; | |
132 | rep_if.priv = dev; | |
133 | ||
134 | mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB); | |
135 | ||
136 | mlx5_ib_rep_register_vf_vports(dev); | |
137 | } | |
138 | ||
139 | void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev) | |
140 | { | |
141 | struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; | |
142 | ||
143 | mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */ | |
144 | mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/ | |
145 | } | |
146 | ||
/* Thin wrapper exposing the e-switch mode to the rest of mlx5_ib. */
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_mode(esw);
}
151 | ||
/* Look up the IB device registered for @vport_index, or NULL if none. */
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
}
157 | ||
/* Look up the ETH-rep netdev registered for @vport_index (REP_ETH side). */
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
}
163 | ||
/* Fetch the IB device registered on the uplink (PF) representor. */
struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
}
168 | ||
/* Thin wrapper fetching the eswitch rep object for @vport. */
struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
{
	return mlx5_eswitch_vport_rep(esw, vport);
}
b96c9dde MB |
173 | |
174 | int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, | |
175 | struct mlx5_ib_sq *sq) | |
176 | { | |
177 | struct mlx5_flow_handle *flow_rule; | |
178 | struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; | |
179 | ||
180 | if (!dev->rep) | |
181 | return 0; | |
182 | ||
183 | flow_rule = | |
184 | mlx5_eswitch_add_send_to_vport_rule(esw, | |
185 | dev->rep->vport, | |
186 | sq->base.mqp.qpn); | |
187 | if (IS_ERR(flow_rule)) | |
188 | return PTR_ERR(flow_rule); | |
189 | sq->flow_rule = flow_rule; | |
190 | ||
191 | return 0; | |
192 | } |