/* drivers/net/ethernet/mellanox/mlx4/profile.c */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mlx4.h"
#include "fw.h"

40 enum {
41 MLX4_RES_QP,
42 MLX4_RES_RDMARC,
43 MLX4_RES_ALTC,
44 MLX4_RES_AUXC,
45 MLX4_RES_SRQ,
46 MLX4_RES_CQ,
47 MLX4_RES_EQ,
48 MLX4_RES_DMPT,
49 MLX4_RES_CMPT,
50 MLX4_RES_MTT,
51 MLX4_RES_MCG,
52 MLX4_RES_NUM
53 };
54
55 static const char *res_name[] = {
56 [MLX4_RES_QP] = "QP",
57 [MLX4_RES_RDMARC] = "RDMARC",
58 [MLX4_RES_ALTC] = "ALTC",
59 [MLX4_RES_AUXC] = "AUXC",
60 [MLX4_RES_SRQ] = "SRQ",
61 [MLX4_RES_CQ] = "CQ",
62 [MLX4_RES_EQ] = "EQ",
63 [MLX4_RES_DMPT] = "DMPT",
64 [MLX4_RES_CMPT] = "CMPT",
65 [MLX4_RES_MTT] = "MTT",
66 [MLX4_RES_MCG] = "MCG",
67 };
68
69 u64 mlx4_make_profile(struct mlx4_dev *dev,
70 struct mlx4_profile *request,
71 struct mlx4_dev_cap *dev_cap,
72 struct mlx4_init_hca_param *init_hca)
73 {
74 struct mlx4_priv *priv = mlx4_priv(dev);
75 struct mlx4_resource {
76 u64 size;
77 u64 start;
78 int type;
79 u32 num;
80 int log_num;
81 };
82
83 u64 total_size = 0;
84 struct mlx4_resource *profile;
85 struct mlx4_resource tmp;
86 struct sysinfo si;
87 int i, j;
88
89 profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
90 if (!profile)
91 return -ENOMEM;
92
93 /*
94 * We want to scale the number of MTTs with the size of the
95 * system memory, since it makes sense to register a lot of
96 * memory on a system with a lot of memory. As a heuristic,
97 * make sure we have enough MTTs to cover twice the system
98 * memory (with PAGE_SIZE entries).
99 *
100 * This number has to be a power of two and fit into 32 bits
101 * due to device limitations, so cap this at 2^31 as well.
102 * That limits us to 8TB of memory registration per HCA with
103 * 4KB pages, which is probably OK for the next few months.
104 */
105 si_meminfo(&si);
106 request->num_mtt =
107 roundup_pow_of_two(max_t(unsigned, request->num_mtt,
108 min(1UL << (31 - log_mtts_per_seg),
109 si.totalram >> (log_mtts_per_seg - 1))));
110
111 profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
112 profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
113 profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz;
114 profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz;
115 profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz;
116 profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz;
117 profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
118 profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
119 profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
120 profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz;
121 profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev);
122
123 profile[MLX4_RES_QP].num = request->num_qp;
124 profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
125 profile[MLX4_RES_ALTC].num = request->num_qp;
126 profile[MLX4_RES_AUXC].num = request->num_qp;
127 profile[MLX4_RES_SRQ].num = request->num_srq;
128 profile[MLX4_RES_CQ].num = request->num_cq;
129 profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? dev->phys_caps.num_phys_eqs :
130 min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
131 profile[MLX4_RES_DMPT].num = request->num_mpt;
132 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
133 profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
134 profile[MLX4_RES_MCG].num = request->num_mcg;
135
136 for (i = 0; i < MLX4_RES_NUM; ++i) {
137 profile[i].type = i;
138 profile[i].num = roundup_pow_of_two(profile[i].num);
139 profile[i].log_num = ilog2(profile[i].num);
140 profile[i].size *= profile[i].num;
141 profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
142 }
143
144 /*
145 * Sort the resources in decreasing order of size. Since they
146 * all have sizes that are powers of 2, we'll be able to keep
147 * resources aligned to their size and pack them without gaps
148 * using the sorted order.
149 */
150 for (i = MLX4_RES_NUM; i > 0; --i)
151 for (j = 1; j < i; ++j) {
152 if (profile[j].size > profile[j - 1].size) {
153 tmp = profile[j];
154 profile[j] = profile[j - 1];
155 profile[j - 1] = tmp;
156 }
157 }
158
159 for (i = 0; i < MLX4_RES_NUM; ++i) {
160 if (profile[i].size) {
161 profile[i].start = total_size;
162 total_size += profile[i].size;
163 }
164
165 if (total_size > dev_cap->max_icm_sz) {
166 mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
167 (unsigned long long) total_size,
168 (unsigned long long) dev_cap->max_icm_sz);
169 kfree(profile);
170 return -ENOMEM;
171 }
172
173 if (profile[i].size)
174 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
175 i, res_name[profile[i].type],
176 profile[i].log_num,
177 (unsigned long long) profile[i].start,
178 (unsigned long long) profile[i].size);
179 }
180
181 mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
182 (int) (total_size >> 10));
183
184 for (i = 0; i < MLX4_RES_NUM; ++i) {
185 switch (profile[i].type) {
186 case MLX4_RES_QP:
187 dev->caps.num_qps = profile[i].num;
188 init_hca->qpc_base = profile[i].start;
189 init_hca->log_num_qps = profile[i].log_num;
190 break;
191 case MLX4_RES_RDMARC:
192 for (priv->qp_table.rdmarc_shift = 0;
193 request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
194 ++priv->qp_table.rdmarc_shift)
195 ; /* nothing */
196 dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
197 priv->qp_table.rdmarc_base = (u32) profile[i].start;
198 init_hca->rdmarc_base = profile[i].start;
199 init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;
200 break;
201 case MLX4_RES_ALTC:
202 init_hca->altc_base = profile[i].start;
203 break;
204 case MLX4_RES_AUXC:
205 init_hca->auxc_base = profile[i].start;
206 break;
207 case MLX4_RES_SRQ:
208 dev->caps.num_srqs = profile[i].num;
209 init_hca->srqc_base = profile[i].start;
210 init_hca->log_num_srqs = profile[i].log_num;
211 break;
212 case MLX4_RES_CQ:
213 dev->caps.num_cqs = profile[i].num;
214 init_hca->cqc_base = profile[i].start;
215 init_hca->log_num_cqs = profile[i].log_num;
216 break;
217 case MLX4_RES_EQ:
218 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
219 init_hca->log_num_eqs = 0x1f;
220 init_hca->eqc_base = profile[i].start;
221 init_hca->num_sys_eqs = dev_cap->num_sys_eqs;
222 } else {
223 dev->caps.num_eqs = roundup_pow_of_two(
224 min_t(unsigned,
225 dev_cap->max_eqs,
226 MAX_MSIX));
227 init_hca->eqc_base = profile[i].start;
228 init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
229 }
230 break;
231 case MLX4_RES_DMPT:
232 dev->caps.num_mpts = profile[i].num;
233 priv->mr_table.mpt_base = profile[i].start;
234 init_hca->dmpt_base = profile[i].start;
235 init_hca->log_mpt_sz = profile[i].log_num;
236 break;
237 case MLX4_RES_CMPT:
238 init_hca->cmpt_base = profile[i].start;
239 break;
240 case MLX4_RES_MTT:
241 dev->caps.num_mtts = profile[i].num;
242 priv->mr_table.mtt_base = profile[i].start;
243 init_hca->mtt_base = profile[i].start;
244 break;
245 case MLX4_RES_MCG:
246 init_hca->mc_base = profile[i].start;
247 init_hca->log_mc_entry_sz =
248 ilog2(mlx4_get_mgm_entry_size(dev));
249 init_hca->log_mc_table_sz = profile[i].log_num;
250 if (dev->caps.steering_mode ==
251 MLX4_STEERING_MODE_DEVICE_MANAGED) {
252 dev->caps.num_mgms = profile[i].num;
253 } else {
254 init_hca->log_mc_hash_sz =
255 profile[i].log_num - 1;
256 dev->caps.num_mgms = profile[i].num >> 1;
257 dev->caps.num_amgms = profile[i].num >> 1;
258 }
259 break;
260 default:
261 break;
262 }
263 }
264
265 /*
266 * PDs don't take any HCA memory, but we assign them as part
267 * of the HCA profile anyway.
268 */
269 dev->caps.num_pds = MLX4_NUM_PDS;
270
271 kfree(profile);
272 return total_size;
273 }