]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/infiniband/hw/mthca/mthca_av.c
IB/core: Define 'ib' and 'roce' rdma_ah_attr types
[mirror_ubuntu-artful-kernel.git] / drivers / infiniband / hw / mthca / mthca_av.c
CommitLineData
1da177e4
LT
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
cd4e8fb4 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
1da177e4
LT
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
1da177e4
LT
32 */
33
4e57b681
TS
34#include <linux/string.h>
35#include <linux/slab.h>
1da177e4 36
a4d61e84
RD
37#include <rdma/ib_verbs.h>
38#include <rdma/ib_cache.h>
1da177e4
LT
39
40#include "mthca_dev.h"
41
bf6a9e31
JM
/*
 * Static-rate encodings used in the AV msg_sr field on Tavor-mode
 * (non-memfree) devices; see tavor_rate_to_ib()/ib_rate_to_tavor().
 */
enum {
	MTHCA_RATE_TAVOR_FULL   = 0,	/* run at full port rate */
	MTHCA_RATE_TAVOR_1X     = 1,	/* throttle to 1X SDR (2.5 Gb/s) */
	MTHCA_RATE_TAVOR_4X     = 2,	/* throttle to 4X SDR (10 Gb/s) */
	MTHCA_RATE_TAVOR_1X_DDR = 3	/* throttle to 1X DDR (5 Gb/s) */
};
48
/*
 * Static-rate encodings for memfree (Arbel-mode) devices, expressed
 * as fractions of the port rate; see memfree_rate_to_ib().
 */
enum {
	MTHCA_RATE_MEMFREE_FULL    = 0,	/* full port rate */
	MTHCA_RATE_MEMFREE_QUARTER = 1,	/* 1/4 port rate */
	MTHCA_RATE_MEMFREE_EIGHTH  = 2,	/* 1/8 port rate */
	MTHCA_RATE_MEMFREE_HALF    = 3	/* 1/2 port rate */
};
55
/*
 * Hardware layout of an address vector (UDAV).  Multi-byte fields are
 * big-endian as consumed by the device; do not reorder or resize.
 */
struct mthca_av {
	__be32 port_pd;		/* port number (bits 31:24) | PD number */
	u8     reserved1;
	u8     g_slid;		/* GRH-present flag (bit 7) | source path bits */
	__be16 dlid;
	u8     reserved2;
	u8     gid_index;	/* global SGID index across all ports */
	u8     msg_sr;		/* max message size (bits 7:4) | static rate (3:0) */
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;	/* SL (31:28) | tclass (27:20) | flow label (19:0) */
	__be32 dgid[4];
};
68
bf6a9e31
JM
69static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
70{
71 switch (mthca_rate) {
72 case MTHCA_RATE_MEMFREE_EIGHTH:
73 return mult_to_ib_rate(port_rate >> 3);
74 case MTHCA_RATE_MEMFREE_QUARTER:
75 return mult_to_ib_rate(port_rate >> 2);
76 case MTHCA_RATE_MEMFREE_HALF:
77 return mult_to_ib_rate(port_rate >> 1);
78 case MTHCA_RATE_MEMFREE_FULL:
79 default:
80 return mult_to_ib_rate(port_rate);
81 }
82}
83
84static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
85{
86 switch (mthca_rate) {
87 case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS;
88 case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
89 case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS;
b046a04e 90 default: return mult_to_ib_rate(port_rate);
bf6a9e31
JM
91 }
92}
93
94enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
95{
96 if (mthca_is_memfree(dev)) {
97 /* Handle old Arbel FW */
98 if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
99 return IB_RATE_2_5_GBPS;
100
101 return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]);
102 } else
103 return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]);
104}
105
106static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
107{
108 if (cur_rate <= req_rate)
109 return 0;
110
111 /*
112 * Inter-packet delay (IPD) to get from rate X down to a rate
113 * no more than Y is (X - 1) / Y.
114 */
115 switch ((cur_rate - 1) / req_rate) {
116 case 0: return MTHCA_RATE_MEMFREE_FULL;
117 case 1: return MTHCA_RATE_MEMFREE_HALF;
118 case 2: /* fall through */
119 case 3: return MTHCA_RATE_MEMFREE_QUARTER;
120 default: return MTHCA_RATE_MEMFREE_EIGHTH;
121 }
122}
123
124static u8 ib_rate_to_tavor(u8 static_rate)
125{
126 switch (static_rate) {
127 case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
128 case IB_RATE_5_GBPS: return MTHCA_RATE_TAVOR_1X_DDR;
129 case IB_RATE_10_GBPS: return MTHCA_RATE_TAVOR_4X;
130 default: return MTHCA_RATE_TAVOR_FULL;
131 }
132}
133
/*
 * Compute the device static-rate encoding for the requested IB static
 * rate on the given port.  Returns 0 (full port rate) when no
 * throttling is required.
 */
u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
{
	u8 rate;

	/* No limit requested, or the port can't exceed it anyway. */
	if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
		return 0;

	if (mthca_is_memfree(dev))
		rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate),
					  dev->rate[port - 1]);
	else
		rate = ib_rate_to_tavor(static_rate);

	/*
	 * If firmware doesn't advertise support for the computed
	 * encoding, fall back to encoding 1 (1X on Tavor, quarter
	 * rate on memfree).
	 */
	if (!(dev->limits.stat_rate_support & (1 << rate)))
		rate = 1;

	return rate;
}
152
1da177e4
LT
153int mthca_create_ah(struct mthca_dev *dev,
154 struct mthca_pd *pd,
90898850 155 struct rdma_ah_attr *ah_attr,
1da177e4
LT
156 struct mthca_ah *ah)
157{
158 u32 index = -1;
159 struct mthca_av *av = NULL;
160
161 ah->type = MTHCA_AH_PCI_POOL;
162
d10ddbf6 163 if (mthca_is_memfree(dev)) {
8df8a34d 164 ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC);
1da177e4
LT
165 if (!ah->av)
166 return -ENOMEM;
167
168 ah->type = MTHCA_AH_KMALLOC;
169 av = ah->av;
170 } else if (!atomic_read(&pd->sqp_count) &&
171 !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
172 index = mthca_alloc(&dev->av_table.alloc);
173
174 /* fall back to allocate in host memory */
175 if (index == -1)
176 goto on_hca_fail;
177
8df8a34d 178 av = kmalloc(sizeof *av, GFP_ATOMIC);
1da177e4
LT
179 if (!av)
180 goto on_hca_fail;
181
182 ah->type = MTHCA_AH_ON_HCA;
183 ah->avdma = dev->av_table.ddr_av_base +
184 index * MTHCA_AV_SIZE;
185 }
186
187on_hca_fail:
188 if (ah->type == MTHCA_AH_PCI_POOL) {
7ceb740c
SJ
189 ah->av = pci_pool_zalloc(dev->av_table.pool,
190 GFP_ATOMIC, &ah->avdma);
1da177e4
LT
191 if (!ah->av)
192 return -ENOMEM;
193
194 av = ah->av;
195 }
196
197 ah->key = pd->ntmr.ibmr.lkey;
198
d8966fcd
DC
199 av->port_pd = cpu_to_be32(pd->pd_num |
200 (rdma_ah_get_port_num(ah_attr) << 24));
201 av->g_slid = rdma_ah_get_path_bits(ah_attr);
202 av->dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
1da177e4 203 av->msg_sr = (3 << 4) | /* 2K message */
d8966fcd
DC
204 mthca_get_rate(dev, rdma_ah_get_static_rate(ah_attr),
205 rdma_ah_get_port_num(ah_attr));
206 av->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
207 if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
208 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
209
1da177e4 210 av->g_slid |= 0x80;
d8966fcd
DC
211 av->gid_index = (rdma_ah_get_port_num(ah_attr) - 1) *
212 dev->limits.gid_table_len +
213 grh->sgid_index;
214 av->hop_limit = grh->hop_limit;
1da177e4 215 av->sl_tclass_flowlabel |=
d8966fcd
DC
216 cpu_to_be32((grh->traffic_class << 20) |
217 grh->flow_label);
218 memcpy(av->dgid, grh->dgid.raw, 16);
1da177e4
LT
219 } else {
220 /* Arbel workaround -- low byte of GID must be 2 */
221 av->dgid[3] = cpu_to_be32(2);
222 }
223
224 if (0) {
225 int j;
226
227 mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
228 av, (unsigned long) ah->avdma);
229 for (j = 0; j < 8; ++j)
230 printk(KERN_DEBUG " [%2x] %08x\n",
97f52eb4 231 j * 4, be32_to_cpu(((__be32 *) av)[j]));
1da177e4
LT
232 }
233
234 if (ah->type == MTHCA_AH_ON_HCA) {
235 memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE,
236 av, MTHCA_AV_SIZE);
237 kfree(av);
238 }
239
240 return 0;
241}
242
243int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
244{
245 switch (ah->type) {
246 case MTHCA_AH_ON_HCA:
247 mthca_free(&dev->av_table.alloc,
2fa5e2eb 248 (ah->avdma - dev->av_table.ddr_av_base) /
1da177e4
LT
249 MTHCA_AV_SIZE);
250 break;
251
252 case MTHCA_AH_PCI_POOL:
253 pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);
254 break;
255
256 case MTHCA_AH_KMALLOC:
257 kfree(ah->av);
258 break;
259 }
260
261 return 0;
262}
263
9eacee2a
MT
264int mthca_ah_grh_present(struct mthca_ah *ah)
265{
266 return !!(ah->av->g_slid & 0x80);
267}
268
1da177e4
LT
/*
 * Build the LRH (and GRH, if present) fields of a UD header from a
 * stored AV.  On-HCA AVs can't be read back through ah->av, so they
 * are rejected with -EINVAL.
 */
int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
		  struct ib_ud_header *header)
{
	if (ah->type == MTHCA_AH_ON_HCA)
		return -EINVAL;

	header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
	/* dlid is stored big-endian already, copied through unchanged */
	header->lrh.destination_lid = ah->av->dlid;
	/* mask off the GRH flag in bit 7, keeping only the path bits */
	header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
	if (mthca_ah_grh_present(ah)) {
		header->grh.traffic_class =
			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
		header->grh.flow_label =
			ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		header->grh.hop_limit = ah->av->hop_limit;
		/* port number is the top byte of port_pd; reduce the
		 * device-global gid_index to a per-port index */
		ib_get_cached_gid(&dev->ib_dev,
				  be32_to_cpu(ah->av->port_pd) >> 24,
				  ah->av->gid_index % dev->limits.gid_table_len,
				  &header->grh.source_gid, NULL);
		memcpy(header->grh.destination_gid.raw,
		       ah->av->dgid, 16);
	}

	return 0;
}
294
/*
 * Reconstruct an rdma_ah_attr from the stored AV.  Only supported for
 * AVs kept in host memory; on-HCA AVs return -ENOSYS.
 */
int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct mthca_ah *ah = to_mah(ibah);
	struct mthca_dev *dev = to_mdev(ibah->device);
	/* port number lives in the top byte of port_pd */
	u8 port_num = be32_to_cpu(ah->av->port_pd) >> 24;

	/* Only implement for MAD and memfree ah for now. */
	if (ah->type == MTHCA_AH_ON_HCA)
		return -ENOSYS;

	memset(attr, 0, sizeof *attr);
	attr->type = ibah->type;
	rdma_ah_set_dlid(attr, be16_to_cpu(ah->av->dlid));
	/* SL is the top nibble of sl_tclass_flowlabel */
	rdma_ah_set_sl(attr, be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28);
	rdma_ah_set_port_num(attr, port_num);
	/* low 3 bits of msg_sr hold the static-rate encoding */
	rdma_ah_set_static_rate(attr,
				mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
						 port_num));
	/* drop the GRH flag in bit 7, keeping only the path bits */
	rdma_ah_set_path_bits(attr, ah->av->g_slid & 0x7F);
	if (mthca_ah_grh_present(ah)) {
		u32 tc_fl = be32_to_cpu(ah->av->sl_tclass_flowlabel);

		/* unpack flow label (19:0), per-port SGID index,
		 * hop limit, and traffic class (27:20) */
		rdma_ah_set_grh(attr, NULL,
				tc_fl & 0xfffff,
				ah->av->gid_index &
				(dev->limits.gid_table_len - 1),
				ah->av->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(attr, ah->av->dgid);
	}

	return 0;
}
328
/*
 * Set up the AV table for Tavor-mode devices: an index allocator for
 * on-HCA DDR AV slots, a PCI pool for host-memory fallback AVs, and
 * (unless DDR is hidden) an ioremap of the on-HCA AV region.  Memfree
 * devices kmalloc their AVs, so there is nothing to initialize.
 * Returns 0 on success or a negative errno.
 */
int mthca_init_av_table(struct mthca_dev *dev)
{
	int err;

	if (mthca_is_memfree(dev))
		return 0;

	err = mthca_alloc_init(&dev->av_table.alloc,
			       dev->av_table.num_ddr_avs,
			       dev->av_table.num_ddr_avs - 1,
			       0);
	if (err)
		return err;

	dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev,
					     MTHCA_AV_SIZE,
					     MTHCA_AV_SIZE, 0);
	if (!dev->av_table.pool)
		goto out_free_alloc;

	if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		/* map the DDR AV area, offset from BAR 4 */
		dev->av_table.av_map = ioremap(pci_resource_start(dev->pdev, 4) +
					       dev->av_table.ddr_av_base -
					       dev->ddr_start,
					       dev->av_table.num_ddr_avs *
					       MTHCA_AV_SIZE);
		if (!dev->av_table.av_map)
			goto out_free_pool;
	} else
		dev->av_table.av_map = NULL;

	return 0;

 out_free_pool:
	pci_pool_destroy(dev->av_table.pool);

 out_free_alloc:
	mthca_alloc_cleanup(&dev->av_table.alloc);
	return -ENOMEM;
}
369
e1f7868c 370void mthca_cleanup_av_table(struct mthca_dev *dev)
1da177e4 371{
d10ddbf6 372 if (mthca_is_memfree(dev))
1da177e4
LT
373 return;
374
375 if (dev->av_table.av_map)
376 iounmap(dev->av_table.av_map);
377 pci_pool_destroy(dev->av_table.pool);
378 mthca_alloc_cleanup(&dev->av_table.alloc);
379}