/* drivers/net/ethernet/mellanox/mlx4/cq.c */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/gfp.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

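/*
 * CQ context as passed to the firmware in the SW2HW_CQ mailbox;
 * multi-byte fields are big-endian on the wire (__be* types).
 */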
struct mlx4_cq_context {
	__be32			flags;
	u16			reserved1[3];
	__be16			page_offset;
	__be32			logsize_usrpage;
	__be16			cq_period;
	__be16			cq_max_count;
	u8			reserved2[3];
	u8			comp_eqn;
	u8			log_page_size;
	u8			reserved3[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	__be32			last_notified_index;
	__be32			solicit_producer_index;
	__be32			consumer_index;
	__be32			producer_index;
	u32			reserved4[2];
	__be64			db_rec_addr;
};

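/* Status, flag and state encodings used in the CQ context "flags" word. */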
#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

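/*
 * Called from the EQ interrupt path when a completion event arrives:
 * look up the CQ by number, bump its arm sequence number and invoke
 * the owner's completion callback.
 */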
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (!cq) {
		mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);
}

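/*
 * Called for asynchronous (error) events on a CQ.  A reference is
 * taken under the table lock so the CQ cannot be freed while its
 * event handler is running.
 */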
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	spin_lock(&cq_table->lock);

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&cq_table->lock);

	if (!cq) {
		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

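/*
 * Thin wrappers around the SW2HW_CQ, MODIFY_CQ and HW2SW_CQ firmware
 * commands.  SW2HW_CQ hands a software-owned CQ context to the HCA,
 * HW2SW_CQ takes it back (skipping retrieval of the context when no
 * mailbox is supplied), and MODIFY_CQ updates an existing CQ; its
 * opmod selects the kind of modification (see the callers below:
 * 0 for resize, 1 for moderation).
 */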
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A);
}

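/*
 * Adjust event moderation for an existing CQ: the HCA coalesces
 * completion events until "count" completions have occurred or the
 * moderation timer "period" expires.
 */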
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period    = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

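/*
 * Point an existing CQ at a new buffer: write the new size and MTT
 * address into a fresh context and issue MODIFY_CQ with opmod 0.
 */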
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

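/*
 * Allocate a CQ number, pin its ICM entries, make it reachable for
 * the event dispatchers through the radix tree and finally hand the
 * context to the firmware with SW2HW_CQ.  The error labels undo each
 * step in reverse order on failure.
 */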
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
		return -EINVAL;

	cq->vector = vector;

	cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (cq->cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
	if (err)
		goto err_put;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_cmpt_put;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_cmpt_put:
	mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);

err_put:
	mlx4_table_put(dev, &cq_table->table, cq->cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

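/*
 * Tear down a CQ: return ownership to software with HW2SW_CQ, wait
 * for any in-flight interrupt handler on the CQ's vector, then drop
 * the initial reference and wait until all event handlers are done
 * before releasing the ICM entries and the CQ number.
 */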
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_table_put(dev, &cq_table->table, cq->cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

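/*
 * One-time setup and teardown of the per-device CQ table: the radix
 * tree used for lookups and the bitmap of CQ numbers, with the bottom
 * dev->caps.reserved_cqs numbers kept out of circulation.
 */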
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}