/* drivers/net/ethernet/mellanox/mlx4/cq.c */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)

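/* Tasklet that drains completions deferred from the EQ interrupt path.
 * Pending CQs are spliced onto a private list under the lock, then each
 * CQ's deferred handler is invoked; once TASKLET_MAX_TIME_JIFFIES have
 * elapsed the loop bails out and reschedules itself so that other
 * softirqs are not starved.
 */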
void mlx4_cq_tasklet_cb(unsigned long data)
{
	unsigned long flags;
	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
	struct mlx4_cq *mcq, *temp;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		if (atomic_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
		if (time_after(jiffies, end))
			break;
	}

	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
}

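/* Installed as cq->comp by mlx4_cq_alloc(): rather than running the
 * consumer's handler directly in interrupt context, take a reference and
 * queue the CQ on its EQ's tasklet list; mlx4_cq_tasklet_cb() invokes
 * tasklet_ctx.comp later.
 */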
static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
{
	unsigned long flags;
	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* If migrating CQs between EQs is ever implemented, this point
	 * must be synchronized: while a CQ is being migrated, completions
	 * from the old EQ may still arrive.
	 */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		atomic_inc(&cq->refcount);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

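/* Entry point from the EQ handler for a completion event: look up the CQ
 * by number (masked to the supported range), bump the arm sequence number
 * and run its completion callback.
 */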
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	++cq->arm_sn;

	cq->comp(cq);
}

void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	cq->event(cq, event_type);
}

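/* Thin wrappers around the firmware command interface: SW2HW_CQ hands a
 * software-built CQ context to the device (hardware ownership), MODIFY_CQ
 * updates context fields selected by opmod, and HW2SW_CQ takes the CQ
 * back into software ownership before it is freed.
 */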
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

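/* Update the CQ's completion event moderation parameters: an event fires
 * after cq_max_count completions or after cq_period time units, whichever
 * comes first (MODIFY_CQ, opmod 1). Illustrative caller, modeled on the
 * mlx4_en coalescing path:
 *
 *	mlx4_cq_modify(mdev->dev, &cq->mcq, cq->moder_cnt, cq->moder_time);
 */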
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

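/* Move the CQ onto a new, differently sized buffer: write the new log
 * size and MTT address into a fresh context and issue MODIFY_CQ with
 * opmod 0 (resize).
 */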
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size = mtt->page_shift - 12; /* 12 == MLX4_ICM_PAGE_SHIFT */
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

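/* Reserve a CQ number from the bitmap and make sure the ICM (device-backed
 * memory) pages holding this CQ's context entry and its companion cMPT
 * entry are mapped.
 */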
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
	return err;
}

/* In multi-function (SR-IOV) mode the CQ range is managed through the
 * resource tracker, so reserve and map via a wrapped ALLOC_RES command
 * rather than touching the bitmap and ICM tables directly.
 */
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}

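/* Mirror image of __mlx4_cq_alloc_icm(): drop the ICM references and
 * return the CQ number to the bitmap.
 */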
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}

static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else {
		__mlx4_cq_free_icm(dev, cqn);
	}
}

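/* Allocate and activate a CQ: reserve a CQ number and its ICM entries,
 * publish the CQ in the radix tree used by the interrupt handlers, build
 * the hardware context (log size, UAR page, completion EQ, MTT base,
 * doorbell record address) and pass ownership to firmware via SW2HW_CQ.
 */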
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage =
		cpu_to_be32((ilog2(nent) << 24) |
			    mlx4_to_hw_uar_index(dev, uar->index));
	cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn = 1;
	cq->uar = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);
	cq->comp = mlx4_add_cq_to_tasklet;
	cq->tasklet_ctx.priv =
		&priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
	return 0;

err_radix:
	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

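/* Tear the CQ down in the reverse order of mlx4_cq_alloc(): reclaim it
 * from firmware, unpublish it from the radix tree, synchronize against
 * the completion and async IRQs so no handler can still see it, wait for
 * the last reference to drop, then release the ICM entries and the CQ
 * number.
 */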
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

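/* Called once at device init time: set up the radix tree shared with the
 * interrupt path and, except on slaves (which get their CQ numbers from
 * the PF), the bitmap of CQ numbers.
 */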
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}