/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright 2011 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @o: pointer to the object
 * @exe_len: length
 * @owner: pointer to the owner
 * @validate: validate function pointer
 * @remove: remove function pointer
 * @optimize: optimize function pointer
 * @exec: execute function pointer
 * @get: get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->remove = remove;
	o->optimize = optimize;
	o->execute = exec;
	o->get = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp: driver handle
 * @o: queue
 * @elem: new command to add
 * @restore: true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element against a pending one */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			BNX2X_ERR("Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}
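
/*
 * Note on element ownership (added commentary, illustrative only): on any
 * non-zero return from bnx2x_exe_queue_add() the element has already been
 * freed via bnx2x_exe_queue_free_elem(), so a hypothetical caller must not
 * touch it afterwards:
 *
 *	elem = bnx2x_exe_queue_alloc_elem(bp);
 *	if (!elem)
 *		return -ENOMEM;
 *	...fill elem->cmd_data and elem->cmd_len...
 *	rc = bnx2x_exe_queue_add(bp, &o->exe_queue, elem, false);
 *	if (rc)
 *		return rc;	// elem was freed - don't reference or kfree it
 */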

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp: driver handle
 * @o: queue
 * @ramrod_flags: flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting pending_comp\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent both lists from being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, it means there are no outstanding
		 * pending completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
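
/*
 * Return contract of bnx2x_exe_queue_step() (summarised for reference): a
 * negative value means the chunk failed and its commands were returned to the
 * queue, 0 means everything queued so far has been consumed, and a positive
 * value means completions are still outstanding. A synchronous caller (cf.
 * bnx2x_config_vlan_mac() below) therefore loops along the lines of:
 *
 *	while (!bnx2x_exe_queue_empty(&o->exe_queue)) {
 *		rc = raw->wait_comp(bp, raw);	// wait for a FW completion
 *		if (rc)
 *			return rc;
 *		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
 *		if (rc < 0)
 *			return rc;
 *	}
 */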

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp: device handle
 * @state: state which is to be cleared
 * @pstate: state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
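
/*
 * Pending-bit lifecycle (added summary of how the helpers above tie
 * together): a raw object marks a ramrod in flight by setting its state bit,
 * the completion path clears it, and bnx2x_state_wait() polls for that
 * transition:
 *
 *	r->set_pending(r);		// bnx2x_raw_set_pending()
 *	bnx2x_sp_post(bp, ...);		// post the ramrod to the FW
 *	...				// completion handler ends up calling
 *					// r->clear_pending(r)
 *	rc = r->wait_comp(bp, r);	// bnx2x_raw_wait(): 0, or -EBUSY on
 *					// timeout
 */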

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}
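
/*
 * Note (added commentary): the paired VLAN-MAC helpers above take or return
 * one credit from *both* pools and roll the first pool back when the second
 * one fails, so a VLAN-MAC classification never holds a MAC credit without
 * the matching VLAN credit or vice versa.
 */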

static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *buf)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = buf;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			/* place leading zeroes in buffer */
			memset(next, 0, MAC_LEADING_ZERO_CNT);

			/* place mac after leading zeroes*/
			memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
			       ETH_ALEN);

			/* calculate address of next element and
			 * advance counter
			 */
			counter++;
			next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));

			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
			   counter, next, pos->u.mac.mac);
		}
	}
	return counter * ETH_ALEN;
}
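
/*
 * Buffer layout produced by bnx2x_get_n_elements() (illustrative): each MAC
 * occupies one u32-aligned slot of ALIGN(ETH_ALEN, sizeof(u32)) = 8 bytes,
 * padded with MAC_LEADING_ZERO_CNT = 2 zero bytes in front:
 *
 *	buf: [00 00 m0 m1 m2 m3 m4 m5][00 00 m0' m1' m2' m3' m4' m5']...
 *	      ^------- slot 0 ------^  ^------- slot 1 --------^
 *
 * Note that the returned value is counter * ETH_ALEN, i.e. the number of MAC
 * bytes copied, not the padded buffer size.
 */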

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
596
619c5cb6
VZ
597
598static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
042181f5 599{
619c5cb6
VZ
600 struct bnx2x_raw_obj *raw = &o->raw;
601 u8 rx_tx_flag = 0;
042181f5 602
619c5cb6
VZ
603 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
604 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
605 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
042181f5 606
619c5cb6
VZ
607 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
608 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
609 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
610
611 return rx_tx_flag;
042181f5
VZ
612}
613
619c5cb6
VZ
614/* LLH CAM line allocations */
615enum {
616 LLH_CAM_ISCSI_ETH_LINE = 0,
617 LLH_CAM_ETH_LINE,
618 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
619};
620
621static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
622 bool add, unsigned char *dev_addr, int index)
042181f5 623{
619c5cb6
VZ
624 u32 wb_data[2];
625 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
626 NIG_REG_LLH0_FUNC_MEM;
627
628 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
629 return;
630
631 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
632 (add ? "ADD" : "DELETE"), index);
633
634 if (add) {
635 /* LLH_FUNC_MEM is a u64 WB register */
636 reg_offset += 8*index;
042181f5 637
619c5cb6
VZ
638 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
639 (dev_addr[4] << 8) | dev_addr[5]);
640 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
042181f5 641
619c5cb6
VZ
642 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
643 }
042181f5 644
619c5cb6
VZ
645 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
646 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
647}

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue for which we want to configure this rule
 * @add: if true the command is an ADD command, DEL otherwise
 * @opcode: CLASSIFY_RULE_OPCODE_XXX
 * @hdr: pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid: connection id
 * @type: BNX2X_FILTER_XXX_PENDING
 * @hdr: pointer to header to setup
 * @rule_cnt:
 *
 * currently we always configure one rule and set the echo field to contain a
 * CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}
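
/*
 * Echo field encoding (illustrative): the low bits carry the SW connection
 * id and the bits from BNX2X_SWCID_SHIFT up carry the pending command type,
 * so a completion handler can recover both from a single u32:
 *
 *	cid  = hdr->echo & BNX2X_SWCID_MASK;
 *	type = hdr->echo >> BNX2X_SWCID_SHIFT;
 */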

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   add ? "add" : "delete", mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_MAC,
					&rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/*
	 * Set the ramrod data header.
	 * TODO: take this to the higher level in order to prevent multiple
	 * writing.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp: device handle
 * @o: queue
 * @type:
 * @cam_offset: offset in cam memory
 * @hdr: pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = 0xff;
	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
	   add ? "setting" : "clearing",
	   mac, raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_VLAN,
					&rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/*
	 * Set the ramrod data header.
	 * TODO: take this to the higher level in order to prevent multiple
	 * writing.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/*
	 * Set the ramrod data header.
	 * TODO: take this to the higher level in order to prevent multiple
	 * writing.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @elem: bnx2x_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp: device handle
 * @p: command parameters
 * @ppos: pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart
 * the iterator. If returned *ppos == NULL this means that the last element
 * has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p,
			   struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
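
/*
 * Illustrative restore loop (hypothetical caller, for reference only): the
 * cookie starts out NULL, is handed back on every call and becomes NULL again
 * once the last registry element has been re-submitted:
 *
 *	struct bnx2x_vlan_mac_registry_elem *ppos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &ppos);
 *		if (rc < 0)
 *			return rc;
 *	} while (ppos);
 */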

/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp: device handle
 * @qo: quable object to check
 * @elem: element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp: device handle
 * @qo: quable object to check (source)
 * @elem: element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp: device handle
 * @o: bnx2x_vlan_mac_obj
 * @cqe:
 * @ramrod_flags: if RAMROD_CONT is set, schedule next execution chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp: device handle
 * @qo: bnx2x_qable_obj
 * @elem: bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
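
/*
 * Optimization example (illustrative): if a DEL for a MAC is submitted while
 * an ADD for the same MAC is still waiting in the execution queue, the two
 * cancel out - the queued ADD is removed and its CAM credit returned here,
 * and bnx2x_exe_queue_add() then frees the new DEL element because optimize()
 * returned a non-zero value. No ramrod is sent for either command.
 */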

/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp: device handle
 * @o:
 * @elem:
 * @restore:
 * @re:
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	int cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked
			 * the CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			  sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}

/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp: device handle
 * @qo:
 * @exe_chunk:
 * @ramrod_flags:
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	int cmd;

	/*
	 * If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/*
			 * We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long as we
		 * would need to ensure the ordering of writing to the SPQ
		 * element and updating of the SPQ producer which involves a
		 * memory read and we will have to put a full memory barrier
		 * there (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}

/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp: device handle
 * @p:
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
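
/*
 * Typical synchronous usage (illustrative sketch only; 'mac_obj' and 'addr'
 * stand for the caller's object and MAC address):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *	int rc;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);	// 0 once fully completed
 */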
1802
1803
1804
1805/**
1806 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1807 *
1808 * @bp: device handle
1809 * @o:
1810 * @vlan_mac_flags:
1811 * @ramrod_flags: execution flags to be used for this deletion
1812 *
1813 * if the last operation has completed successfully and there are no
1814 * moreelements left, positive value if the last operation has completed
1815 * successfully and there are more previously configured elements, negative
1816 * value is current operation has failed.
1817 */
1818static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1819 struct bnx2x_vlan_mac_obj *o,
1820 unsigned long *vlan_mac_flags,
1821 unsigned long *ramrod_flags)
1822{
1823 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1824 int rc = 0;
1825 struct bnx2x_vlan_mac_ramrod_params p;
1826 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1827 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1828
1829 /* Clear pending commands first */
1830
1831 spin_lock_bh(&exeq->lock);
1832
1833 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1834 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
460a25cd
YM
1835 *vlan_mac_flags) {
1836 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1837 if (rc) {
1838 BNX2X_ERR("Failed to remove command\n");
1839 return rc;
1840 }
619c5cb6 1841 list_del(&exeq_pos->link);
460a25cd 1842 }
619c5cb6
VZ
1843 }
1844
1845 spin_unlock_bh(&exeq->lock);
1846
1847 /* Prepare a command request */
1848 memset(&p, 0, sizeof(p));
1849 p.vlan_mac_obj = o;
1850 p.ramrod_flags = *ramrod_flags;
1851 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1852
1853 /*
1854 * Add all but the last VLAN-MAC to the execution queue without actually
1855 * execution anything.
1856 */
1857 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1858 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1859 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1860
1861 list_for_each_entry(pos, &o->head, link) {
1862 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1863 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1864 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1865 rc = bnx2x_config_vlan_mac(bp, &p);
1866 if (rc < 0) {
1867 BNX2X_ERR("Failed to add a new DEL command\n");
1868 return rc;
1869 }
1870 }
1871 }
1872
1873 p.ramrod_flags = *ramrod_flags;
1874 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1875
1876 return bnx2x_config_vlan_mac(bp, &p);
1877}
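
/* Illustrative sketch (hypothetical helper, not part of the driver flow): a
 * caller typically drives the deletion above through the object's
 * delete_all() callback, e.g. to flush all previously configured MACs of a
 * queue and wait for completion. The flag names below are the real ones
 * used in this file.
 */
static inline int bnx2x_example_flush_macs(struct bnx2x *bp,
					   struct bnx2x_vlan_mac_obj *o)
{
	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;

	/* match the elements configured as regular Ethernet MACs */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);

	/* wait until the last DEL ramrod completes */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	/* delete_all() queues one DEL per matching registry element and
	 * then triggers execution with RAMROD_CONT (see above).
	 */
	return o->delete_all(bp, o, &vlan_mac_flags, &ramrod_flags);
}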
1878
1879static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1880 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1881 unsigned long *pstate, bnx2x_obj_type type)
1882{
1883 raw->func_id = func_id;
1884 raw->cid = cid;
1885 raw->cl_id = cl_id;
1886 raw->rdata = rdata;
1887 raw->rdata_mapping = rdata_mapping;
1888 raw->state = state;
1889 raw->pstate = pstate;
1890 raw->obj_type = type;
1891 raw->check_pending = bnx2x_raw_check_pending;
1892 raw->clear_pending = bnx2x_raw_clear_pending;
1893 raw->set_pending = bnx2x_raw_set_pending;
1894 raw->wait_comp = bnx2x_raw_wait;
1895}
1896
1897static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1898 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1899 int state, unsigned long *pstate, bnx2x_obj_type type,
1900 struct bnx2x_credit_pool_obj *macs_pool,
1901 struct bnx2x_credit_pool_obj *vlans_pool)
1902{
1903 INIT_LIST_HEAD(&o->head);
1904
1905 o->macs_pool = macs_pool;
1906 o->vlans_pool = vlans_pool;
1907
1908 o->delete_all = bnx2x_vlan_mac_del_all;
1909 o->restore = bnx2x_vlan_mac_restore;
1910 o->complete = bnx2x_complete_vlan_mac;
1911 o->wait = bnx2x_wait_vlan_mac;
1912
1913 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1914 state, pstate, type);
1915}
1916
1917
1918void bnx2x_init_mac_obj(struct bnx2x *bp,
1919 struct bnx2x_vlan_mac_obj *mac_obj,
1920 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1921 dma_addr_t rdata_mapping, int state,
1922 unsigned long *pstate, bnx2x_obj_type type,
1923 struct bnx2x_credit_pool_obj *macs_pool)
1924{
1925 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1926
1927 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1928 rdata_mapping, state, pstate, type,
1929 macs_pool, NULL);
1930
1931 /* CAM credit pool handling */
1932 mac_obj->get_credit = bnx2x_get_credit_mac;
1933 mac_obj->put_credit = bnx2x_put_credit_mac;
1934 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1935 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1936
1937 if (CHIP_IS_E1x(bp)) {
1938 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1939 mac_obj->check_del = bnx2x_check_mac_del;
1940 mac_obj->check_add = bnx2x_check_mac_add;
1941 mac_obj->check_move = bnx2x_check_move_always_err;
1942 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1943
1944 /* Exe Queue */
1945 bnx2x_exe_queue_init(bp,
1946 &mac_obj->exe_queue, 1, qable_obj,
1947 bnx2x_validate_vlan_mac,
1948 bnx2x_remove_vlan_mac,
1949 bnx2x_optimize_vlan_mac,
1950 bnx2x_execute_vlan_mac,
1951 bnx2x_exeq_get_mac);
1952 } else {
1953 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1954 mac_obj->check_del = bnx2x_check_mac_del;
1955 mac_obj->check_add = bnx2x_check_mac_add;
1956 mac_obj->check_move = bnx2x_check_move;
1957 mac_obj->ramrod_cmd =
1958 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1959 mac_obj->get_n_elements = bnx2x_get_n_elements;
1960
1961 /* Exe Queue */
1962 bnx2x_exe_queue_init(bp,
1963 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1964 qable_obj, bnx2x_validate_vlan_mac,
1965 bnx2x_remove_vlan_mac,
1966 bnx2x_optimize_vlan_mac,
1967 bnx2x_execute_vlan_mac,
1968 bnx2x_exeq_get_mac);
1969 }
1970}
1971
1972void bnx2x_init_vlan_obj(struct bnx2x *bp,
1973 struct bnx2x_vlan_mac_obj *vlan_obj,
1974 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1975 dma_addr_t rdata_mapping, int state,
1976 unsigned long *pstate, bnx2x_obj_type type,
1977 struct bnx2x_credit_pool_obj *vlans_pool)
1978{
1979 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1980
1981 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1982 rdata_mapping, state, pstate, type, NULL,
1983 vlans_pool);
1984
1985 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1986 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1987 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1988 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1989
1990 if (CHIP_IS_E1x(bp)) {
1991 BNX2X_ERR("Do not support chips others than E2 and newer\n");
1992 BUG();
1993 } else {
1994 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1995 vlan_obj->check_del = bnx2x_check_vlan_del;
1996 vlan_obj->check_add = bnx2x_check_vlan_add;
1997 vlan_obj->check_move = bnx2x_check_move;
1998 vlan_obj->ramrod_cmd =
1999 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2000
2001 /* Exe Queue */
2002 bnx2x_exe_queue_init(bp,
2003 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2004 qable_obj, bnx2x_validate_vlan_mac,
2005 bnx2x_remove_vlan_mac,
2006 bnx2x_optimize_vlan_mac,
2007 bnx2x_execute_vlan_mac,
2008 bnx2x_exeq_get_vlan);
2009 }
2010}
2011
2012void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2013 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2014 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2015 dma_addr_t rdata_mapping, int state,
2016 unsigned long *pstate, bnx2x_obj_type type,
2017 struct bnx2x_credit_pool_obj *macs_pool,
2018 struct bnx2x_credit_pool_obj *vlans_pool)
2019{
2020 union bnx2x_qable_obj *qable_obj =
2021 (union bnx2x_qable_obj *)vlan_mac_obj;
2022
2023 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2024 rdata_mapping, state, pstate, type,
2025 macs_pool, vlans_pool);
2026
2027 /* CAM pool handling */
2028 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2029 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2030 /*
2031 * CAM offset is relevant for 57710 and 57711 chips only which have a
2032 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2033 * will be taken from MACs' pool object only.
2034 */
2035 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2036 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2037
2038 if (CHIP_IS_E1(bp)) {
2039 BNX2X_ERR("Do not support chips others than E2\n");
2040 BUG();
2041 } else if (CHIP_IS_E1H(bp)) {
2042 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2043 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2044 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2045 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2046 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2047
2048 /* Exe Queue */
2049 bnx2x_exe_queue_init(bp,
2050 &vlan_mac_obj->exe_queue, 1, qable_obj,
2051 bnx2x_validate_vlan_mac,
2052 bnx2x_remove_vlan_mac,
2053 bnx2x_optimize_vlan_mac,
2054 bnx2x_execute_vlan_mac,
2055 bnx2x_exeq_get_vlan_mac);
2056 } else {
2057 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2058 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2059 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2060 vlan_mac_obj->check_move = bnx2x_check_move;
2061 vlan_mac_obj->ramrod_cmd =
2062 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2063
2064 /* Exe Queue */
2065 bnx2x_exe_queue_init(bp,
2066 &vlan_mac_obj->exe_queue,
2067 CLASSIFY_RULES_COUNT,
2068 qable_obj, bnx2x_validate_vlan_mac,
2069 bnx2x_remove_vlan_mac,
2070 bnx2x_optimize_vlan_mac,
2071 bnx2x_execute_vlan_mac,
2072 bnx2x_exeq_get_vlan_mac);
2073 }
2074
2075}
2076
2077/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2078static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2079 struct tstorm_eth_mac_filter_config *mac_filters,
2080 u16 pf_id)
2081{
2082 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2083
2084 u32 addr = BAR_TSTRORM_INTMEM +
2085 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2086
2087 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2088}
2089
2090static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2091 struct bnx2x_rx_mode_ramrod_params *p)
2092{
2093 /* update the bp MAC filter structure */
2094 u32 mask = (1 << p->cl_id);
2095
2096 struct tstorm_eth_mac_filter_config *mac_filters =
2097 (struct tstorm_eth_mac_filter_config *)p->rdata;
2098
2099 /* initial setting is drop-all */
2100 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2101 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2102 u8 unmatched_unicast = 0;
2103
2104 /* In e1x we only take the Rx accept flags into account since Tx
2105 * switching isn't enabled. */
2106 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2107 /* accept matched ucast */
2108 drop_all_ucast = 0;
2109
2110 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2111 /* accept matched mcast */
2112 drop_all_mcast = 0;
2113
2114 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2115 /* accept all ucast */
2116 drop_all_ucast = 0;
2117 accp_all_ucast = 1;
2118 }
2119 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2120 /* accept all mcast */
2121 drop_all_mcast = 0;
2122 accp_all_mcast = 1;
2123 }
2124 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2125 /* accept (all) bcast */
2126 accp_all_bcast = 1;
2127 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2128 /* accept unmatched unicasts */
2129 unmatched_unicast = 1;
2130
2131 mac_filters->ucast_drop_all = drop_all_ucast ?
2132 mac_filters->ucast_drop_all | mask :
2133 mac_filters->ucast_drop_all & ~mask;
2134
2135 mac_filters->mcast_drop_all = drop_all_mcast ?
2136 mac_filters->mcast_drop_all | mask :
2137 mac_filters->mcast_drop_all & ~mask;
2138
2139 mac_filters->ucast_accept_all = accp_all_ucast ?
2140 mac_filters->ucast_accept_all | mask :
2141 mac_filters->ucast_accept_all & ~mask;
2142
2143 mac_filters->mcast_accept_all = accp_all_mcast ?
2144 mac_filters->mcast_accept_all | mask :
2145 mac_filters->mcast_accept_all & ~mask;
2146
2147 mac_filters->bcast_accept_all = accp_all_bcast ?
2148 mac_filters->bcast_accept_all | mask :
2149 mac_filters->bcast_accept_all & ~mask;
2150
2151 mac_filters->unmatched_unicast = unmatched_unicast ?
2152 mac_filters->unmatched_unicast | mask :
2153 mac_filters->unmatched_unicast & ~mask;
2154
2155 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2156 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2157 mac_filters->ucast_drop_all,
2158 mac_filters->mcast_drop_all,
2159 mac_filters->ucast_accept_all,
2160 mac_filters->mcast_accept_all,
2161 mac_filters->bcast_accept_all);
2162
2163 /* write the MAC filter structure */
2164 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2165
2166 /* The operation is completed */
2167 clear_bit(p->state, p->pstate);
2168 smp_mb__after_clear_bit();
2169
2170 return 0;
2171}
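
/* Illustrative note (sketch only): each ternary above performs a per-client
 * set/clear on a shared filter bitmask, equivalent to:
 *
 *	if (drop_all_ucast)
 *		mac_filters->ucast_drop_all |= mask;
 *	else
 *		mac_filters->ucast_drop_all &= ~mask;
 *
 * with mask == (1 << cl_id), so every client owns exactly one bit in each
 * filter word.
 */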
2172
2173/* Setup ramrod data */
2174static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2175 struct eth_classify_header *hdr,
2176 u8 rule_cnt)
2177{
2178 hdr->echo = cid;
2179 hdr->rule_cnt = rule_cnt;
2180}
2181
2182static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2183 unsigned long accept_flags,
2184 struct eth_filter_rules_cmd *cmd,
2185 bool clear_accept_all)
2186{
2187 u16 state;
2188
2189 /* start with 'drop-all' */
2190 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2191 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2192
2193 if (accept_flags) {
2194 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2195 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2196
2197 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2198 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2199
2200 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2201 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2202 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2203 }
2204
2205 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2206 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2207 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2208 }
2209 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2210 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2211
2212 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2213 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2214 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2215 }
2216 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2217 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2218 }
2219
2220 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2221 if (clear_accept_all) {
2222 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2223 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2224 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2225 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2226 }
2227
2228 cmd->state = cpu_to_le16(state);
2229
2230}
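
/* Illustrative sketch (hypothetical helper, not used by the driver): the
 * accept-flags to filter-state translation above, applied to a typical
 * "normal" Rx mode of matched unicast + multicast + broadcast.
 */
static inline void bnx2x_example_normal_rx_state(struct bnx2x *bp,
						 struct eth_filter_rules_cmd *cmd)
{
	unsigned long accept_flags = 0;

	__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
	__set_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);

	/* Resulting state: UCAST_DROP_ALL and MCAST_DROP_ALL cleared,
	 * BCAST_ACCEPT_ALL set.
	 */
	bnx2x_rx_mode_set_cmd_state_e2(bp, accept_flags, cmd, false);
}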
2231
2232static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2233 struct bnx2x_rx_mode_ramrod_params *p)
2234{
2235 struct eth_filter_rules_ramrod_data *data = p->rdata;
2236 int rc;
2237 u8 rule_idx = 0;
2238
2239 /* Reset the ramrod data buffer */
2240 memset(data, 0, sizeof(*data));
2241
2242 /* Setup ramrod data */
2243
2244 /* Tx (internal switching) */
2245 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2246 data->rules[rule_idx].client_id = p->cl_id;
2247 data->rules[rule_idx].func_id = p->func_id;
2248
2249 data->rules[rule_idx].cmd_general_data =
2250 ETH_FILTER_RULES_CMD_TX_CMD;
2251
2252 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2253 &(data->rules[rule_idx++]), false);
2254 }
2255
2256 /* Rx */
2257 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2258 data->rules[rule_idx].client_id = p->cl_id;
2259 data->rules[rule_idx].func_id = p->func_id;
2260
2261 data->rules[rule_idx].cmd_general_data =
2262 ETH_FILTER_RULES_CMD_RX_CMD;
2263
2264 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2265 &(data->rules[rule_idx++]), false);
2266 }
2267
2268
2269 /*
2270 * If FCoE Queue configuration has been requested, configure the Rx and
2271 * internal switching modes for this queue in separate rules.
2272 *
2273 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2274 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2275 */
2276 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2277 /* Tx (internal switching) */
2278 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2279 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2280 data->rules[rule_idx].func_id = p->func_id;
2281
2282 data->rules[rule_idx].cmd_general_data =
2283 ETH_FILTER_RULES_CMD_TX_CMD;
2284
2285 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2286 &(data->rules[rule_idx++]),
2287 true);
2288 }
2289
2290 /* Rx */
2291 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2292 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2293 data->rules[rule_idx].func_id = p->func_id;
2294
2295 data->rules[rule_idx].cmd_general_data =
2296 ETH_FILTER_RULES_CMD_RX_CMD;
2297
2298 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2299 &(data->rules[rule_idx++]),
2300 true);
2301 }
2302 }
2303
2304 /*
2305 * Set the ramrod header (most importantly - number of rules to
2306 * configure).
2307 */
2308 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2309
2310 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2311 "tx_accept_flags 0x%lx\n",
2312 data->header.rule_cnt, p->rx_accept_flags,
2313 p->tx_accept_flags);
2314
2315 /*
2316 * No need for an explicit memory barrier here as long we would
2317 * need to ensure the ordering of writing to the SPQ element
2318 * and updating of the SPQ producer which involves a memory
2319 * read and we will have to put a full memory barrier there
2320 * (inside bnx2x_sp_post()).
2321 */
2322
2323 /* Send a ramrod */
2324 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2325 U64_HI(p->rdata_mapping),
2326 U64_LO(p->rdata_mapping),
2327 ETH_CONNECTION_TYPE);
2328 if (rc)
2329 return rc;
2330
2331 /* Ramrod completion is pending */
2332 return 1;
2333}
2334
2335static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2336 struct bnx2x_rx_mode_ramrod_params *p)
2337{
2338 return bnx2x_state_wait(bp, p->state, p->pstate);
2339}
2340
2341static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2342 struct bnx2x_rx_mode_ramrod_params *p)
2343{
2344 /* Do nothing */
2345 return 0;
2346}
2347
2348int bnx2x_config_rx_mode(struct bnx2x *bp,
2349 struct bnx2x_rx_mode_ramrod_params *p)
2350{
2351 int rc;
2352
2353 /* Configure the new classification in the chip */
2354 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2355 if (rc < 0)
2356 return rc;
2357
2358 /* Wait for a ramrod completion if was requested */
2359 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2360 rc = p->rx_mode_obj->wait_comp(bp, p);
2361 if (rc)
2362 return rc;
2363 }
2364
2365 return rc;
2366}
2367
2368void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2369 struct bnx2x_rx_mode_obj *o)
2370{
2371 if (CHIP_IS_E1x(bp)) {
2372 o->wait_comp = bnx2x_empty_rx_mode_wait;
2373 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2374 } else {
2375 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2376 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2377 }
2378}
2379
2380/********************* Multicast verbs: SET, CLEAR ****************************/
2381static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2382{
2383 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2384}
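
/* Illustrative sketch (hypothetical helper, not used by the driver): bin
 * selection for a concrete multicast MAC. The top byte of the little-endian
 * CRC32C picks one of 256 approximate-match bins, so distinct MACs may
 * share a bin (hence "approximate" match).
 */
static inline u8 bnx2x_example_mcast_bin(void)
{
	u8 mac[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};

	/* same computation as bnx2x_mcast_bin_from_mac() above */
	return bnx2x_mcast_bin_from_mac(mac);
}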
2385
2386struct bnx2x_mcast_mac_elem {
2387 struct list_head link;
2388 u8 mac[ETH_ALEN];
2389 u8 pad[2]; /* For a natural alignment of the following buffer */
2390};
2391
2392struct bnx2x_pending_mcast_cmd {
2393 struct list_head link;
2394 int type; /* BNX2X_MCAST_CMD_X */
2395 union {
2396 struct list_head macs_head;
2397 u32 macs_num; /* Needed for DEL command */
2398 int next_bin; /* Needed for RESTORE flow with aprox match */
2399 } data;
2400
2401 bool done; /* set to true, when the command has been handled,
2402 * practically used in 57712 handling only, where one pending
2403 * command may be handled in a few operations. As long as for
2404 * other chips every operation handling is completed in a
2405 * single ramrod, there is no need to utilize this field.
2406 */
2407};
2408
2409static int bnx2x_mcast_wait(struct bnx2x *bp,
2410 struct bnx2x_mcast_obj *o)
2411{
2412 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2413 o->raw.wait_comp(bp, &o->raw))
2414 return -EBUSY;
2415
2416 return 0;
2417}
2418
2419static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2420 struct bnx2x_mcast_obj *o,
2421 struct bnx2x_mcast_ramrod_params *p,
2422 int cmd)
2423{
2424 int total_sz;
2425 struct bnx2x_pending_mcast_cmd *new_cmd;
2426 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2427 struct bnx2x_mcast_list_elem *pos;
2428 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2429 p->mcast_list_len : 0);
2430
2431 /* If the command is empty ("handle pending commands only"), return */
2432 if (!p->mcast_list_len)
2433 return 0;
2434
2435 total_sz = sizeof(*new_cmd) +
2436 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2437
2438 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2439 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2440
2441 if (!new_cmd)
2442 return -ENOMEM;
2443
2444 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2445 "macs_list_len=%d\n", cmd, macs_list_len);
2446
2447 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2448
2449 new_cmd->type = cmd;
2450 new_cmd->done = false;
2451
2452 switch (cmd) {
2453 case BNX2X_MCAST_CMD_ADD:
2454 cur_mac = (struct bnx2x_mcast_mac_elem *)
2455 ((u8 *)new_cmd + sizeof(*new_cmd));
2456
2457 /* Push the MACs of the current command into the pending command
2458 * MACs list: FIFO
2459 */
2460 list_for_each_entry(pos, &p->mcast_list, link) {
2461 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2462 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2463 cur_mac++;
2464 }
2465
2466 break;
2467
2468 case BNX2X_MCAST_CMD_DEL:
2469 new_cmd->data.macs_num = p->mcast_list_len;
2470 break;
2471
2472 case BNX2X_MCAST_CMD_RESTORE:
2473 new_cmd->data.next_bin = 0;
2474 break;
2475
2476 default:
2477 BNX2X_ERR("Unknown command: %d\n", cmd);
2478 return -EINVAL;
2479 }
2480
2481 /* Push the new pending command to the tail of the pending list: FIFO */
2482 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2483
2484 o->set_sched(o);
2485
2486 return 1;
2487}
2488
2489/**
2490 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2491 *
2492 * @o: multicast object
2493 * @last: index to start looking from (inclusive)
2494 *
2495 * returns the next found (set) bin or a negative value if none is found.
2496 */
2497static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2498{
2499 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2500
2501 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2502 if (o->registry.aprox_match.vec[i])
2503 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2504 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2505 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2506 vec, cur_bit)) {
2507 return cur_bit;
2508 }
2509 }
2510 inner_start = 0;
2511 }
2512
2513 /* None found */
2514 return -1;
2515}
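
/* Illustrative note (sketch only): the scan above splits a bin index into a
 * 64-bit word index and an inner bit, e.g. for last == 70 with
 * BIT_VEC64_ELEM_SZ == 64:
 *
 *	i = 70 / 64 = 1;		(start from vec[1])
 *	inner_start = 70 % 64 = 6;	(start from bit 6 inside vec[1])
 *
 * so restarting from "last" skips all previously scanned words, and empty
 * words are skipped wholesale by the vec[i] test.
 */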
2516
2517/**
2518 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2519 *
2520 * @o: multicast object
2521 *
2522 * returns the index of the found bin or -1 if none is found
2523 */
2524static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2525{
2526 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2527
2528 if (cur_bit >= 0)
2529 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2530
2531 return cur_bit;
2532}
2533
2534static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2535{
2536 struct bnx2x_raw_obj *raw = &o->raw;
2537 u8 rx_tx_flag = 0;
2538
2539 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2540 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2541 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2542
2543 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2544 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2545 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2546
2547 return rx_tx_flag;
2548}
2549
2550static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2551 struct bnx2x_mcast_obj *o, int idx,
2552 union bnx2x_mcast_config_data *cfg_data,
2553 int cmd)
2554{
2555 struct bnx2x_raw_obj *r = &o->raw;
2556 struct eth_multicast_rules_ramrod_data *data =
2557 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2558 u8 func_id = r->func_id;
2559 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2560 int bin;
2561
2562 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2563 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2564
2565 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2566
2567 /* Get a bin and update a bins' vector */
2568 switch (cmd) {
2569 case BNX2X_MCAST_CMD_ADD:
2570 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2571 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2572 break;
2573
2574 case BNX2X_MCAST_CMD_DEL:
2575 /* If there were no more bins to clear
2576 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2577 * clear any (0xff) bin.
2578 * See bnx2x_mcast_validate_e2() for explanation when it may
2579 * happen.
2580 */
2581 bin = bnx2x_mcast_clear_first_bin(o);
2582 break;
2583
2584 case BNX2X_MCAST_CMD_RESTORE:
2585 bin = cfg_data->bin;
2586 break;
2587
2588 default:
2589 BNX2X_ERR("Unknown command: %d\n", cmd);
2590 return;
2591 }
2592
2593 DP(BNX2X_MSG_SP, "%s bin %d\n",
2594 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2595 "Setting" : "Clearing"), bin);
2596
2597 data->rules[idx].bin_id = (u8)bin;
2598 data->rules[idx].func_id = func_id;
2599 data->rules[idx].engine_id = o->engine_id;
2600}
2601
2602/**
2603 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2604 *
2605 * @bp: device handle
2606 * @o: multicast object
2607 * @start_bin: index in the registry to start from (including)
2608 * @rdata_idx: index in the ramrod data to start from
2609 *
2610 * returns last handled bin index or -1 if all bins have been handled
2611 */
2612static inline int bnx2x_mcast_handle_restore_cmd_e2(
2613 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2614 int *rdata_idx)
2615{
2616 int cur_bin, cnt = *rdata_idx;
2617 union bnx2x_mcast_config_data cfg_data = {0};
2618
2619 /* go through the registry and configure the bins from it */
2620 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2621 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2622
2623 cfg_data.bin = (u8)cur_bin;
2624 o->set_one_rule(bp, o, cnt, &cfg_data,
2625 BNX2X_MCAST_CMD_RESTORE);
2626
2627 cnt++;
2628
2629 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2630
2631 /* Break if we reached the maximum number
2632 * of rules.
2633 */
2634 if (cnt >= o->max_cmd_len)
2635 break;
2636 }
2637
2638 *rdata_idx = cnt;
2639
2640 return cur_bin;
2641}
2642
2643static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2644 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2645 int *line_idx)
2646{
2647 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2648 int cnt = *line_idx;
2649 union bnx2x_mcast_config_data cfg_data = {0};
2650
2651 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2652 link) {
2653
2654 cfg_data.mac = &pmac_pos->mac[0];
2655 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2656
2657 cnt++;
2658
2659 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2660 pmac_pos->mac);
2661
2662 list_del(&pmac_pos->link);
2663
2664 /* Break if we reached the maximum number
2665 * of rules.
2666 */
2667 if (cnt >= o->max_cmd_len)
2668 break;
2669 }
2670
2671 *line_idx = cnt;
2672
2673 /* if no more MACs to configure - we are done */
2674 if (list_empty(&cmd_pos->data.macs_head))
2675 cmd_pos->done = true;
2676}
2677
2678static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2679 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2680 int *line_idx)
2681{
2682 int cnt = *line_idx;
2683
2684 while (cmd_pos->data.macs_num) {
2685 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2686
2687 cnt++;
2688
2689 cmd_pos->data.macs_num--;
2690
2691 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2692 cmd_pos->data.macs_num, cnt);
2693
2694 /* Break if we reached the maximum
2695 * number of rules.
2696 */
2697 if (cnt >= o->max_cmd_len)
2698 break;
2699 }
2700
2701 *line_idx = cnt;
2702
2703 /* If we cleared all bins - we are done */
2704 if (!cmd_pos->data.macs_num)
2705 cmd_pos->done = true;
2706}
2707
2708static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2709 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2710 int *line_idx)
2711{
2712 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2713 line_idx);
2714
2715 if (cmd_pos->data.next_bin < 0)
2716 /* If o->set_restore returned -1 we are done */
2717 cmd_pos->done = true;
2718 else
2719 /* Start from the next bin next time */
2720 cmd_pos->data.next_bin++;
2721}
2722
2723static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2724 struct bnx2x_mcast_ramrod_params *p)
2725{
2726 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2727 int cnt = 0;
2728 struct bnx2x_mcast_obj *o = p->mcast_obj;
2729
2730 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2731 link) {
2732 switch (cmd_pos->type) {
2733 case BNX2X_MCAST_CMD_ADD:
2734 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2735 break;
2736
2737 case BNX2X_MCAST_CMD_DEL:
2738 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2739 break;
2740
2741 case BNX2X_MCAST_CMD_RESTORE:
2742 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2743 &cnt);
2744 break;
2745
2746 default:
2747 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2748 return -EINVAL;
2749 }
2750
2751 /* If the command has been completed - remove it from the list
2752 * and free the memory
2753 */
2754 if (cmd_pos->done) {
2755 list_del(&cmd_pos->link);
2756 kfree(cmd_pos);
2757 }
2758
2759 /* Break if we reached the maximum number of rules */
2760 if (cnt >= o->max_cmd_len)
2761 break;
2762 }
2763
2764 return cnt;
2765}
2766
2767static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2768 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2769 int *line_idx)
2770{
2771 struct bnx2x_mcast_list_elem *mlist_pos;
2772 union bnx2x_mcast_config_data cfg_data = {0};
2773 int cnt = *line_idx;
2774
2775 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2776 cfg_data.mac = mlist_pos->mac;
2777 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2778
2779 cnt++;
2780
2781 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2782 mlist_pos->mac);
2783 }
2784
2785 *line_idx = cnt;
2786}
2787
2788static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2789 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2790 int *line_idx)
2791{
2792 int cnt = *line_idx, i;
2793
2794 for (i = 0; i < p->mcast_list_len; i++) {
2795 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2796
2797 cnt++;
2798
2799 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2800 p->mcast_list_len - i - 1);
2801 }
2802
2803 *line_idx = cnt;
2804}
2805
2806/**
2807 * bnx2x_mcast_handle_current_cmd - send the current command if there is room
2808 *
2809 * @bp: device handle
2810 * @p: mcast ramrod parameters
2811 * @cmd: command type (BNX2X_MCAST_CMD_X)
2812 * @start_cnt: first line in the ramrod data that may be used
2813 *
2814 * This function is called iff there is enough room for the current command in
2815 * the ramrod data.
2816 * Returns number of lines filled in the ramrod data in total.
2817 */
2818static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2819 struct bnx2x_mcast_ramrod_params *p, int cmd,
2820 int start_cnt)
2821{
2822 struct bnx2x_mcast_obj *o = p->mcast_obj;
2823 int cnt = start_cnt;
2824
2825 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2826
2827 switch (cmd) {
2828 case BNX2X_MCAST_CMD_ADD:
2829 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2830 break;
2831
2832 case BNX2X_MCAST_CMD_DEL:
2833 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2834 break;
2835
2836 case BNX2X_MCAST_CMD_RESTORE:
2837 o->hdl_restore(bp, o, 0, &cnt);
2838 break;
2839
2840 default:
2841 BNX2X_ERR("Unknown command: %d\n", cmd);
2842 return -EINVAL;
2843 }
2844
2845 /* The current command has been handled */
2846 p->mcast_list_len = 0;
2847
2848 return cnt;
2849}
2850
2851static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2852 struct bnx2x_mcast_ramrod_params *p,
2853 int cmd)
2854{
2855 struct bnx2x_mcast_obj *o = p->mcast_obj;
2856 int reg_sz = o->get_registry_size(o);
2857
2858 switch (cmd) {
2859 /* DEL command deletes all currently configured MACs */
2860 case BNX2X_MCAST_CMD_DEL:
2861 o->set_registry_size(o, 0);
2862 /* Don't break */
2863
2864 /* RESTORE command will restore the entire multicast configuration */
2865 case BNX2X_MCAST_CMD_RESTORE:
2866 /* Here we set the approximate amount of work to do, which may
2867 * in fact be less, as some MACs in postponed ADD
2868 * command(s) scheduled before this command may fall into
2869 * the same bin and the actual number of bins set in the
2870 * registry would be less than we estimated here. See
2871 * bnx2x_mcast_set_one_rule_e2() for further details.
2872 */
2873 p->mcast_list_len = reg_sz;
2874 break;
2875
2876 case BNX2X_MCAST_CMD_ADD:
2877 case BNX2X_MCAST_CMD_CONT:
2878 /* Here we assume that all new MACs will fall into new bins.
2879 * However we will correct the real registry size after we
2880 * handle all pending commands.
2881 */
2882 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2883 break;
2884
2885 default:
2886 BNX2X_ERR("Unknown command: %d\n", cmd);
2887 return -EINVAL;
2888
2889 }
2890
2891 /* Increase the total number of MACs pending to be configured */
2892 o->total_pending_num += p->mcast_list_len;
2893
2894 return 0;
2895}
2896
2897static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2898 struct bnx2x_mcast_ramrod_params *p,
2899 int old_num_bins)
2900{
2901 struct bnx2x_mcast_obj *o = p->mcast_obj;
2902
2903 o->set_registry_size(o, old_num_bins);
2904 o->total_pending_num -= p->mcast_list_len;
2905}
2906
2907/**
2908 * bnx2x_mcast_set_rdata_hdr_e2 - set header values in the ramrod data
2909 *
2910 * @bp: device handle
2911 * @p: mcast ramrod parameters
2912 * @len: number of rules to handle
2913 */
2914static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2915 struct bnx2x_mcast_ramrod_params *p,
2916 u8 len)
2917{
2918 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2919 struct eth_multicast_rules_ramrod_data *data =
2920 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2921
2922 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2923 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2924 data->header.rule_cnt = len;
2925}
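
/* Illustrative sketch (hypothetical helper): how the echo field above packs
 * the SW connection id together with the pending-filter state, and how a
 * completion path could unpack it. The mask/shift names are the real ones
 * used in this file.
 */
static inline void bnx2x_example_unpack_echo(u32 echo, u32 *cid, u32 *state)
{
	*cid = echo & BNX2X_SWCID_MASK;
	*state = echo >> BNX2X_SWCID_SHIFT;
}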
2926
2927/**
2928 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2929 *
2930 * @bp: device handle
2931 * @o: multicast object
2932 *
2933 * Recalculate the actual number of set bins in the registry using Brian
2934 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
2935 *
2936 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2937 */
2938static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2939 struct bnx2x_mcast_obj *o)
2940{
2941 int i, cnt = 0;
2942 u64 elem;
2943
2944 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2945 elem = o->registry.aprox_match.vec[i];
2946 for (; elem; cnt++)
2947 elem &= elem - 1;
2948 }
2949
2950 o->set_registry_size(o, cnt);
2951
2952 return 0;
2953}
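
/* Illustrative sketch (hypothetical helper, not used by the driver): the
 * Kernighan bit-count loop above applied to a single 64-bit word; each
 * "elem &= elem - 1" clears the lowest set bit, so the loop body runs
 * exactly once per set bin.
 */
static inline int bnx2x_example_popcount64(u64 elem)
{
	int cnt = 0;

	for (; elem; cnt++)
		elem &= elem - 1;	/* clear the lowest set bit */

	return cnt;
}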
2954
2955static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2956 struct bnx2x_mcast_ramrod_params *p,
2957 int cmd)
2958{
2959 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2960 struct bnx2x_mcast_obj *o = p->mcast_obj;
2961 struct eth_multicast_rules_ramrod_data *data =
2962 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2963 int cnt = 0, rc;
2964
2965 /* Reset the ramrod data buffer */
2966 memset(data, 0, sizeof(*data));
2967
2968 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2969
2970 /* If there are no more pending commands - clear SCHEDULED state */
2971 if (list_empty(&o->pending_cmds_head))
2972 o->clear_sched(o);
2973
2974 /* The below may be true iff there was enough room in ramrod
2975 * data for all pending commands and for the current
2976 * command. Otherwise the current command would have been added
2977 * to the pending commands and p->mcast_list_len would have been
2978 * zeroed.
2979 */
2980 if (p->mcast_list_len > 0)
2981 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2982
2983 /* We've pulled out some MACs - update the total number of
2984 * outstanding.
2985 */
2986 o->total_pending_num -= cnt;
2987
2988 /* send a ramrod */
2989 WARN_ON(o->total_pending_num < 0);
2990 WARN_ON(cnt > o->max_cmd_len);
2991
2992 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2993
2994 /* Update a registry size if there are no more pending operations.
2995 *
2996 * We don't want to change the value of the registry size if there are
2997 * pending operations because we want it to always be equal to the
2998 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2999 * set bins after the last requested operation in order to properly
3000 * evaluate the size of the next DEL/RESTORE operation.
3001 *
3002 * Note that we update the registry itself during command(s) handling
3003 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3004 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3005 * with a limited amount of update commands (per MAC/bin) and we don't
3006 * know in this scope what the actual state of bins configuration is
3007 * going to be after this ramrod.
3008 */
3009 if (!o->total_pending_num)
3010 bnx2x_mcast_refresh_registry_e2(bp, o);
3011
3012 /*
3013 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3014 * RAMROD_PENDING status immediately.
3015 */
3016 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3017 raw->clear_pending(raw);
3018 return 0;
3019 } else {
3020 /*
3021 * No need for an explicit memory barrier here as long we would
3022 * need to ensure the ordering of writing to the SPQ element
3023 * and updating of the SPQ producer which involves a memory
3024 * read and we will have to put a full memory barrier there
3025 * (inside bnx2x_sp_post()).
3026 */
3027
3028 /* Send a ramrod */
3029 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3030 raw->cid, U64_HI(raw->rdata_mapping),
3031 U64_LO(raw->rdata_mapping),
3032 ETH_CONNECTION_TYPE);
3033 if (rc)
3034 return rc;
3035
3036 /* Ramrod completion is pending */
3037 return 1;
3038 }
3039}
3040
3041static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3042 struct bnx2x_mcast_ramrod_params *p,
3043 int cmd)
3044{
3045 /* Mark that there is work to do */
3046 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3047 p->mcast_list_len = 1;
3048
3049 return 0;
3050}
3051
3052static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3053 struct bnx2x_mcast_ramrod_params *p,
3054 int old_num_bins)
3055{
3056 /* Do nothing */
3057}
3058
3059#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3060do { \
3061 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3062} while (0)
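
/* Illustrative note (sketch only): the macro above maps a bin index into a
 * u32 filter word, e.g. bin 70 lands in (filter)[70 >> 5] == (filter)[2],
 * bit (70 & 0x1f) == 6. MC_HASH_SIZE such words cover all 256 bins.
 */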
3063
3064static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3065 struct bnx2x_mcast_obj *o,
3066 struct bnx2x_mcast_ramrod_params *p,
3067 u32 *mc_filter)
3068{
3069 struct bnx2x_mcast_list_elem *mlist_pos;
3070 int bit;
3071
3072 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3073 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3074 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3075
3076 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3077 mlist_pos->mac, bit);
3078
3079 /* bookkeeping... */
3080 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3081 bit);
3082 }
3083}
3084
3085static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3086 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3087 u32 *mc_filter)
3088{
3089 int bit;
3090
3091 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3092 bit >= 0;
3093 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3094 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3095 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3096 }
3097}
3098
3099/* On 57711 we write the multicast MACs' approximate match
3100 * table directly into the TSTORM's internal RAM, so we don't
3101 * need any tricks to make it work.
3102 */
3103static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3104 struct bnx2x_mcast_ramrod_params *p,
3105 int cmd)
3106{
3107 int i;
3108 struct bnx2x_mcast_obj *o = p->mcast_obj;
3109 struct bnx2x_raw_obj *r = &o->raw;
3110
3111 /* If CLEAR_ONLY has been requested - clear the registry
3112 * and clear a pending bit.
3113 */
3114 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3115 u32 mc_filter[MC_HASH_SIZE] = {0};
3116
3117 /* Set the multicast filter bits before writing it into
3118 * the internal memory.
3119 */
3120 switch (cmd) {
3121 case BNX2X_MCAST_CMD_ADD:
3122 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3123 break;
3124
3125 case BNX2X_MCAST_CMD_DEL:
3126 DP(BNX2X_MSG_SP,
3127 "Invalidating multicast MACs configuration\n");
3128
3129 /* clear the registry */
3130 memset(o->registry.aprox_match.vec, 0,
3131 sizeof(o->registry.aprox_match.vec));
3132 break;
3133
3134 case BNX2X_MCAST_CMD_RESTORE:
3135 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3136 break;
3137
3138 default:
3139 BNX2X_ERR("Unknown command: %d\n", cmd);
3140 return -EINVAL;
3141 }
3142
3143 /* Set the mcast filter in the internal memory */
3144 for (i = 0; i < MC_HASH_SIZE; i++)
3145 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3146 } else
3147 /* clear the registry */
3148 memset(o->registry.aprox_match.vec, 0,
3149 sizeof(o->registry.aprox_match.vec));
3150
3151 /* We are done */
3152 r->clear_pending(r);
3153
3154 return 0;
3155}
3156
3157static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3158 struct bnx2x_mcast_ramrod_params *p,
3159 int cmd)
3160{
3161 struct bnx2x_mcast_obj *o = p->mcast_obj;
3162 int reg_sz = o->get_registry_size(o);
3163
3164 switch (cmd) {
3165 /* DEL command deletes all currently configured MACs */
3166 case BNX2X_MCAST_CMD_DEL:
3167 o->set_registry_size(o, 0);
3168 /* Don't break */
3169
3170 /* RESTORE command will restore the entire multicast configuration */
3171 case BNX2X_MCAST_CMD_RESTORE:
3172 p->mcast_list_len = reg_sz;
3173 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3174 cmd, p->mcast_list_len);
3175 break;
3176
3177 case BNX2X_MCAST_CMD_ADD:
3178 case BNX2X_MCAST_CMD_CONT:
3179 /* Multicast MACs on 57710 are configured as unicast MACs and
3180 * there is only a limited number of CAM entries for that
3181 * matter.
3182 */
3183 if (p->mcast_list_len > o->max_cmd_len) {
3184 BNX2X_ERR("Can't configure more than %d multicast MACs "
3185 "on 57710\n", o->max_cmd_len);
3186 return -EINVAL;
3187 }
3188 /* Every configured MAC should be cleared if DEL command is
3189 * called. Only the last ADD command is relevant as long as
3190 * every ADD command overrides the previous configuration.
3191 */
3192 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3193 if (p->mcast_list_len > 0)
3194 o->set_registry_size(o, p->mcast_list_len);
3195
3196 break;
3197
3198 default:
3199 BNX2X_ERR("Unknown command: %d\n", cmd);
3200 return -EINVAL;
3201
3202 }
3203
3204 /* We want to ensure that commands are executed one by one for 57710.
3205 * Therefore each non-empty command will consume o->max_cmd_len.
3206 */
3207 if (p->mcast_list_len)
3208 o->total_pending_num += o->max_cmd_len;
3209
3210 return 0;
3211}
3212
3213static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3214 struct bnx2x_mcast_ramrod_params *p,
3215 int old_num_macs)
3216{
3217 struct bnx2x_mcast_obj *o = p->mcast_obj;
3218
3219 o->set_registry_size(o, old_num_macs);
3220
3221 /* If the current command hasn't been handled yet, being
3222 * here means that it's meant to be dropped and we have to
3223 * update the number of outstanding MACs accordingly.
3224 */
3225 if (p->mcast_list_len)
3226 o->total_pending_num -= o->max_cmd_len;
3227}
3228
3229static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3230 struct bnx2x_mcast_obj *o, int idx,
3231 union bnx2x_mcast_config_data *cfg_data,
3232 int cmd)
3233{
3234 struct bnx2x_raw_obj *r = &o->raw;
3235 struct mac_configuration_cmd *data =
3236 (struct mac_configuration_cmd *)(r->rdata);
3237
3238 /* copy mac */
3239 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3240 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3241 &data->config_table[idx].middle_mac_addr,
3242 &data->config_table[idx].lsb_mac_addr,
3243 cfg_data->mac);
3244
3245 data->config_table[idx].vlan_id = 0;
3246 data->config_table[idx].pf_id = r->func_id;
3247 data->config_table[idx].clients_bit_vector =
3248 cpu_to_le32(1 << r->cl_id);
3249
3250 SET_FLAG(data->config_table[idx].flags,
3251 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3252 T_ETH_MAC_COMMAND_SET);
3253 }
3254}
3255
3256/**
3257 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3258 *
3259 * @bp: device handle
3260 * @p:
3261 * @len: number of rules to handle
3262 */
3263static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3264 struct bnx2x_mcast_ramrod_params *p,
3265 u8 len)
3266{
3267 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3268 struct mac_configuration_cmd *data =
3269 (struct mac_configuration_cmd *)(r->rdata);
3270
3271 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3272 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3273 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3274
3275 data->hdr.offset = offset;
3276 data->hdr.client_id = 0xff;
3277 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3278 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3279 data->hdr.length = len;
3280}
3281
3282/**
3283 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3284 *
3285 * @bp: device handle
3286 * @o: multicast object
3287 * @start_idx: index in the registry to start from
3288 * @rdata_idx: index in the ramrod data to start from
3289 *
3290 * restore command for 57710 is like all other commands - always a stand-alone
3291 * command - start_idx and rdata_idx will always be 0. This function will always
3292 * succeed.
3293 * returns -1 to comply with the 57712 variant.
3294 */
3295static inline int bnx2x_mcast_handle_restore_cmd_e1(
3296 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3297 int *rdata_idx)
3298{
3299 struct bnx2x_mcast_mac_elem *elem;
3300 int i = 0;
3301 union bnx2x_mcast_config_data cfg_data = {0};
3302
3303 /* go through the registry and configure the MACs from it. */
3304 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3305 cfg_data.mac = &elem->mac[0];
3306 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3307
3308 i++;
3309
3310 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3311 cfg_data.mac);
3312 }
3313
3314 *rdata_idx = i;
3315
3316 return -1;
3317}
3318
3319
3320static inline int bnx2x_mcast_handle_pending_cmds_e1(
3321 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3322{
3323 struct bnx2x_pending_mcast_cmd *cmd_pos;
3324 struct bnx2x_mcast_mac_elem *pmac_pos;
3325 struct bnx2x_mcast_obj *o = p->mcast_obj;
3326 union bnx2x_mcast_config_data cfg_data = {0};
3327 int cnt = 0;
3328
3329
3330 /* If nothing to be done - return */
3331 if (list_empty(&o->pending_cmds_head))
3332 return 0;
3333
3334 /* Handle the first command */
3335 cmd_pos = list_first_entry(&o->pending_cmds_head,
3336 struct bnx2x_pending_mcast_cmd, link);
3337
3338 switch (cmd_pos->type) {
3339 case BNX2X_MCAST_CMD_ADD:
3340 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3341 cfg_data.mac = &pmac_pos->mac[0];
3342 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3343
3344 cnt++;
3345
3346 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3347 pmac_pos->mac);
3348 }
3349 break;
3350
3351 case BNX2X_MCAST_CMD_DEL:
3352 cnt = cmd_pos->data.macs_num;
3353 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3354 break;
3355
3356 case BNX2X_MCAST_CMD_RESTORE:
3357 o->hdl_restore(bp, o, 0, &cnt);
3358 break;
3359
3360 default:
3361 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3362 return -EINVAL;
3363 }
3364
3365 list_del(&cmd_pos->link);
3366 kfree(cmd_pos);
3367
3368 return cnt;
3369}
3370
3371/**
3372 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3373 *
3374 * @fw_hi: upper two bytes of the MAC in firmware format
3375 * @fw_mid: middle two bytes of the MAC in firmware format
3376 * @fw_lo: lower two bytes of the MAC in firmware format
3377 * @mac: output buffer for the MAC in network order
3378 */
3379static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3380 __le16 *fw_lo, u8 *mac)
3381{
3382 mac[1] = ((u8 *)fw_hi)[0];
3383 mac[0] = ((u8 *)fw_hi)[1];
3384 mac[3] = ((u8 *)fw_mid)[0];
3385 mac[2] = ((u8 *)fw_mid)[1];
3386 mac[5] = ((u8 *)fw_lo)[0];
3387 mac[4] = ((u8 *)fw_lo)[1];
3388}
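
/* Illustrative sketch (hypothetical check, not driver code): since the
 * firmware keeps each 16-bit MAC word byte-swapped, feeding the three
 * words produced by bnx2x_set_fw_mac_addr() back through the helper above
 * should reproduce the original MAC:
 *
 *	u8 mac[ETH_ALEN] = {0x00, 0x10, 0x18, 0xab, 0xcd, 0xef};
 *	u8 out[ETH_ALEN];
 *	__le16 hi, mid, lo;
 *
 *	bnx2x_set_fw_mac_addr(&hi, &mid, &lo, mac);
 *	bnx2x_get_fw_mac_addr(&hi, &mid, &lo, out);
 *	WARN_ON(memcmp(mac, out, ETH_ALEN));
 */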
3389
3390/**
3391 * bnx2x_mcast_refresh_registry_e1 - update the exact-match registry
3392 *
3393 * @bp: device handle
3394 * @o: multicast object
3395 *
3396 * Check the ramrod data's first entry flag to see if it's a DELETE or ADD
3397 * command and update the registry correspondingly: if ADD - allocate memory
3398 * and add the entries to the registry (list), if DELETE - clear the registry
3399 * and free the memory.
3400 */
3401static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3402 struct bnx2x_mcast_obj *o)
3403{
3404 struct bnx2x_raw_obj *raw = &o->raw;
3405 struct bnx2x_mcast_mac_elem *elem;
3406 struct mac_configuration_cmd *data =
3407 (struct mac_configuration_cmd *)(raw->rdata);
3408
3409 /* If first entry contains a SET bit - the command was ADD,
3410 * otherwise - DEL_ALL
3411 */
3412 if (GET_FLAG(data->config_table[0].flags,
3413 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3414 int i, len = data->hdr.length;
3415
3416 /* Break if it was a RESTORE command */
3417 if (!list_empty(&o->registry.exact_match.macs))
3418 return 0;
3419
3420 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3421 if (!elem) {
3422 BNX2X_ERR("Failed to allocate registry memory\n");
3423 return -ENOMEM;
3424 }
3425
3426 for (i = 0; i < len; i++, elem++) {
3427 bnx2x_get_fw_mac_addr(
3428 &data->config_table[i].msb_mac_addr,
3429 &data->config_table[i].middle_mac_addr,
3430 &data->config_table[i].lsb_mac_addr,
3431 elem->mac);
3432 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3433 elem->mac);
3434 list_add_tail(&elem->link,
3435 &o->registry.exact_match.macs);
3436 }
3437 } else {
3438 elem = list_first_entry(&o->registry.exact_match.macs,
3439 struct bnx2x_mcast_mac_elem, link);
3440 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3441 kfree(elem);
3442 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3443 }
3444
3445 return 0;
3446}
3447
3448static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3449 struct bnx2x_mcast_ramrod_params *p,
3450 int cmd)
3451{
3452 struct bnx2x_mcast_obj *o = p->mcast_obj;
3453 struct bnx2x_raw_obj *raw = &o->raw;
3454 struct mac_configuration_cmd *data =
3455 (struct mac_configuration_cmd *)(raw->rdata);
3456 int cnt = 0, i, rc;
3457
3458 /* Reset the ramrod data buffer */
3459 memset(data, 0, sizeof(*data));
3460
3461 /* First set all entries as invalid */
3462 for (i = 0; i < o->max_cmd_len ; i++)
3463 SET_FLAG(data->config_table[i].flags,
3464 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3465 T_ETH_MAC_COMMAND_INVALIDATE);
3466
3467 /* Handle pending commands first */
3468 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3469
3470 /* If there are no more pending commands - clear SCHEDULED state */
3471 if (list_empty(&o->pending_cmds_head))
3472 o->clear_sched(o);
3473
3474 /* The below may be true iff there were no pending commands */
3475 if (!cnt)
3476 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3477
3478 /* For 57710 every command has o->max_cmd_len length to ensure that
3479 * commands are done one at a time.
3480 */
3481 o->total_pending_num -= o->max_cmd_len;
3482
3483 /* send a ramrod */
3484
3485 WARN_ON(cnt > o->max_cmd_len);
3486
3487 /* Set ramrod header (in particular, a number of entries to update) */
3488 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3489
3490 /* update a registry: we need the registry contents to be always up
3491 * to date in order to be able to execute a RESTORE opcode. Here
3492 * we use the fact that for 57710 we send one command at a time
3493 * hence we may take the registry update out of the command handling
3494 * and do it in a simpler way here.
3495 */
3496 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3497 if (rc)
3498 return rc;
3499
3500 /*
3501 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3502 * RAMROD_PENDING status immediately.
3503 */
3504 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3505 raw->clear_pending(raw);
3506 return 0;
3507 } else {
3508 /*
3509 * No need for an explicit memory barrier here as long we would
3510 * need to ensure the ordering of writing to the SPQ element
3511 * and updating of the SPQ producer which involves a memory
3512 * read and we will have to put a full memory barrier there
3513 * (inside bnx2x_sp_post()).
3514 */
3515
3516 /* Send a ramrod */
3517 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3518 U64_HI(raw->rdata_mapping),
3519 U64_LO(raw->rdata_mapping),
3520 ETH_CONNECTION_TYPE);
3521 if (rc)
3522 return rc;
3523
3524 /* Ramrod completion is pending */
3525 return 1;
3526 }
3527
3528}
3529
3530static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3531{
3532 return o->registry.exact_match.num_macs_set;
3533}
3534
3535static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3536{
3537 return o->registry.aprox_match.num_bins_set;
3538}
3539
3540static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3541 int n)
3542{
3543 o->registry.exact_match.num_macs_set = n;
3544}
3545
3546static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3547 int n)
3548{
3549 o->registry.aprox_match.num_bins_set = n;
3550}
3551
3552int bnx2x_config_mcast(struct bnx2x *bp,
3553 struct bnx2x_mcast_ramrod_params *p,
3554 int cmd)
3555{
3556 struct bnx2x_mcast_obj *o = p->mcast_obj;
3557 struct bnx2x_raw_obj *r = &o->raw;
3558 int rc = 0, old_reg_size;
3559
3560 /* This is needed to recover the number of currently configured mcast macs
3561 * in case of failure.
3562 */
3563 old_reg_size = o->get_registry_size(o);
3564
3565 /* Do some calculations and checks */
3566 rc = o->validate(bp, p, cmd);
3567 if (rc)
3568 return rc;
3569
3570 /* Return if there is no work to do */
3571 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3572 return 0;
3573
3574 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3575 "o->max_cmd_len=%d\n", o->total_pending_num,
3576 p->mcast_list_len, o->max_cmd_len);
3577
3578 /* Enqueue the current command to the pending list if we can't complete
3579 * it in the current iteration
3580 */
3581 if (r->check_pending(r) ||
3582 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3583 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3584 if (rc < 0)
3585 goto error_exit1;
3586
3587 /* As long as the current command is in a command list we
3588 * don't need to handle it separately.
3589 */
3590 p->mcast_list_len = 0;
3591 }
3592
3593 if (!r->check_pending(r)) {
3594
3595 /* Set 'pending' state */
3596 r->set_pending(r);
3597
3598 /* Configure the new classification in the chip */
3599 rc = o->config_mcast(bp, p, cmd);
3600 if (rc < 0)
3601 goto error_exit2;
3602
3603 /* Wait for a ramrod completion if was requested */
3604 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3605 rc = o->wait_comp(bp, o);
3606 }
3607
3608 return rc;
3609
3610error_exit2:
3611 r->clear_pending(r);
3612
3613error_exit1:
3614 o->revert(bp, p, old_reg_size);
3615
3616 return rc;
3617}
3618
3619static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3620{
3621 smp_mb__before_clear_bit();
3622 clear_bit(o->sched_state, o->raw.pstate);
3623 smp_mb__after_clear_bit();
3624}
3625
3626static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3627{
3628 smp_mb__before_clear_bit();
3629 set_bit(o->sched_state, o->raw.pstate);
3630 smp_mb__after_clear_bit();
3631}
3632
3633static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3634{
3635 return !!test_bit(o->sched_state, o->raw.pstate);
3636}
3637
3638static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3639{
3640 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3641}
3642
3643void bnx2x_init_mcast_obj(struct bnx2x *bp,
3644 struct bnx2x_mcast_obj *mcast_obj,
3645 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3646 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3647 int state, unsigned long *pstate, bnx2x_obj_type type)
3648{
3649 memset(mcast_obj, 0, sizeof(*mcast_obj));
3650
3651 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3652 rdata, rdata_mapping, state, pstate, type);
3653
3654 mcast_obj->engine_id = engine_id;
3655
3656 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3657
3658 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3659 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3660 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3661 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3662
3663 if (CHIP_IS_E1(bp)) {
3664 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3665 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3666 mcast_obj->hdl_restore =
3667 bnx2x_mcast_handle_restore_cmd_e1;
3668 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3669
3670 if (CHIP_REV_IS_SLOW(bp))
3671 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3672 else
3673 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3674
3675 mcast_obj->wait_comp = bnx2x_mcast_wait;
3676 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3677 mcast_obj->validate = bnx2x_mcast_validate_e1;
3678 mcast_obj->revert = bnx2x_mcast_revert_e1;
3679 mcast_obj->get_registry_size =
3680 bnx2x_mcast_get_registry_size_exact;
3681 mcast_obj->set_registry_size =
3682 bnx2x_mcast_set_registry_size_exact;
3683
3684 /* 57710 is the only chip that uses the exact match for mcast
3685 * at the moment.
3686 */
3687 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3688
3689 } else if (CHIP_IS_E1H(bp)) {
3690 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3691 mcast_obj->enqueue_cmd = NULL;
3692 mcast_obj->hdl_restore = NULL;
3693 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3694
3695 /* 57711 doesn't send a ramrod, so it has unlimited credit
3696 * for one command.
3697 */
3698 mcast_obj->max_cmd_len = -1;
3699 mcast_obj->wait_comp = bnx2x_mcast_wait;
3700 mcast_obj->set_one_rule = NULL;
3701 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3702 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3703 mcast_obj->get_registry_size =
3704 bnx2x_mcast_get_registry_size_aprox;
3705 mcast_obj->set_registry_size =
3706 bnx2x_mcast_set_registry_size_aprox;
3707 } else {
3708 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3709 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3710 mcast_obj->hdl_restore =
3711 bnx2x_mcast_handle_restore_cmd_e2;
3712 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3713 /* TODO: There should be a proper HSI define for this number!!!
3714 */
3715 mcast_obj->max_cmd_len = 16;
3716 mcast_obj->wait_comp = bnx2x_mcast_wait;
3717 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3718 mcast_obj->validate = bnx2x_mcast_validate_e2;
3719 mcast_obj->revert = bnx2x_mcast_revert_e2;
3720 mcast_obj->get_registry_size =
3721 bnx2x_mcast_get_registry_size_aprox;
3722 mcast_obj->set_registry_size =
3723 bnx2x_mcast_set_registry_size_aprox;
3724 }
3725}
3726
3727/*************************** Credit handling **********************************/
3728
3729/**
3730 * __atomic_add_ifless - add if the result is less than a given value.
3731 *
3732 * @v: pointer of type atomic_t
3733 * @a: the amount to add to v...
3734 * @u: ...if (v + a) is less than u.
3735 *
3736 * returns true if (v + a) was less than u, and false otherwise.
3737 *
3738 */
3739static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3740{
3741 int c, old;
3742
3743 c = atomic_read(v);
3744 for (;;) {
3745 if (unlikely(c + a >= u))
3746 return false;
3747
3748 old = atomic_cmpxchg(v, c, c + a);
3749 if (likely(old == c))
3750 break;
3751 c = old;
3752 }
3753
3754 return true;
3755}
3756
3757/**
3758 * __atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3759 *
3760 * @v: pointer of type atomic_t
3761 * @a: the amount to subtract from v...
3762 * @u: ...if (v - a) is greater than or equal to u.
3763 *
3764 * returns true if (v - a) was greater than or equal to u, and false
3765 * otherwise.
3766 */
3767static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3768{
3769 int c, old;
3770
3771 c = atomic_read(v);
3772 for (;;) {
3773 if (unlikely(c - a < u))
3774 return false;
3775
3776 old = atomic_cmpxchg(v, c, c - a);
3777 if (likely(old == c))
3778 break;
3779 c = old;
3780 }
3781
3782 return true;
3783}
3784
3785static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3786{
3787 bool rc;
3788
3789 smp_mb();
3790 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3791 smp_mb();
3792
3793 return rc;
3794}
3795
3796static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3797{
3798 bool rc;
3799
3800 smp_mb();
3801
3802 /* Don't allow a refill if credit + cnt > pool_sz */
3803 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3804
3805 smp_mb();
3806
3807 return rc;
3808}
3809
3810static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3811{
3812 int cur_credit;
3813
3814 smp_mb();
3815 cur_credit = atomic_read(&o->credit);
3816
3817 return cur_credit;
3818}
3819
3820static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3821 int cnt)
3822{
3823 return true;
3824}
3825
3826
3827static bool bnx2x_credit_pool_get_entry(
3828 struct bnx2x_credit_pool_obj *o,
3829 int *offset)
3830{
3831 int idx, vec, i;
3832
3833 *offset = -1;
3834
3835 /* Find "internal cam-offset" then add to base for this object... */
3836 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3837
3838 /* Skip the current vector if there are no free entries in it */
3839 if (!o->pool_mirror[vec])
3840 continue;
3841
3842 /* If we've got here we are going to find a free entry */
3843 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3844 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3845
3846 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3847 /* Got one!! */
3848 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3849 *offset = o->base_pool_offset + idx;
3850 return true;
3851 }
3852 }
3853
3854 return false;
3855}
3856
3857static bool bnx2x_credit_pool_put_entry(
3858 struct bnx2x_credit_pool_obj *o,
3859 int offset)
3860{
3861 if (offset < o->base_pool_offset)
3862 return false;
3863
3864 offset -= o->base_pool_offset;
3865
3866 if (offset >= o->pool_sz)
3867 return false;
3868
3869 /* Return the entry to the pool */
3870 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3871
3872 return true;
3873}
3874
3875static bool bnx2x_credit_pool_put_entry_always_true(
3876 struct bnx2x_credit_pool_obj *o,
3877 int offset)
3878{
3879 return true;
3880}
3881
3882static bool bnx2x_credit_pool_get_entry_always_true(
3883 struct bnx2x_credit_pool_obj *o,
3884 int *offset)
3885{
3886 *offset = -1;
3887 return true;
3888}
3889/**
3890 * bnx2x_init_credit_pool - initialize credit pool internals.
3891 *
3892 * @p: credit pool object to initialize
3893 * @base: Base entry in the CAM to use.
3894 * @credit: pool size.
3895 *
3896 * If base is negative no CAM entries handling will be performed.
3897 * If credit is negative pool operations will always succeed (unlimited pool).
3898 *
3899 */
3900static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3901 int base, int credit)
3902{
3903 /* Zero the object first */
3904 memset(p, 0, sizeof(*p));
3905
3906 /* Set the table to all 1s */
3907 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3908
3909 /* Init a pool as full */
3910 atomic_set(&p->credit, credit);
3911
3912 /* The total pool size */
3913 p->pool_sz = credit;
3914
3915 p->base_pool_offset = base;
3916
3917 /* Commit the change */
3918 smp_mb();
3919
3920 p->check = bnx2x_credit_pool_check;
3921
3922 /* if pool credit is negative - disable the checks */
3923 if (credit >= 0) {
3924 p->put = bnx2x_credit_pool_put;
3925 p->get = bnx2x_credit_pool_get;
3926 p->put_entry = bnx2x_credit_pool_put_entry;
3927 p->get_entry = bnx2x_credit_pool_get_entry;
3928 } else {
3929 p->put = bnx2x_credit_pool_always_true;
3930 p->get = bnx2x_credit_pool_always_true;
3931 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3932 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3933 }
3934
3935 /* If base is negative - disable entries handling */
3936 if (base < 0) {
3937 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3938 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3939 }
3940}
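
/* Illustrative sketch (not part of the original file): the three pool
 * flavours bnx2x_init_credit_pool() can produce, matching the rules in
 * the kernel-doc above. The base/size values below are made up.
 */
static inline void example_credit_pool_modes(struct bnx2x_credit_pool_obj *p)
{
	/* CAM-backed pool: credit and per-entry offsets are both tracked */
	bnx2x_init_credit_pool(p, 0, 64);

	/* credit-only pool: entry handling disabled because base < 0 */
	bnx2x_init_credit_pool(p, -1, 64);

	/* unlimited pool: get()/put() always succeed because credit < 0 */
	bnx2x_init_credit_pool(p, 0, -1);
}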
3941
3942void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3943 struct bnx2x_credit_pool_obj *p, u8 func_id,
3944 u8 func_num)
3945{
3946/* TODO: this will be defined in consts as well... */
3947#define BNX2X_CAM_SIZE_EMUL 5
3948
3949 int cam_sz;
3950
3951 if (CHIP_IS_E1(bp)) {
3952 /* In E1, multicast entries are saved in the CAM as well... */
3953 if (!CHIP_REV_IS_SLOW(bp))
3954 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3955 else
3956 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3957
3958 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3959
3960 } else if (CHIP_IS_E1H(bp)) {
3961 /* CAM credit is equally divided between all active functions
3962 * on the PORT.
3963 */
3964 if (func_num > 0) {
3965 if (!CHIP_REV_IS_SLOW(bp))
3966 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3967 else
3968 cam_sz = BNX2X_CAM_SIZE_EMUL;
3969 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3970 } else {
3971 /* this should never happen! Block MAC operations. */
3972 bnx2x_init_credit_pool(p, 0, 0);
3973 }
3974
3975 } else {
3976
3977 /*
3978 * CAM credit is equally divided between all active functions
3979 * on the PATH.
3980 */
3981 if (func_num > 0) {
3982 if (!CHIP_REV_IS_SLOW(bp))
3983 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3984 else
3985 cam_sz = BNX2X_CAM_SIZE_EMUL;
3986
3987 /*
3988 * No need for CAM entries handling for 57712 and
3989 * newer.
3990 */
3991 bnx2x_init_credit_pool(p, -1, cam_sz);
3992 } else {
3993 /* this should never happen! Block MAC operations. */
3994 bnx2x_init_credit_pool(p, 0, 0);
3995 }
3996
3997 }
3998}
3999
4000void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4001 struct bnx2x_credit_pool_obj *p,
4002 u8 func_id,
4003 u8 func_num)
4004{
4005 if (CHIP_IS_E1x(bp)) {
4006 /*
4007 * There is no VLAN credit in HW on 57710 and 57711; only
4008 * MAC / MAC-VLAN pairs can be configured.
4009 */
4010 bnx2x_init_credit_pool(p, 0, -1);
4011 } else {
4012 /*
4013 * CAM credit is equally divided between all active functions
4014 * on the PATH.
4015 */
4016 if (func_num > 0) {
4017 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4018 bnx2x_init_credit_pool(p, func_id * credit, credit);
4019 } else
4020 /* this should never happen! Block VLAN operations. */
4021 bnx2x_init_credit_pool(p, 0, 0);
4022 }
4023}
4024
4025/****************** RSS Configuration ******************/
4026/**
4027 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4028 *
4029 * @bp: driver handle
4030 * @p: pointer to rss configuration
4031 *
4032 * Prints it when NETIF_MSG_IFUP debug level is configured.
4033 */
4034static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4035 struct bnx2x_config_rss_params *p)
4036{
4037 int i;
4038
4039 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4040 DP(BNX2X_MSG_SP, "0x0000: ");
4041 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4042 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4043
4044 /* Print 4 bytes in a line */
4045 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4046 (((i + 1) & 0x3) == 0)) {
4047 DP_CONT(BNX2X_MSG_SP, "\n");
4048 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4049 }
4050 }
4051
4052 DP_CONT(BNX2X_MSG_SP, "\n");
4053}
4054
4055/**
4056 * bnx2x_setup_rss - configure RSS
4057 *
4058 * @bp: device handle
4059 * @p: rss configuration
4060 *
4061 * Sends an RSS UPDATE ramrod.
4062 */
4063static int bnx2x_setup_rss(struct bnx2x *bp,
4064 struct bnx2x_config_rss_params *p)
4065{
4066 struct bnx2x_rss_config_obj *o = p->rss_obj;
4067 struct bnx2x_raw_obj *r = &o->raw;
4068 struct eth_rss_update_ramrod_data *data =
4069 (struct eth_rss_update_ramrod_data *)(r->rdata);
4070 u8 rss_mode = 0;
4071 int rc;
4072
4073 memset(data, 0, sizeof(*data));
4074
4075 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4076
4077 /* Set an echo field */
4078 data->echo = (r->cid & BNX2X_SWCID_MASK) |
4079 (r->state << BNX2X_SWCID_SHIFT);
4080
4081 /* RSS mode */
4082 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4083 rss_mode = ETH_RSS_MODE_DISABLED;
4084 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4085 rss_mode = ETH_RSS_MODE_REGULAR;
4086 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4087 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4088 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4089 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4090 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4091 rss_mode = ETH_RSS_MODE_IP_DSCP;
4092
4093 data->rss_mode = rss_mode;
4094
4095 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4096
4097 /* RSS capabilities */
4098 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4099 data->capabilities |=
4100 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4101
4102 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4103 data->capabilities |=
4104 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4105
4106 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4107 data->capabilities |=
4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4109
4110 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4111 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4113
4114 /* Hashing mask */
4115 data->rss_result_mask = p->rss_result_mask;
4116
4117 /* RSS engine ID */
4118 data->rss_engine_id = o->engine_id;
4119
4120 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4121
4122 /* Indirection table */
4123 memcpy(data->indirection_table, p->ind_table,
4124 T_ETH_INDIRECTION_TABLE_SIZE);
4125
4126 /* Remember the last configuration */
4127 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4128
4129 /* Print the indirection table */
4130 if (netif_msg_ifup(bp))
4131 bnx2x_debug_print_ind_table(bp, p);
4132
4133 /* RSS keys */
4134 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4135 memcpy(&data->rss_key[0], &p->rss_key[0],
4136 sizeof(data->rss_key));
4137 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4138 }
4139
4140 /*
4141 * No need for an explicit memory barrier here: the write to the
4142 * SPQ element only needs to be ordered against the update of the
4143 * SPQ producer, which involves a memory read, and bnx2x_sp_post()
4144 * already places a full memory barrier there for exactly that
4145 * reason.
4146 */
4147
4148 /* Send a ramrod */
4149 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4150 U64_HI(r->rdata_mapping),
4151 U64_LO(r->rdata_mapping),
4152 ETH_CONNECTION_TYPE);
4153
4154 if (rc < 0)
4155 return rc;
4156
4157 return 1;
4158}
4159
4160void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4161 u8 *ind_table)
4162{
4163 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4164}
4165
4166int bnx2x_config_rss(struct bnx2x *bp,
4167 struct bnx2x_config_rss_params *p)
4168{
4169 int rc;
4170 struct bnx2x_rss_config_obj *o = p->rss_obj;
4171 struct bnx2x_raw_obj *r = &o->raw;
4172
4173 /* Do nothing if only driver cleanup was requested */
4174 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4175 return 0;
4176
4177 r->set_pending(r);
4178
4179 rc = o->config_rss(bp, p);
4180 if (rc < 0) {
4181 r->clear_pending(r);
4182 return rc;
4183 }
4184
4185 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4186 rc = r->wait_comp(bp, r);
4187
4188 return rc;
4189}
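
/* Illustrative caller sketch (not part of the original file): filling a
 * minimal parameter block and requesting a synchronous RSS update. The
 * hashing-mask value is hypothetical, and the rss_obj is assumed to have
 * been set up via bnx2x_init_rss_config_obj() below.
 */
static inline int example_config_rss(struct bnx2x *bp,
				     struct bnx2x_rss_config_obj *rss_obj,
				     u8 *ind_table)
{
	struct bnx2x_config_rss_params params = {0};

	params.rss_obj = rss_obj;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	params.rss_result_mask = 0x7f;	/* hypothetical hashing mask */

	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	return bnx2x_config_rss(bp, &params);
}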
4190
4191
4192void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4193 struct bnx2x_rss_config_obj *rss_obj,
4194 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4195 void *rdata, dma_addr_t rdata_mapping,
4196 int state, unsigned long *pstate,
4197 bnx2x_obj_type type)
4198{
4199 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4200 rdata_mapping, state, pstate, type);
4201
4202 rss_obj->engine_id = engine_id;
4203 rss_obj->config_rss = bnx2x_setup_rss;
4204}
4205
4206/********************** Queue state object ***********************************/
4207
4208/**
4209 * bnx2x_queue_state_change - perform Queue state change transition
4210 *
4211 * @bp: device handle
4212 * @params: parameters to perform the transition
4213 *
4214 * returns 0 in case of successfully completed transition, negative error
4215 * code in case of failure, positive (EBUSY) value if there is a completion
4216 * that is still pending (possible only if RAMROD_COMP_WAIT is
4217 * not set in params->ramrod_flags for asynchronous commands).
4218 *
4219 */
4220int bnx2x_queue_state_change(struct bnx2x *bp,
4221 struct bnx2x_queue_state_params *params)
4222{
4223 struct bnx2x_queue_sp_obj *o = params->q_obj;
4224 int rc, pending_bit;
4225 unsigned long *pending = &o->pending;
4226
4227 /* Check that the requested transition is legal */
4228 if (o->check_transition(bp, o, params))
4229 return -EINVAL;
4230
4231 /* Set "pending" bit */
4232 pending_bit = o->set_pending(o, params);
4233
4234 /* Don't send a command if only driver cleanup was requested */
4235 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4236 o->complete_cmd(bp, o, pending_bit);
4237 else {
4238 /* Send a ramrod */
4239 rc = o->send_cmd(bp, params);
4240 if (rc) {
4241 o->next_state = BNX2X_Q_STATE_MAX;
4242 clear_bit(pending_bit, pending);
4243 smp_mb__after_clear_bit();
4244 return rc;
4245 }
4246
4247 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4248 rc = o->wait_comp(bp, o, pending_bit);
4249 if (rc)
4250 return rc;
4251
4252 return 0;
4253 }
4254 }
4255
4256 return !!test_bit(pending_bit, pending);
4257}
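
/* Illustrative caller sketch (not part of the original file): driving a
 * single HALT transition through the state machine above and blocking
 * until the ramrod completion arrives.
 */
static inline int example_queue_halt(struct bnx2x *bp,
				     struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params params = {0};

	params.q_obj = q_obj;
	params.cmd = BNX2X_Q_CMD_HALT;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* returns 0 only once the HALT completion has been processed */
	return bnx2x_queue_state_change(bp, &params);
}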
4258
4259
4260static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4261 struct bnx2x_queue_state_params *params)
4262{
4263 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4264
4265 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4266 * UPDATE command.
4267 */
4268 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4269 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4270 bit = BNX2X_Q_CMD_UPDATE;
4271 else
4272 bit = cmd;
4273
4274 set_bit(bit, &obj->pending);
4275 return bit;
4276}
4277
4278static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4279 struct bnx2x_queue_sp_obj *o,
4280 enum bnx2x_queue_cmd cmd)
4281{
4282 return bnx2x_state_wait(bp, cmd, &o->pending);
4283}
4284
4285/**
4286 * bnx2x_queue_comp_cmd - complete the state change command.
4287 *
4288 * @bp: device handle
4289 * @o: queue state object
4290 * @cmd: command that completed
4291 *
4292 * Checks that the arrived completion is expected.
4293 */
4294static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4295 struct bnx2x_queue_sp_obj *o,
4296 enum bnx2x_queue_cmd cmd)
4297{
4298 unsigned long cur_pending = o->pending;
4299
4300 if (!test_and_clear_bit(cmd, &cur_pending)) {
4301 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4302 "pending 0x%lx, next_state %d\n", cmd,
4303 o->cids[BNX2X_PRIMARY_CID_INDEX],
4304 o->state, cur_pending, o->next_state);
4305 return -EINVAL;
4306 }
4307
4308 if (o->next_tx_only >= o->max_cos)
4309 /* >= because the number of tx-only connections must always be
4310 * smaller than max_cos, since the primary connection supports COS 0
4311 */
4312 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4313 o->next_tx_only, o->max_cos);
4314
4315 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4316 "setting state to %d\n", cmd,
4317 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4318
4319 if (o->next_tx_only) /* print num tx-only if any exist */
4320 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4321 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4322
4323 o->state = o->next_state;
4324 o->num_tx_only = o->next_tx_only;
4325 o->next_state = BNX2X_Q_STATE_MAX;
4326
4327 /* It's important that o->state and o->next_state are
4328 * updated before o->pending.
4329 */
4330 wmb();
4331
4332 clear_bit(cmd, &o->pending);
4333 smp_mb__after_clear_bit();
4334
4335 return 0;
4336}
4337
4338static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4339 struct bnx2x_queue_state_params *cmd_params,
4340 struct client_init_ramrod_data *data)
4341{
4342 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4343
4344 /* Rx data */
4345
4346 /* IPv6 TPA supported for E2 and above only */
4347 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4348 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4349}
4350
4351static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4352 struct bnx2x_queue_sp_obj *o,
4353 struct bnx2x_general_setup_params *params,
4354 struct client_init_general_data *gen_data,
4355 unsigned long *flags)
4356{
4357 gen_data->client_id = o->cl_id;
4358
4359 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4360 gen_data->statistics_counter_id =
4361 params->stat_id;
4362 gen_data->statistics_en_flg = 1;
4363 gen_data->statistics_zero_flg =
4364 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4365 } else
4366 gen_data->statistics_counter_id =
4367 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4368
4369 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4370 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4371 gen_data->sp_client_id = params->spcl_id;
4372 gen_data->mtu = cpu_to_le16(params->mtu);
4373 gen_data->func_id = o->func_id;
4374
4375
4376 gen_data->cos = params->cos;
4377
4378 gen_data->traffic_type =
4379 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4380 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4381
4382 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4383 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4384}
4385
4386static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4387 struct bnx2x_txq_setup_params *params,
4388 struct client_init_tx_data *tx_data,
4389 unsigned long *flags)
4390{
4391 tx_data->enforce_security_flg =
4392 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4393 tx_data->default_vlan =
4394 cpu_to_le16(params->default_vlan);
4395 tx_data->default_vlan_flg =
4396 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4397 tx_data->tx_switching_flg =
4398 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4399 tx_data->anti_spoofing_flg =
4400 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4401 tx_data->tx_status_block_id = params->fw_sb_id;
4402 tx_data->tx_sb_index_number = params->sb_cq_index;
4403 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4404
4405 tx_data->tx_bd_page_base.lo =
4406 cpu_to_le32(U64_LO(params->dscr_map));
4407 tx_data->tx_bd_page_base.hi =
4408 cpu_to_le32(U64_HI(params->dscr_map));
4409
4410 /* Don't configure any Tx switching mode during queue SETUP */
4411 tx_data->state = 0;
4412}
4413
4414static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4415 struct rxq_pause_params *params,
4416 struct client_init_rx_data *rx_data)
4417{
4418 /* flow control data */
4419 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4420 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4421 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4422 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4423 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4424 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4425 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4426}
4427
4428static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4429 struct bnx2x_rxq_setup_params *params,
4430 struct client_init_rx_data *rx_data,
4431 unsigned long *flags)
4432{
4433 /* Rx data */
4434 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4435 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4436 rx_data->vmqueue_mode_en_flg = 0;
4437
4438 rx_data->cache_line_alignment_log_size =
4439 params->cache_line_log;
4440 rx_data->enable_dynamic_hc =
4441 test_bit(BNX2X_Q_FLG_DHC, flags);
4442 rx_data->max_sges_for_packet = params->max_sges_pkt;
4443 rx_data->client_qzone_id = params->cl_qzone_id;
4444 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4445
4446 /* Always start in DROP_ALL mode */
4447 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4448 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4449
4450 /* We don't set drop flags */
4451 rx_data->drop_ip_cs_err_flg = 0;
4452 rx_data->drop_tcp_cs_err_flg = 0;
4453 rx_data->drop_ttl0_flg = 0;
4454 rx_data->drop_udp_cs_err_flg = 0;
4455 rx_data->inner_vlan_removal_enable_flg =
4456 test_bit(BNX2X_Q_FLG_VLAN, flags);
4457 rx_data->outer_vlan_removal_enable_flg =
4458 test_bit(BNX2X_Q_FLG_OV, flags);
4459 rx_data->status_block_id = params->fw_sb_id;
4460 rx_data->rx_sb_index_number = params->sb_cq_index;
4461 rx_data->max_tpa_queues = params->max_tpa_queues;
4462 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4463 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4464 rx_data->bd_page_base.lo =
4465 cpu_to_le32(U64_LO(params->dscr_map));
4466 rx_data->bd_page_base.hi =
4467 cpu_to_le32(U64_HI(params->dscr_map));
4468 rx_data->sge_page_base.lo =
4469 cpu_to_le32(U64_LO(params->sge_map));
4470 rx_data->sge_page_base.hi =
4471 cpu_to_le32(U64_HI(params->sge_map));
4472 rx_data->cqe_page_base.lo =
4473 cpu_to_le32(U64_LO(params->rcq_map));
4474 rx_data->cqe_page_base.hi =
4475 cpu_to_le32(U64_HI(params->rcq_map));
4476 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4477
4478 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4479 rx_data->approx_mcast_engine_id = o->func_id;
4480 rx_data->is_approx_mcast = 1;
4481 }
4482
4483 rx_data->rss_engine_id = params->rss_engine_id;
4484
4485 /* silent vlan removal */
4486 rx_data->silent_vlan_removal_flg =
4487 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4488 rx_data->silent_vlan_value =
4489 cpu_to_le16(params->silent_removal_value);
4490 rx_data->silent_vlan_mask =
4491 cpu_to_le16(params->silent_removal_mask);
4492
4493}
4494
4495/* initialize the general, tx and rx parts of a queue object */
4496static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4497 struct bnx2x_queue_state_params *cmd_params,
4498 struct client_init_ramrod_data *data)
4499{
4500 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4501 &cmd_params->params.setup.gen_params,
4502 &data->general,
4503 &cmd_params->params.setup.flags);
4504
4505 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4506 &cmd_params->params.setup.txq_params,
4507 &data->tx,
4508 &cmd_params->params.setup.flags);
4509
4510 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4511 &cmd_params->params.setup.rxq_params,
4512 &data->rx,
4513 &cmd_params->params.setup.flags);
4514
4515 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4516 &cmd_params->params.setup.pause_params,
4517 &data->rx);
4518}
4519
4520/* initialize the general and tx parts of a tx-only queue object */
4521static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4522 struct bnx2x_queue_state_params *cmd_params,
4523 struct tx_queue_init_ramrod_data *data)
4524{
4525 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4526 &cmd_params->params.tx_only.gen_params,
4527 &data->general,
4528 &cmd_params->params.tx_only.flags);
4529
4530 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4531 &cmd_params->params.tx_only.txq_params,
4532 &data->tx,
4533 &cmd_params->params.tx_only.flags);
4534
4535 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n", cmd_params->q_obj->cids[0],
4536 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4537}
4538
4539/**
4540 * bnx2x_q_init - init HW/FW queue
4541 *
4542 * @bp: device handle
4543 * @params: queue state parameters
4544 *
4545 * HW/FW initial Queue configuration:
4546 * - HC: Rx and Tx
4547 * - CDU context validation
4548 *
4549 */
4550static inline int bnx2x_q_init(struct bnx2x *bp,
4551 struct bnx2x_queue_state_params *params)
4552{
4553 struct bnx2x_queue_sp_obj *o = params->q_obj;
4554 struct bnx2x_queue_init_params *init = &params->params.init;
4555 u16 hc_usec;
4556 u8 cos;
4557
4558 /* Tx HC configuration */
4559 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4560 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4561 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4562
4563 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4564 init->tx.sb_cq_index,
4565 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4566 hc_usec);
4567 }
4568
4569 /* Rx HC configuration */
4570 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4571 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4572 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4573
4574 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4575 init->rx.sb_cq_index,
4576 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4577 hc_usec);
4578 }
4579
4580 /* Set CDU context validation values */
4581 for (cos = 0; cos < o->max_cos; cos++) {
4582 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4583 o->cids[cos], cos);
4584 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4585 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4586 }
4587
4588 /* As no ramrod is sent, complete the command immediately */
4589 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4590
4591 mmiowb();
4592 smp_mb();
4593
4594 return 0;
4595}
4596
4597static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4598 struct bnx2x_queue_state_params *params)
4599{
4600 struct bnx2x_queue_sp_obj *o = params->q_obj;
4601 struct client_init_ramrod_data *rdata =
4602 (struct client_init_ramrod_data *)o->rdata;
4603 dma_addr_t data_mapping = o->rdata_mapping;
4604 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4605
4606 /* Clear the ramrod data */
4607 memset(rdata, 0, sizeof(*rdata));
4608
4609 /* Fill the ramrod data */
4610 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4611
4612 /*
4613 * No need for an explicit memory barrier here: the write to the
4614 * SPQ element only needs to be ordered against the update of the
4615 * SPQ producer, which involves a memory read, and bnx2x_sp_post()
4616 * already places a full memory barrier there for exactly that
4617 * reason.
4618 */
4619
4620 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4621 U64_HI(data_mapping),
4622 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4623}
4624
4625static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4626 struct bnx2x_queue_state_params *params)
4627{
4628 struct bnx2x_queue_sp_obj *o = params->q_obj;
4629 struct client_init_ramrod_data *rdata =
4630 (struct client_init_ramrod_data *)o->rdata;
4631 dma_addr_t data_mapping = o->rdata_mapping;
4632 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4633
4634 /* Clear the ramrod data */
4635 memset(rdata, 0, sizeof(*rdata));
4636
4637 /* Fill the ramrod data */
4638 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4639 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4640
4641 /*
4642 * No need for an explicit memory barrier here: the write to the
4643 * SPQ element only needs to be ordered against the update of the
4644 * SPQ producer, which involves a memory read, and bnx2x_sp_post()
4645 * already places a full memory barrier there for exactly that
4646 * reason.
4647 */
4648
4649 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4650 U64_HI(data_mapping),
4651 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4652}
4653
4654static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4655 struct bnx2x_queue_state_params *params)
4656{
4657 struct bnx2x_queue_sp_obj *o = params->q_obj;
4658 struct tx_queue_init_ramrod_data *rdata =
4659 (struct tx_queue_init_ramrod_data *)o->rdata;
4660 dma_addr_t data_mapping = o->rdata_mapping;
4661 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4662 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4663 &params->params.tx_only;
4664 u8 cid_index = tx_only_params->cid_index;
4665
4666
4667 if (cid_index >= o->max_cos) {
4668 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4669 o->cl_id, cid_index);
4670 return -EINVAL;
4671 }
4672
4673 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4674 tx_only_params->gen_params.cos,
4675 tx_only_params->gen_params.spcl_id);
4676
4677 /* Clear the ramrod data */
4678 memset(rdata, 0, sizeof(*rdata));
4679
4680 /* Fill the ramrod data */
4681 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4682
4683 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
94f05b0f 4684 "sp-client id %d, cos %d\n",
6383c0b3
AE
4685 o->cids[cid_index],
4686 rdata->general.client_id,
4687 rdata->general.sp_client_id, rdata->general.cos);
4688
4689 /*
4690 * No need for an explicit memory barrier here: the write to the
4691 * SPQ element only needs to be ordered against the update of the
4692 * SPQ producer, which involves a memory read, and bnx2x_sp_post()
4693 * already places a full memory barrier there for exactly that
4694 * reason.
4695 */
4696
4697 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4698 U64_HI(data_mapping),
4699 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4700}
4701
4702static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4703 struct bnx2x_queue_sp_obj *obj,
4704 struct bnx2x_queue_update_params *params,
4705 struct client_update_ramrod_data *data)
4706{
4707 /* Client ID of the client to update */
4708 data->client_id = obj->cl_id;
4709
4710 /* Function ID of the client to update */
4711 data->func_id = obj->func_id;
4712
4713 /* Default VLAN value */
4714 data->default_vlan = cpu_to_le16(params->def_vlan);
4715
4716 /* Inner VLAN stripping */
4717 data->inner_vlan_removal_enable_flg =
4718 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4719 data->inner_vlan_removal_change_flg =
4720 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4721 &params->update_flags);
4722
4723 /* Outer VLAN stripping */
4724 data->outer_vlan_removal_enable_flg =
4725 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4726 data->outer_vlan_removal_change_flg =
4727 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4728 &params->update_flags);
4729
4730 /* Drop packets that have source MAC that doesn't belong to this
4731 * Queue.
4732 */
4733 data->anti_spoofing_enable_flg =
4734 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4735 data->anti_spoofing_change_flg =
4736 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4737
4738 /* Activate/Deactivate */
4739 data->activate_flg =
4740 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4741 data->activate_change_flg =
4742 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4743
4744 /* Enable default VLAN */
4745 data->default_vlan_enable_flg =
4746 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4747 data->default_vlan_change_flg =
4748 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4749 &params->update_flags);
4750
4751 /* silent vlan removal */
4752 data->silent_vlan_change_flg =
4753 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4754 &params->update_flags);
4755 data->silent_vlan_removal_flg =
4756 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4757 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4758 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4759}
4760
4761static inline int bnx2x_q_send_update(struct bnx2x *bp,
4762 struct bnx2x_queue_state_params *params)
4763{
4764 struct bnx2x_queue_sp_obj *o = params->q_obj;
4765 struct client_update_ramrod_data *rdata =
4766 (struct client_update_ramrod_data *)o->rdata;
4767 dma_addr_t data_mapping = o->rdata_mapping;
4768 struct bnx2x_queue_update_params *update_params =
4769 &params->params.update;
4770 u8 cid_index = update_params->cid_index;
4771
4772 if (cid_index >= o->max_cos) {
4773 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4774 o->cl_id, cid_index);
4775 return -EINVAL;
4776 }
4777
4778
4779 /* Clear the ramrod data */
4780 memset(rdata, 0, sizeof(*rdata));
4781
4782 /* Fill the ramrod data */
4783 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4784
4785 /*
4786 * No need for an explicit memory barrier here: the write to the
4787 * SPQ element only needs to be ordered against the update of the
4788 * SPQ producer, which involves a memory read, and bnx2x_sp_post()
4789 * already places a full memory barrier there for exactly that
4790 * reason.
4791 */
4792
4793 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4794 o->cids[cid_index], U64_HI(data_mapping),
4795 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4796}
4797
4798/**
4799 * bnx2x_q_send_deactivate - send DEACTIVATE command
4800 *
4801 * @bp: device handle
4802 * @params: queue state parameters
4803 *
4804 * implemented using the UPDATE command.
4805 */
4806static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4807 struct bnx2x_queue_state_params *params)
4808{
4809 struct bnx2x_queue_update_params *update = &params->params.update;
4810
4811 memset(update, 0, sizeof(*update));
4812
4813 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4814
4815 return bnx2x_q_send_update(bp, params);
4816}
4817
4818/**
4819 * bnx2x_q_send_activate - send ACTIVATE command
4820 *
4821 * @bp: device handle
4822 * @params: queue state parameters
4823 *
4824 * implemented using the UPDATE command.
4825 */
4826static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4827 struct bnx2x_queue_state_params *params)
4828{
4829 struct bnx2x_queue_update_params *update = &params->params.update;
4830
4831 memset(update, 0, sizeof(*update));
4832
4833 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4834 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4835
4836 return bnx2x_q_send_update(bp, params);
4837}
4838
4839static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4840 struct bnx2x_queue_state_params *params)
4841{
4842 /* TODO: Not implemented yet. */
4843 return -1;
4844}
4845
4846static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4847 struct bnx2x_queue_state_params *params)
4848{
4849 struct bnx2x_queue_sp_obj *o = params->q_obj;
4850
4851 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4852 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4853 ETH_CONNECTION_TYPE);
4854}
4855
4856static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4857 struct bnx2x_queue_state_params *params)
4858{
4859 struct bnx2x_queue_sp_obj *o = params->q_obj;
4860 u8 cid_idx = params->params.cfc_del.cid_index;
4861
4862 if (cid_idx >= o->max_cos) {
4863 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4864 o->cl_id, cid_idx);
4865 return -EINVAL;
4866 }
4867
4868 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4869 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4870}
4871
4872static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4873 struct bnx2x_queue_state_params *params)
4874{
4875 struct bnx2x_queue_sp_obj *o = params->q_obj;
4876 u8 cid_index = params->params.terminate.cid_index;
4877
4878 if (cid_index >= o->max_cos) {
4879 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4880 o->cl_id, cid_index);
4881 return -EINVAL;
4882 }
4883
4884 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4885 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4886}
4887
4888static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4889 struct bnx2x_queue_state_params *params)
4890{
4891 struct bnx2x_queue_sp_obj *o = params->q_obj;
4892
4893 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4894 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4895 ETH_CONNECTION_TYPE);
4896}
4897
4898static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4899 struct bnx2x_queue_state_params *params)
4900{
4901 switch (params->cmd) {
4902 case BNX2X_Q_CMD_INIT:
4903 return bnx2x_q_init(bp, params);
4904 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4905 return bnx2x_q_send_setup_tx_only(bp, params);
4906 case BNX2X_Q_CMD_DEACTIVATE:
4907 return bnx2x_q_send_deactivate(bp, params);
4908 case BNX2X_Q_CMD_ACTIVATE:
4909 return bnx2x_q_send_activate(bp, params);
4910 case BNX2X_Q_CMD_UPDATE:
4911 return bnx2x_q_send_update(bp, params);
4912 case BNX2X_Q_CMD_UPDATE_TPA:
4913 return bnx2x_q_send_update_tpa(bp, params);
4914 case BNX2X_Q_CMD_HALT:
4915 return bnx2x_q_send_halt(bp, params);
4916 case BNX2X_Q_CMD_CFC_DEL:
4917 return bnx2x_q_send_cfc_del(bp, params);
4918 case BNX2X_Q_CMD_TERMINATE:
4919 return bnx2x_q_send_terminate(bp, params);
4920 case BNX2X_Q_CMD_EMPTY:
4921 return bnx2x_q_send_empty(bp, params);
4922 default:
4923 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4924 return -EINVAL;
4925 }
4926}
4927
4928static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4929 struct bnx2x_queue_state_params *params)
4930{
4931 switch (params->cmd) {
4932 case BNX2X_Q_CMD_SETUP:
4933 return bnx2x_q_send_setup_e1x(bp, params);
4934 case BNX2X_Q_CMD_INIT:
4935 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4936 case BNX2X_Q_CMD_DEACTIVATE:
4937 case BNX2X_Q_CMD_ACTIVATE:
4938 case BNX2X_Q_CMD_UPDATE:
4939 case BNX2X_Q_CMD_UPDATE_TPA:
4940 case BNX2X_Q_CMD_HALT:
4941 case BNX2X_Q_CMD_CFC_DEL:
4942 case BNX2X_Q_CMD_TERMINATE:
4943 case BNX2X_Q_CMD_EMPTY:
4944 return bnx2x_queue_send_cmd_cmn(bp, params);
4945 default:
4946 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4947 return -EINVAL;
4948 }
4949}
4950
4951static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4952 struct bnx2x_queue_state_params *params)
4953{
4954 switch (params->cmd) {
4955 case BNX2X_Q_CMD_SETUP:
4956 return bnx2x_q_send_setup_e2(bp, params);
4957 case BNX2X_Q_CMD_INIT:
4958 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4959 case BNX2X_Q_CMD_DEACTIVATE:
4960 case BNX2X_Q_CMD_ACTIVATE:
4961 case BNX2X_Q_CMD_UPDATE:
4962 case BNX2X_Q_CMD_UPDATE_TPA:
4963 case BNX2X_Q_CMD_HALT:
4964 case BNX2X_Q_CMD_CFC_DEL:
4965 case BNX2X_Q_CMD_TERMINATE:
4966 case BNX2X_Q_CMD_EMPTY:
4967 return bnx2x_queue_send_cmd_cmn(bp, params);
4968 default:
4969 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4970 return -EINVAL;
4971 }
4972}
4973
4974/**
4975 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4976 *
4977 * @bp: device handle
4978 * @o: queue state object
4979 * @params: state transition parameters
4980 *
4981 * (not Forwarding)
4982 * It both checks if the requested command is legal in a current
4983 * state and, if it's legal, sets a `next_state' in the object
4984 * that will be used in the completion flow to set the `state'
4985 * of the object.
4986 *
4987 * returns 0 if a requested command is a legal transition,
4988 * -EINVAL otherwise.
4989 */
4990static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4991 struct bnx2x_queue_sp_obj *o,
4992 struct bnx2x_queue_state_params *params)
4993{
4994 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4995 enum bnx2x_queue_cmd cmd = params->cmd;
4996 struct bnx2x_queue_update_params *update_params =
4997 &params->params.update;
4998 u8 next_tx_only = o->num_tx_only;
4999
5000 /*
5001 * Forget all pending-for-completion commands if a driver-only state
5002 * transition has been requested.
5003 */
5004 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5005 o->pending = 0;
5006 o->next_state = BNX2X_Q_STATE_MAX;
5007 }
5008
5009 /*
5010 * Don't allow a next state transition if we are in the middle of
5011 * the previous one.
5012 */
5013 if (o->pending)
5014 return -EBUSY;
5015
5016 switch (state) {
5017 case BNX2X_Q_STATE_RESET:
5018 if (cmd == BNX2X_Q_CMD_INIT)
5019 next_state = BNX2X_Q_STATE_INITIALIZED;
5020
5021 break;
5022 case BNX2X_Q_STATE_INITIALIZED:
5023 if (cmd == BNX2X_Q_CMD_SETUP) {
5024 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5025 &params->params.setup.flags))
5026 next_state = BNX2X_Q_STATE_ACTIVE;
5027 else
5028 next_state = BNX2X_Q_STATE_INACTIVE;
5029 }
5030
5031 break;
5032 case BNX2X_Q_STATE_ACTIVE:
5033 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5034 next_state = BNX2X_Q_STATE_INACTIVE;
5035
5036 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5037 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5038 next_state = BNX2X_Q_STATE_ACTIVE;
5039
5040 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5041 next_state = BNX2X_Q_STATE_MULTI_COS;
5042 next_tx_only = 1;
5043 }
5044
5045 else if (cmd == BNX2X_Q_CMD_HALT)
5046 next_state = BNX2X_Q_STATE_STOPPED;
5047
5048 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5049 /* If "active" state change is requested, update the
5050 * state accordingly.
5051 */
5052 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5053 &update_params->update_flags) &&
5054 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5055 &update_params->update_flags))
5056 next_state = BNX2X_Q_STATE_INACTIVE;
5057 else
5058 next_state = BNX2X_Q_STATE_ACTIVE;
5059 }
5060
5061 break;
5062 case BNX2X_Q_STATE_MULTI_COS:
5063 if (cmd == BNX2X_Q_CMD_TERMINATE)
5064 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5065
5066 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5067 next_state = BNX2X_Q_STATE_MULTI_COS;
5068 next_tx_only = o->num_tx_only + 1;
5069 }
5070
5071 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5072 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5073 next_state = BNX2X_Q_STATE_MULTI_COS;
5074
5075 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5076 /* If "active" state change is requested, update the
5077 * state accordingly.
5078 */
5079 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5080 &update_params->update_flags) &&
5081 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5082 &update_params->update_flags))
5083 next_state = BNX2X_Q_STATE_INACTIVE;
5084 else
5085 next_state = BNX2X_Q_STATE_MULTI_COS;
5086 }
5087
5088 break;
5089 case BNX2X_Q_STATE_MCOS_TERMINATED:
5090 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5091 next_tx_only = o->num_tx_only - 1;
5092 if (next_tx_only == 0)
5093 next_state = BNX2X_Q_STATE_ACTIVE;
5094 else
5095 next_state = BNX2X_Q_STATE_MULTI_COS;
5096 }
5097
5098 break;
5099 case BNX2X_Q_STATE_INACTIVE:
5100 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5101 next_state = BNX2X_Q_STATE_ACTIVE;
5102
5103 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5104 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5105 next_state = BNX2X_Q_STATE_INACTIVE;
5106
5107 else if (cmd == BNX2X_Q_CMD_HALT)
5108 next_state = BNX2X_Q_STATE_STOPPED;
5109
5110 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5111 /* If "active" state change is requested, update the
5112 * state accordingly.
5113 */
5114 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5115 &update_params->update_flags) &&
5116 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5117 &update_params->update_flags)) {
5118 if (o->num_tx_only == 0)
5119 next_state = BNX2X_Q_STATE_ACTIVE;
5120 else /* tx only queues exist for this queue */
5121 next_state = BNX2X_Q_STATE_MULTI_COS;
5122 } else
5123 next_state = BNX2X_Q_STATE_INACTIVE;
5124 }
5125
5126 break;
5127 case BNX2X_Q_STATE_STOPPED:
5128 if (cmd == BNX2X_Q_CMD_TERMINATE)
5129 next_state = BNX2X_Q_STATE_TERMINATED;
5130
5131 break;
5132 case BNX2X_Q_STATE_TERMINATED:
5133 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5134 next_state = BNX2X_Q_STATE_RESET;
5135
5136 break;
5137 default:
5138 BNX2X_ERR("Illegal state: %d\n", state);
5139 }
5140
5141 /* Transition is assured */
5142 if (next_state != BNX2X_Q_STATE_MAX) {
5143 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5144 state, cmd, next_state);
5145 o->next_state = next_state;
5146 o->next_tx_only = next_tx_only;
5147 return 0;
5148 }
5149
5150 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5151
5152 return -EINVAL;
5153}
5154
5155void bnx2x_init_queue_obj(struct bnx2x *bp,
5156 struct bnx2x_queue_sp_obj *obj,
5157 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5158 void *rdata,
5159 dma_addr_t rdata_mapping, unsigned long type)
5160{
5161 memset(obj, 0, sizeof(*obj));
5162
5163 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5164 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5165
5166 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5167 obj->max_cos = cid_cnt;
5168 obj->cl_id = cl_id;
5169 obj->func_id = func_id;
5170 obj->rdata = rdata;
5171 obj->rdata_mapping = rdata_mapping;
5172 obj->type = type;
5173 obj->next_state = BNX2X_Q_STATE_MAX;
5174
5175 if (CHIP_IS_E1x(bp))
5176 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5177 else
5178 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5179
5180 obj->check_transition = bnx2x_queue_chk_transition;
5181
5182 obj->complete_cmd = bnx2x_queue_comp_cmd;
5183 obj->wait_comp = bnx2x_queue_wait_comp;
5184 obj->set_pending = bnx2x_queue_set_pending;
5185}
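
/* Illustrative sketch (not part of the original file): constructing a
 * queue object that owns a primary connection plus one tx-only
 * connection. The cid values, cl_id and type flags are made up.
 */
static inline void example_init_queue(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *obj,
				      void *rdata, dma_addr_t rdata_mapping)
{
	u32 cids[2] = {17, 18};	/* primary cid first, then tx-only */
	unsigned long q_type = 0;

	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	bnx2x_init_queue_obj(bp, obj, 0 /* cl_id */, cids, 2, BP_FUNC(bp),
			     rdata, rdata_mapping, q_type);
}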
5186
5187void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5188 struct bnx2x_queue_sp_obj *obj,
5189 u32 cid, u8 index)
5190{
5191 obj->cids[index] = cid;
5192}
5193
5194/********************** Function state object *********************************/
5195enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5196 struct bnx2x_func_sp_obj *o)
5197{
5198 /* in the middle of a transaction - return INVALID state */
5199 if (o->pending)
5200 return BNX2X_F_STATE_MAX;
5201
5202 /*
5203 * ensure the ordering of reading o->pending and o->state:
5204 * o->pending must be read first
5205 */
5206 rmb();
5207
5208 return o->state;
5209}
5210
5211static int bnx2x_func_wait_comp(struct bnx2x *bp,
5212 struct bnx2x_func_sp_obj *o,
5213 enum bnx2x_func_cmd cmd)
5214{
5215 return bnx2x_state_wait(bp, cmd, &o->pending);
5216}
5217
5218/**
5219 * bnx2x_func_state_change_comp - complete the state machine transition
5220 *
5221 * @bp: device handle
5222 * @o: function state object
5223 * @cmd: command that completed
5224 *
5225 * Called on state change transition. Completes the state
5226 * machine transition only - no HW interaction.
5227 */
5228static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5229 struct bnx2x_func_sp_obj *o,
5230 enum bnx2x_func_cmd cmd)
5231{
5232 unsigned long cur_pending = o->pending;
5233
5234 if (!test_and_clear_bit(cmd, &cur_pending)) {
5235 BNX2X_ERR("Bad MC reply %d for func %d in state %d "
5236 "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
5237 o->state, cur_pending, o->next_state);
5238 return -EINVAL;
5239 }
5240
5241 DP(BNX2X_MSG_SP,
5242 "Completing command %d for func %d, setting state to %d\n",
5243 cmd, BP_FUNC(bp), o->next_state);
5244
5245 o->state = o->next_state;
5246 o->next_state = BNX2X_F_STATE_MAX;
5247
5248 /* It's important that o->state and o->next_state are
5249 * updated before o->pending.
5250 */
5251 wmb();
5252
5253 clear_bit(cmd, &o->pending);
5254 smp_mb__after_clear_bit();
5255
5256 return 0;
5257}
5258
5259/**
5260 * bnx2x_func_comp_cmd - complete the state change command
5261 *
5262 * @bp: device handle
5263 * @o: function state object
5264 * @cmd: command that completed
5265 *
5266 * Checks that the arrived completion is expected.
5267 */
5268static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5269 struct bnx2x_func_sp_obj *o,
5270 enum bnx2x_func_cmd cmd)
5271{
5272 /* Complete the state machine part first, check if it's a
5273 * legal completion.
5274 */
5275 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5276 return rc;
5277}
5278
5279/**
5280 * bnx2x_func_chk_transition - perform function state machine transition
5281 *
5282 * @bp: device handle
5283 * @o: function state object
5284 * @params: state transition parameters
5285 *
5286 * It both checks if the requested command is legal in a current
5287 * state and, if it's legal, sets a `next_state' in the object
5288 * that will be used in the completion flow to set the `state'
5289 * of the object.
5290 *
5291 * returns 0 if a requested command is a legal transition,
5292 * -EINVAL otherwise.
5293 */
5294static int bnx2x_func_chk_transition(struct bnx2x *bp,
5295 struct bnx2x_func_sp_obj *o,
5296 struct bnx2x_func_state_params *params)
5297{
5298 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5299 enum bnx2x_func_cmd cmd = params->cmd;
5300
5301 /*
5302 * Forget all pending-for-completion commands if a driver-only state
5303 * transition has been requested.
5304 */
5305 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5306 o->pending = 0;
5307 o->next_state = BNX2X_F_STATE_MAX;
5308 }
5309
5310 /*
5311 * Don't allow a next state transition if we are in the middle of
5312 * the previous one.
5313 */
5314 if (o->pending)
5315 return -EBUSY;
5316
5317 switch (state) {
5318 case BNX2X_F_STATE_RESET:
5319 if (cmd == BNX2X_F_CMD_HW_INIT)
5320 next_state = BNX2X_F_STATE_INITIALIZED;
5321
5322 break;
5323 case BNX2X_F_STATE_INITIALIZED:
5324 if (cmd == BNX2X_F_CMD_START)
5325 next_state = BNX2X_F_STATE_STARTED;
5326
5327 else if (cmd == BNX2X_F_CMD_HW_RESET)
5328 next_state = BNX2X_F_STATE_RESET;
5329
5330 break;
5331 case BNX2X_F_STATE_STARTED:
5332 if (cmd == BNX2X_F_CMD_STOP)
5333 next_state = BNX2X_F_STATE_INITIALIZED;
5334 else if (cmd == BNX2X_F_CMD_TX_STOP)
5335 next_state = BNX2X_F_STATE_TX_STOPPED;
5336
5337 break;
5338 case BNX2X_F_STATE_TX_STOPPED:
5339 if (cmd == BNX2X_F_CMD_TX_START)
5340 next_state = BNX2X_F_STATE_STARTED;
5341
5342 break;
5343 default:
5344 BNX2X_ERR("Unknown state: %d\n", state);
5345 }
5346
5347 /* Transition is assured */
5348 if (next_state != BNX2X_F_STATE_MAX) {
5349 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5350 state, cmd, next_state);
5351 o->next_state = next_state;
5352 return 0;
5353 }
5354
5355 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5356 state, cmd);
5357
5358 return -EINVAL;
5359}
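
/* Illustrative caller sketch (not part of the original file): requesting
 * a TX_STOP transition. It assumes bnx2x_func_state_change(), the entry
 * point declared in bnx2x_sp.h, which funnels into the checker above.
 */
static inline int example_func_tx_stop(struct bnx2x *bp,
				       struct bnx2x_func_sp_obj *f_obj)
{
	struct bnx2x_func_state_params params = {0};

	params.f_obj = f_obj;
	params.cmd = BNX2X_F_CMD_TX_STOP;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_func_state_change(bp, &params);
}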
5360
5361/**
5362 * bnx2x_func_init_func - performs HW init at function stage
5363 *
5364 * @bp: device handle
5365 * @drv: driver-specific init/reset operations
5366 *
5367 * Init HW when the current phase is
5368 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5369 * HW blocks.
5370 */
5371static inline int bnx2x_func_init_func(struct bnx2x *bp,
5372 const struct bnx2x_func_sp_drv_ops *drv)
5373{
5374 return drv->init_hw_func(bp);
5375}
5376
5377/**
5378 * bnx2x_func_init_port - performs HW init at port stage
5379 *
5380 * @bp: device handle
5381 * @drv: driver-specific init/reset operations
5382 *
5383 * Init HW when the current phase is
5384 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5385 * FUNCTION-only HW blocks.
5386 *
5387 */
5388static inline int bnx2x_func_init_port(struct bnx2x *bp,
5389 const struct bnx2x_func_sp_drv_ops *drv)
5390{
5391 int rc = drv->init_hw_port(bp);
5392 if (rc)
5393 return rc;
5394
5395 return bnx2x_func_init_func(bp, drv);
5396}
5397
5398/**
5399 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5400 *
5401 * @bp: device handle
5402 * @drv: driver-specific init/reset operations
5403 *
5404 * Init HW when the current phase is
5405 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5406 * PORT-only and FUNCTION-only HW blocks.
5407 */
5408static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5409 const struct bnx2x_func_sp_drv_ops *drv)
5410{
5411 int rc = drv->init_hw_cmn_chip(bp);
5412 if (rc)
5413 return rc;
5414
5415 return bnx2x_func_init_port(bp, drv);
5416}
5417
5418/**
5419 * bnx2x_func_init_cmn - performs HW init at common stage
5420 *
5421 * @bp: device handle
5422 * @drv: driver-specific init/reset operations
5423 *
5424 * Init HW when the current phase is
5425 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5426 * PORT-only and FUNCTION-only HW blocks.
5427 */
5428static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5429 const struct bnx2x_func_sp_drv_ops *drv)
5430{
5431 int rc = drv->init_hw_cmn(bp);
5432 if (rc)
5433 return rc;
5434
5435 return bnx2x_func_init_port(bp, drv);
5436}
5437
5438static int bnx2x_func_hw_init(struct bnx2x *bp,
5439 struct bnx2x_func_state_params *params)
5440{
5441 u32 load_code = params->params.hw_init.load_phase;
5442 struct bnx2x_func_sp_obj *o = params->f_obj;
5443 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5444 int rc = 0;
5445
5446 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5447 BP_ABS_FUNC(bp), load_code);
5448
5449 /* Prepare buffers for unzipping the FW */
5450 rc = drv->gunzip_init(bp);
5451 if (rc)
5452 return rc;
5453
5454 /* Prepare FW */
5455 rc = drv->init_fw(bp);
5456 if (rc) {
5457 BNX2X_ERR("Error loading firmware\n");
5458 goto init_err;
5459 }
5460
5461 /* Handle the beginning of COMMON_XXX phases separately... */
5462 switch (load_code) {
5463 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5464 rc = bnx2x_func_init_cmn_chip(bp, drv);
5465 if (rc)
5466 goto init_err;
5467
5468 break;
5469 case FW_MSG_CODE_DRV_LOAD_COMMON:
5470 rc = bnx2x_func_init_cmn(bp, drv);
5471 if (rc)
5472 goto init_err;
5473
5474 break;
5475 case FW_MSG_CODE_DRV_LOAD_PORT:
5476 rc = bnx2x_func_init_port(bp, drv);
5477 if (rc)
5478 goto init_err;
5479
5480 break;
5481 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5482 rc = bnx2x_func_init_func(bp, drv);
5483 if (rc)
5484 goto init_err;
5485
5486 break;
5487 default:
5488 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5489 rc = -EINVAL;
5490 }
5491
5492init_err:
5493 drv->gunzip_end(bp);
5494
5495 /* In case of success, complete the command immediately: no ramrods
5496 * have been sent.
5497 */
5498 if (!rc)
5499 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5500
5501 return rc;
5502}
5503
5504/**
5505 * bnx2x_func_reset_func - reset HW at function stage
5506 *
5507 * @bp: device handle
5508 * @drv: driver-specific init/reset operations
5509 *
5510 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5511 * FUNCTION-only HW blocks.
5512 */
5513static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5514 const struct bnx2x_func_sp_drv_ops *drv)
5515{
5516 drv->reset_hw_func(bp);
5517}
5518
5519/**
5520 * bnx2x_func_reset_port - reset HW at port stage
5521 *
5522 * @bp: device handle
5523 * @drv: driver-specific init/reset operations
5524 *
5525 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5526 * FUNCTION-only and PORT-only HW blocks.
5527 *
5528 * !!!IMPORTANT!!!
5529 *
5530 * It's important to call reset_port before reset_func() as the last thing
5531 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5532 * makes any DMAE transactions impossible.
5533 */
5534static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5535 const struct bnx2x_func_sp_drv_ops *drv)
5536{
5537 drv->reset_hw_port(bp);
5538 bnx2x_func_reset_func(bp, drv);
5539}
5540
5541/**
5542 * bnx2x_func_reset_cmn - reset HW at common stage
5543 *
5544 * @bp: device handle
5545 * @drv: driver-specific init/reset operations
5546 *
5547 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5548 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5549 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5550 */
5551static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5552 const struct bnx2x_func_sp_drv_ops *drv)
5553{
5554 bnx2x_func_reset_port(bp, drv);
5555 drv->reset_hw_cmn(bp);
5556}
5557
5558
5559static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5560 struct bnx2x_func_state_params *params)
5561{
5562 u32 reset_phase = params->params.hw_reset.reset_phase;
5563 struct bnx2x_func_sp_obj *o = params->f_obj;
5564 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5565
5566 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5567 reset_phase);
5568
5569 switch (reset_phase) {
5570 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5571 bnx2x_func_reset_cmn(bp, drv);
5572 break;
5573 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5574 bnx2x_func_reset_port(bp, drv);
5575 break;
5576 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5577 bnx2x_func_reset_func(bp, drv);
5578 break;
5579 default:
5580 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5581 reset_phase);
5582 break;
5583 }
5584
5585 /* Complete the command immediately: no ramrods have been sent. */
5586 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5587
5588 return 0;
5589}
5590
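/* A matching unload-side sketch (an assumption, not part of this
 * file): requesting the HW_RESET phase handed back by the MCP in
 * response to UNLOAD_REQ.
 */
static int example_func_hw_reset(struct bnx2x *bp, u32 reset_phase)
{
	struct bnx2x_func_state_params func_params = {NULL};

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_RESET;
	func_params.params.hw_reset.reset_phase = reset_phase;

	return bnx2x_func_state_change(bp, &func_params);
}
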
5591static inline int bnx2x_func_send_start(struct bnx2x *bp,
5592 struct bnx2x_func_state_params *params)
5593{
5594 struct bnx2x_func_sp_obj *o = params->f_obj;
5595 struct function_start_data *rdata =
5596 (struct function_start_data *)o->rdata;
5597 dma_addr_t data_mapping = o->rdata_mapping;
5598 struct bnx2x_func_start_params *start_params = &params->params.start;
5599
5600 memset(rdata, 0, sizeof(*rdata));
5601
5602 /* Fill the ramrod data with provided parameters */
5603 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5604 rdata->sd_vlan_tag = start_params->sd_vlan_tag;
5605 rdata->path_id = BP_PATH(bp);
5606 rdata->network_cos_mode = start_params->network_cos_mode;
5607
53e51e2f
VZ
5608 /*
5609 * No need for an explicit memory barrier here: the ordering
5610 * between writing the SPQ element and updating the SPQ
5611 * producer must be ensured in any case, and since the producer
5612 * update involves a memory read, the full memory barrier is
5613 * placed there instead (inside bnx2x_sp_post()).
5614 */
619c5cb6
VZ
5615
5616 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5617 U64_HI(data_mapping),
5618 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5619}
5620
5621static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5622 struct bnx2x_func_state_params *params)
5623{
5624 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5625 NONE_CONNECTION_TYPE);
5626}
5627
6debea87
DK
5628static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5629 struct bnx2x_func_state_params *params)
5630{
5631 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5632 NONE_CONNECTION_TYPE);
5633}
5634static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5635 struct bnx2x_func_state_params *params)
5636{
5637 struct bnx2x_func_sp_obj *o = params->f_obj;
5638 struct flow_control_configuration *rdata =
5639 (struct flow_control_configuration *)o->rdata;
5640 dma_addr_t data_mapping = o->rdata_mapping;
5641 struct bnx2x_func_tx_start_params *tx_start_params =
5642 &params->params.tx_start;
5643 int i;
5644
5645 memset(rdata, 0, sizeof(*rdata));
5646
5647 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5648 rdata->dcb_version = tx_start_params->dcb_version;
5649 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5650
5651 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5652 rdata->traffic_type_to_priority_cos[i] =
5653 tx_start_params->traffic_type_to_priority_cos[i];
5654
5655 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5656 U64_HI(data_mapping),
5657 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5658}
5659
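/* A minimal sketch (an assumption, not part of this file): DCB code
 * would typically restart Tx traffic along these lines. The
 * priority-to-COS mapping is left zeroed here; a real caller would
 * fill traffic_type_to_priority_cos from its DCB configuration.
 */
static int example_tx_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_tx_start_params *tx_params =
		&func_params.params.tx_start;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_TX_START;

	tx_params->dcb_enabled = 1;
	tx_params->dcb_version = 0;

	return bnx2x_func_state_change(bp, &func_params);
}
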
619c5cb6
VZ
5660static int bnx2x_func_send_cmd(struct bnx2x *bp,
5661 struct bnx2x_func_state_params *params)
5662{
5663 switch (params->cmd) {
5664 case BNX2X_F_CMD_HW_INIT:
5665 return bnx2x_func_hw_init(bp, params);
5666 case BNX2X_F_CMD_START:
5667 return bnx2x_func_send_start(bp, params);
5668 case BNX2X_F_CMD_STOP:
5669 return bnx2x_func_send_stop(bp, params);
5670 case BNX2X_F_CMD_HW_RESET:
5671 return bnx2x_func_hw_reset(bp, params);
6debea87
DK
5672 case BNX2X_F_CMD_TX_STOP:
5673 return bnx2x_func_send_tx_stop(bp, params);
5674 case BNX2X_F_CMD_TX_START:
5675 return bnx2x_func_send_tx_start(bp, params);
619c5cb6
VZ
5676 default:
5677 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5678 return -EINVAL;
5679 }
5680}
5681
5682void bnx2x_init_func_obj(struct bnx2x *bp,
5683 struct bnx2x_func_sp_obj *obj,
5684 void *rdata, dma_addr_t rdata_mapping,
5685 struct bnx2x_func_sp_drv_ops *drv_iface)
5686{
5687 memset(obj, 0, sizeof(*obj));
5688
5689 mutex_init(&obj->one_pending_mutex);
5690
5691 obj->rdata = rdata;
5692 obj->rdata_mapping = rdata_mapping;
5693
5694 obj->send_cmd = bnx2x_func_send_cmd;
5695 obj->check_transition = bnx2x_func_chk_transition;
5696 obj->complete_cmd = bnx2x_func_comp_cmd;
5697 obj->wait_comp = bnx2x_func_wait_comp;
5698
5699 obj->drv = drv_iface;
5700}
5701
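/* A minimal sketch (an assumption, not part of this file) of wiring
 * the function object up at init time. func_rdata stands for the
 * caller's DMA-able ramrod data buffer and bnx2x_func_sp_drv for its
 * bnx2x_func_sp_drv_ops implementation.
 */
static void example_init_func_obj(struct bnx2x *bp)
{
	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    &bnx2x_func_sp_drv);
}
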
5702/**
5703 * bnx2x_func_state_change - perform Function state change transition
5704 *
5705 * @bp: device handle
5706 * @params: parameters to perform the transaction
5707 *
5708 * returns 0 in case of successfully completed transition,
5709 * negative error code in case of failure, positive
5710 * (EBUSY) value if there is a completion that is
5711 * still pending (possible only if RAMROD_COMP_WAIT is
5712 * not set in params->ramrod_flags for asynchronous
5713 * commands).
5714 */
5715int bnx2x_func_state_change(struct bnx2x *bp,
5716 struct bnx2x_func_state_params *params)
5717{
5718 struct bnx2x_func_sp_obj *o = params->f_obj;
5719 int rc;
5720 enum bnx2x_func_cmd cmd = params->cmd;
5721 unsigned long *pending = &o->pending;
5722
5723 mutex_lock(&o->one_pending_mutex);
5724
5725 /* Check that the requested transition is legal */
5726 if (o->check_transition(bp, o, params)) {
5727 mutex_unlock(&o->one_pending_mutex);
5728 return -EINVAL;
5729 }
5730
5731 /* Set "pending" bit */
5732 set_bit(cmd, pending);
5733
5734 /* Don't send a command if only driver cleanup was requested */
5735 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5736 bnx2x_func_state_change_comp(bp, o, cmd);
5737 mutex_unlock(&o->one_pending_mutex);
5738 } else {
5739 /* Send a ramrod */
5740 rc = o->send_cmd(bp, params);
5741
5742 mutex_unlock(&o->one_pending_mutex);
5743
5744 if (rc) {
5745 o->next_state = BNX2X_F_STATE_MAX;
5746 clear_bit(cmd, pending);
5747 smp_mb__after_clear_bit();
5748 return rc;
5749 }
5750
5751 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5752 rc = o->wait_comp(bp, o, cmd);
5753 if (rc)
5754 return rc;
5755
5756 return 0;
5757 }
5758 }
042181f5 5759
619c5cb6 5760 return !!test_bit(cmd, pending);
042181f5 5761}
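
/* A minimal usage sketch (an assumption, not part of this file):
 * requesting a function START and blocking until its ramrod
 * completes. With RAMROD_COMP_WAIT set, bnx2x_func_state_change()
 * returns only once the FUNCTION_START completion has arrived or an
 * error has occurred.
 */
static int example_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Single-function mode, no outer VLAN tag in this sketch */
	start_params->mf_mode = 0;
	start_params->sd_vlan_tag = 0;
	start_params->network_cos_mode = 0;

	/* Sleep until the completion is signalled via complete_cmd() */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return bnx2x_func_state_change(bp, &func_params);
}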