]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
UBUNTU: SAUCE: bnxt_en_bpo: Import bnxt_en driver version 1.8.1
authorJuerg Haefliger <juerg.haefliger@canonical.com>
Thu, 17 Aug 2017 11:32:00 +0000 (13:32 +0200)
committerKleber Sacilotto de Souza <kleber.souza@canonical.com>
Thu, 24 Aug 2017 16:35:18 +0000 (18:35 +0200)
BugLink: http://bugs.launchpad.net/bugs/1711056
Signed-off-by: Juerg Haefliger <juerg.haefliger@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
23 files changed:
ubuntu/bnxt/COPYING [new file with mode: 0644]
ubuntu/bnxt/ChangeLog [new file with mode: 0644]
ubuntu/bnxt/Makefile [new file with mode: 0644]
ubuntu/bnxt/README.TXT [new file with mode: 0644]
ubuntu/bnxt/RELEASE.TXT [new file with mode: 0644]
ubuntu/bnxt/bnxt.c [new file with mode: 0644]
ubuntu/bnxt/bnxt.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_compat.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_dcb.c [new file with mode: 0644]
ubuntu/bnxt/bnxt_dcb.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_ethtool.c [new file with mode: 0644]
ubuntu/bnxt/bnxt_ethtool.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_fw_hdr.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_hsi.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_nvm_defs.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_ptp.c [new file with mode: 0644]
ubuntu/bnxt/bnxt_ptp.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_sriov.c [new file with mode: 0644]
ubuntu/bnxt/bnxt_sriov.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_ulp.c [new file with mode: 0644]
ubuntu/bnxt/bnxt_ulp.h [new file with mode: 0644]
ubuntu/bnxt/bnxt_xdp.c [new file with mode: 0644]
ubuntu/bnxt/bnxt_xdp.h [new file with mode: 0644]

diff --git a/ubuntu/bnxt/COPYING b/ubuntu/bnxt/COPYING
new file mode 100644 (file)
index 0000000..d159169
--- /dev/null
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/ubuntu/bnxt/ChangeLog b/ubuntu/bnxt/ChangeLog
new file mode 100644 (file)
index 0000000..fe28419
--- /dev/null
@@ -0,0 +1,3952 @@
+commit 9b0436c3f29483ca91d890b0072c0c02e2e535ed
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Jul 11 13:05:36 2017 -0400
+
+    bnxt_en: Fix SRIOV on big-endian architecture.
+    
+    The PF driver sets up a list of firmware commands from the VF driver that
+    needs to be forwarded to the PF for approval.  This list is a 256-bit
+    bitmap.  The code that sets up the bitmap falls apart on big-endian
+    architecture.  __set_bit() does not work because it operates on long types
+    whereas the firmware interface is defined in u32 types, causing bits in
+    the wrong 32-bit word to be set.
+    
+    Fix it by setting the proper bits on an array of u32.
+    
+    Fixes: de68f5de5651 ("bnxt_en: Fix bitmap declaration to work on 32-bit arches.")
+    Reported-by: Shannon Nelson <shannon.nelson@oracle.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3b6b34df342553a7522561e34288f5bb803aa9aa
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Jul 11 13:05:35 2017 -0400
+
+    bnxt_en: Fix bug in ethtool -L.
+    
+    When changing channels from combined to rx/tx or vice versa, the code
+    uses the wrong "sh" parameter to determine if we are reserving rings
+    for shared or non-shared mode.  It should be using the ethtool requested
+    "sh" parameter instead of the current "sh" parameter.
+    
+    Fix it by passing the "sh" parameter to bnxt_reserve_rings().  For
+    ethtool, we will pass in the requested "sh" parameter.
+    
+    Fixes: 391be5c27364 ("bnxt_en: Implement new scheme to reserve tx rings.")
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit f9b76ebd49f97458857568918c305a17fa7c6567
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Jul 11 13:05:34 2017 -0400
+
+    bnxt_en: Fix race conditions in .ndo_get_stats64().
+    
+    .ndo_get_stats64() may not be protected by RTNL and can race with
+    .ndo_stop() or other ethtool operations that can free the statistics
+    memory.  Fix it by setting a new flag BNXT_STATE_READ_STATS and then
+    proceeding to read statistics memory only if the state is OPEN.  The
+    close path that frees the memory clears the OPEN state and then waits
+    for the BNXT_STATE_READ_STATS to clear before proceeding to free the
+    statistics memory.
+    
+    Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.")
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 2270bc5da34979454e6f2eb133d800b635156174
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jun 23 14:01:01 2017 -0400
+
+    bnxt_en: Fix netpoll handling.
+    
+    To handle netpoll properly, the driver must only handle TX packets
+    during NAPI.  Handling RX events cause warnings and errors in
+    netpoll mode. The ndo_poll_controller() method should call
+    napi_schedule() directly so that a NAPI weight of zero will be used
+    during netpoll mode.
+    
+    The bnxt_en driver supports 2 ring modes: combined, and separate rx/tx.
+    In separate rx/tx mode, the ndo_poll_controller() method will only
+    process the tx rings.  In combined mode, the rx and tx completion
+    entries are mixed in the completion ring and we need to drop the rx
+    entries and recycle the rx buffers.
+    
+    Add a function bnxt_force_rx_discard() to handle this in netpoll mode
+    when we see rx entries in combined ring mode.
+    
+    Reported-by: Calvin Owens <calvinowens@fb.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 69c149e2e39e8d66437c9034bb4926ef2c1f7c23
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jun 23 14:01:00 2017 -0400
+
+    bnxt_en: Add missing logic to handle TPA end error conditions.
+    
+    When we get a TPA_END completion to handle a completed LRO packet, it
+    is possible that hardware would indicate errors.  The current code is
+    not checking for the error condition.  Define the proper error bits and
+    the macro to check for this error and abort properly.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8902965f8cb23bba8aa7f3be293ec2f3067b82c6
+Author: Martin KaFai Lau <kafai@fb.com>
+Date:   Thu Jun 15 17:29:13 2017 -0700
+
+    bpf: bnxt: Report bpf_prog ID during XDP_QUERY_PROG
+    
+    Add support to bnxt to report bpf_prog ID during XDP_QUERY_PROG.
+    
+    Signed-off-by: Martin KaFai Lau <kafai@fb.com>
+    Cc: Michael Chan <michael.chan@broadcom.com>
+    Acked-by: Alexei Starovoitov <ast@fb.com>
+    Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a5fcf8a6c968ed8e312ff0b2a55d4c62d821eabb
+Author: Jiri Pirko <jiri@mellanox.com>
+Date:   Tue Jun 6 17:00:16 2017 +0200
+
+    net: propagate tc filter chain index down the ndo_setup_tc call
+    
+    We need to push the chain index down to the drivers, so they have the
+    information to which chain the rule belongs. For now, no driver supports
+    multichain offload, so only chain 0 is supported. This is needed to
+    prevent chain squashes during offload for now. Later this will be used
+    to implement multichain offload.
+    
+    Signed-off-by: Jiri Pirko <jiri@mellanox.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit f667724b99ad1afc91f16064d8fb293d2805bd57
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue May 16 16:39:44 2017 -0400
+
+    bnxt_en: Check status of firmware DCBX agent before setting DCB_CAP_DCBX_HOST.
+    
+    Otherwise, all the host based DCBX settings from lldpad will fail if the
+    firmware DCBX agent is running.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 87fe603274aa9889c05cca3c3e45675e1997cb13
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue May 16 16:39:43 2017 -0400
+
+    bnxt_en: Call bnxt_dcb_init() after getting firmware DCBX configuration.
+    
+    In the current code, bnxt_dcb_init() is called too early before we
+    determine if the firmware DCBX agent is running or not.  As a result,
+    we are not setting the DCB_CAP_DCBX_HOST and DCB_CAP_DCBX_LLD_MANAGED
+    flags properly to report to DCBNL.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit ac45bd93a5035c2f39c9862b8b6ed692db0fdc87
+Author: Dan Carpenter <dan.carpenter@oracle.com>
+Date:   Sat May 6 03:49:01 2017 +0300
+
+    bnxt_en: allocate enough space for ->ntp_fltr_bmap
+    
+    We have the number of longs, but we need to calculate the number of
+    bytes required.
+    
+    Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.")
+    Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 7d63818a35851cf00867248d5ab50a8fe8df5943
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Apr 21 20:11:25 2017 -0400
+
+    bnxt_en: Check the FW_LLDP_AGENT flag before allowing DCBX host agent.
+    
+    Check the additional flag in bnxt_hwrm_func_qcfg() before allowing
+    DCBX to be done in host mode.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit f0249056eaf2b9a17b2b76a6e099e9b7877e187d
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Apr 21 20:11:23 2017 -0400
+
+    bnxt_en: Fix VF attributes reporting.
+    
+    The .ndo_get_vf_config() is returning the wrong qos attribute.  Fix
+    the code that checks and reports the qos and spoofchk attributes.  The
+    BNXT_VF_QOS and BNXT_VF_LINK_UP flags should not be set by default
+    during init. time.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a82fba8dbfb522bd19b1644bf599135680fd0122
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Apr 21 20:11:22 2017 -0400
+
+    bnxt_en: Pass DCB RoCE app priority to firmware.
+    
+    When the driver gets the RoCE app priority set/delete call through DCBNL,
+    the driver will send the information to the firmware to set up the
+    priority VLAN tag for RDMA traffic.
+    
+    [ New version using the common ETH_P_IBOE constant in if_ether.h ]
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 68a946bb81e07ed0e59a99e0c068d091ed42cc1b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:17 2017 -0400
+
+    bnxt_en: Cap the msix vector with the max completion rings.
+    
+    The current code enables up to the maximum MSIX vectors in the PCIE
+    config space without considering the max completion rings available.
+    An MSIX vector is only useful when it has an associated completion
+    ring, so it is better to cap it.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 932dbf83ba18bdb871e0c03a4ffdd9785f7a9c07
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:16 2017 -0400
+
+    bnxt_en: Use short TX BDs for the XDP TX ring.
+    
+    No offload is performed on the XDP_TX ring so we can use the short TX
+    BDs.  This has the effect of doubling the size of the XDP TX ring so
+    that it now matches the size of the rx ring by default.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 67fea463fd873492ab641459a6d1af0e9ea3c9ce
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:15 2017 -0400
+
+    bnxt_en: Add interrupt test to ethtool -t selftest.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 91725d89b97acea168a94c577d999801c3b3bcfb
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:14 2017 -0400
+
+    bnxt_en: Add PHY loopback to ethtool self-test.
+    
+    It is necessary to disable autoneg before enabling PHY loopback,
+    otherwise link won't come up.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit f7dc1ea6c4c1f31371b7098d6fae0d49dc6cdff1
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:13 2017 -0400
+
+    bnxt_en: Add ethtool mac loopback self test.
+    
+    The mac loopback self test operates in polling mode.  To support that,
+    we need to add functions to open and close the NIC half way.  The half
+    open mode allows the rings to operate without IRQ and NAPI.  We
+    use the XDP transmit function to send the loopback packet.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit eb51365846bc418687af4c4f41b68b6e84cdd449
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:12 2017 -0400
+
+    bnxt_en: Add basic ethtool -t selftest support.
+    
+    Add the basic infrastructure and only firmware tests initially.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit f65a2044a8c988adf16788c51c04ac10dbbdb494
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:11 2017 -0400
+
+    bnxt_en: Add suspend/resume callbacks.
+    
+    Add suspend/resume callbacks using the newer dev_pm_ops method.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 5282db6c794fed3ea8b399bc5305c4078e084f7b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:10 2017 -0400
+
+    bnxt_en: Add ethtool set_wol method.
+    
+    And add functions to set and free magic packet filter.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8e202366dd752564d7f090ba280cc51cbf7bbbd9
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:09 2017 -0400
+
+    bnxt_en: Add ethtool get_wol method.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit d196ece740bf337aa25731cd8cb44660a2a227dd
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:08 2017 -0400
+
+    bnxt_en: Add pci shutdown method.
+    
+    Add pci shutdown method to put device in the proper WoL and power state.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c1ef146a5bd3b286d5c3eb2c9f631b38647c76d3
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:07 2017 -0400
+
+    bnxt_en: Add basic WoL infrastructure.
+    
+    Add code to driver probe function to check if the device is WoL capable
+    and if Magic packet WoL filter is currently set.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8eb992e876a88de7539b1b9e132dd171d865cd2f
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 4 18:14:06 2017 -0400
+
+    bnxt_en: Update firmware interface spec to 1.7.6.2.
+    
+    Features added include WoL and selftest.
+    
+    Signed-off-by: Deepak Khungar <deepak.khungar@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 282ccf6efb7c5d75b0283b66ed487957163ce8fe
+Author: Florian Westphal <fw@strlen.de>
+Date:   Wed Mar 29 17:17:31 2017 +0200
+
+    drivers: add explicit interrupt.h includes
+    
+    These files all use functions declared in interrupt.h, but currently rely
+    on implicit inclusion of this file (via netns/xfrm.h).
+    
+    That won't work anymore when the flow cache is removed so include that
+    header where needed.
+    
+    Signed-off-by: Florian Westphal <fw@strlen.de>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3ed3a83e3f3871c57b18cef09b148e96921236ed
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Mar 28 19:47:31 2017 -0400
+
+    bnxt_en: Fix DMA unmapping of the RX buffers in XDP mode during shutdown.
+    
+    In bnxt_free_rx_skbs(), which is called to free up all RX buffers during
+    shutdown, we need to unmap the page if we are running in XDP mode.
+    
+    Fixes: c61fb99cae51 ("bnxt_en: Add RX page mode support.")
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 23e12c893489ed12ecfccbf866fc62af1bead4b0
+Author: Sankar Patchineelam <sankar.patchineelam@broadcom.com>
+Date:   Tue Mar 28 19:47:30 2017 -0400
+
+    bnxt_en: Correct the order of arguments to netdev_err() in bnxt_set_tpa()
+    
+    Signed-off-by: Sankar Patchineelam <sankar.patchineelam@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 2247925f0942dc4e7c09b1cde45ca18461d94c5f
+Author: Sankar Patchineelam <sankar.patchineelam@broadcom.com>
+Date:   Tue Mar 28 19:47:29 2017 -0400
+
+    bnxt_en: Fix NULL pointer dereference in reopen failure path
+    
+    Net device reset can fail when the h/w or f/w is in a bad state.
+    Subsequent netdevice open fails in bnxt_hwrm_stat_ctx_alloc().
+    The cleanup invokes bnxt_hwrm_resource_free() which in turn
+    calls bnxt_disable_int().  In this routine, the code segment
+    
+    if (ring->fw_ring_id != INVALID_HW_RING_ID)
+       BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+    
+    results in NULL pointer dereference as cpr->cp_doorbell is not yet
+    initialized, and fw_ring_id is zero.
+    
+    The fix is to initialize cpr fw_ring_id to INVALID_HW_RING_ID before
+    bnxt_init_chip() is invoked.
+    
+    Signed-off-by: Sankar Patchineelam <sankar.patchineelam@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 56f36acd215cf7c28372b2fdb4f33f6900e97e05
+Author: Amritha Nambiar <amritha.nambiar@intel.com>
+Date:   Wed Mar 15 10:39:25 2017 -0700
+
+    mqprio: Modify mqprio to pass user parameters via ndo_setup_tc.
+    
+    The configurable priority to traffic class mapping and the user specified
+    queue ranges are used to configure the traffic class, overriding the
+    hardware defaults when the 'hw' option is set to 0. However, when the 'hw'
+    option is non-zero, the hardware QOS defaults are used.
+    
+    This patch makes it so that we can pass the data the user provided to
+    ndo_setup_tc. This allows us to pull in the queue configuration if the
+    user requested it as well as any additional hardware offload type
+    requested by using a value other than 1 for the hw value.
+    
+    Finally it also provides a means for the device driver to return the level
+    supported for the offload type via the qopt->hw value. Previously we were
+    just always assuming the value to be 1, in the future values beyond just 1
+    may be supported.
+    
+    Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
+    Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 520ad89a54edea84496695d528f73ddcf4a52ea4
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Mar 8 18:44:35 2017 -0500
+
+    bnxt_en: Ignore 0 value in autoneg supported speed from firmware.
+    
+    In some situations, the firmware will return 0 for autoneg supported
+    speed.  This may happen if the firmware detects no SFP module, for
+    example.  The driver should ignore this so that we don't end up with
+    an invalid autoneg setting with nothing advertised.  When SFP module
+    is inserted, we'll get the updated settings from firmware at that time.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit bc39f885a9c3bdbff0a96ecaf07b162a78eff6e4
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Mar 8 18:44:34 2017 -0500
+
+    bnxt_en: Check if firmware LLDP agent is running.
+    
+    Set DCB_CAP_DCBX_HOST capability flag only if the firmware LLDP agent
+    is not running.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b386cd362ffea09d05c56bfa85d104562e860647
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Mar 8 18:44:33 2017 -0500
+
+    bnxt_en: Call bnxt_ulp_stop() during tx timeout.
+    
+    If we call bnxt_reset_task() due to tx timeout, we should call
+    bnxt_ulp_stop() to inform the RDMA driver about the error and the
+    impending reset.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3c2217a675bac22afb149166e0de71809189850d
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Mar 8 18:44:32 2017 -0500
+
+    bnxt_en: Perform function reset earlier during probe.
+    
+    The firmware call to do function reset is done too late.  It is causing
+    the rings that have been reserved to be freed.  In NPAR mode, this bug
+    is causing us to run out of rings.
+    
+    Fixes: 391be5c27364 ("bnxt_en: Implement new scheme to reserve tx rings.")
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 1faaa78f36cb2915ae89138ba5846f87ade85dcb
+Author: Tobias Klauser <tklauser@distanz.ch>
+Date:   Tue Feb 21 15:27:28 2017 +0100
+
+    bnxt_en: use eth_hw_addr_random()
+    
+    Use eth_hw_addr_random() to set a random MAC address in order to make
+    sure bp->dev->addr_assign_type will be properly set to NET_ADDR_RANDOM.
+    
+    Signed-off-by: Tobias Klauser <tklauser@distanz.ch>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 17086399c113d933e1202697f85b8f0f82fcb8ce
+Author: Sathya Perla <sathya.perla@broadcom.com>
+Date:   Mon Feb 20 19:25:18 2017 -0500
+
+    bnxt_en: fix pci cleanup in bnxt_init_one() failure path
+    
+    In the bnxt_init_one() failure path, bar1 and bar2 are not
+    being unmapped.  This commit fixes this issue.  Reorganize the
+    code so that bnxt_init_one()'s failure path and bnxt_remove_one()
+    can call the same function to do the PCI cleanup.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit daf1f1e7841138cb0e48d52c8573a5f064d8f495
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 20 19:25:17 2017 -0500
+
+    bnxt_en: Fix NULL pointer dereference in a failure path during open.
+    
+    If bnxt_hwrm_ring_free() is called during a failure path in bnxt_open(),
+    it is possible that the completion rings have not been allocated yet.
+    In that case, the completion doorbell has not been initialized, and
+    calling bnxt_disable_int() will crash.  Fix it by checking that the
+    completion ring has been initialized before writing to the completion
+    ring doorbell.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4e00338a61998de3502d0428c4f71ffc69772316
+Author: Ray Jui <ray.jui@broadcom.com>
+Date:   Mon Feb 20 19:25:16 2017 -0500
+
+    bnxt_en: Reject driver probe against all bridge devices
+    
+    There are additional SoC devices that use the same device ID for
+    bridge and NIC devices.  The bnxt driver should reject probe against
+    all bridge devices since it's meant to be used with only endpoint
+    devices.
+    
+    Signed-off-by: Ray Jui <ray.jui@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 32b40798c1b40343641f04cdfd09652af70ea0e9
+Author: Deepak Khungar <deepak.khungar@broadcom.com>
+Date:   Sun Feb 12 19:18:18 2017 -0500
+
+    bnxt_en: Added PCI IDs for BCM57452 and BCM57454 ASICs
+    
+    Signed-off-by: Deepak Khungar <deepak.khungar@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b451c8b69e70de299aa6061e1fa6afbb4d7c1f9e
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun Feb 12 19:18:17 2017 -0500
+
+    bnxt_en: Fix bnxt_setup_tc() error message.
+    
+    Add proper punctuation to make the message more clear.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit e70c752f88ed23e6a0f081fa408282c2450c8ce9
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun Feb 12 19:18:16 2017 -0500
+
+    bnxt_en: Print FEC settings as part of the linkup dmesg.
+    
+    Print FEC (Forward Error Correction) autoneg and encoding settings during
+    link up.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 33dac24abbd5a77eefca18fb7ebbd01a3cf1b343
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun Feb 12 19:18:15 2017 -0500
+
+    bnxt_en: Do not setup PHY unless driving a single PF.
+    
+    If it is a VF or an NPAR function, the firmware call to setup the PHY
+    will fail.  Adding this check will prevent unnecessary firmware calls
+    to setup the PHY unless calling from the PF.  This will also eliminate
+    many unnecessary warning messages when the call from a VF or NPAR fails.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 61aad724ec0a685bc83b02b059a3ca0ad3bde6b0
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun Feb 12 19:18:14 2017 -0500
+
+    bnxt_en: Add hardware NTUPLE filter for encapsulated packets.
+    
+    If skb_flow_dissect_flow_keys() returns with the encapsulation flag
+    set, pass the information to the firmware to setup the NTUPLE filter
+    accordingly.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 964fd4801d40ead69a447482c0dd0cd4be495e47
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun Feb 12 19:18:13 2017 -0500
+
+    bnxt_en: Allow NETIF_F_NTUPLE to be enabled on VFs.
+    
+    Commit ae10ae740ad2 ("bnxt_en: Add new hardware RFS mode.") has added
+    code to allow NTUPLE to be enabled on VFs.  So we now remove the
+    BNXT_VF() check in rfs_capable() to allow NTUPLE on VFs.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a79a5276aa2f844bd368c1d3d5a625e1fbefd989
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun Feb 12 19:18:12 2017 -0500
+
+    bnxt_en: Fix ethtool -l pre-set max combined channel.
+    
+    With commit d1e7925e6d80 ("bnxt_en: Centralize logic to reserve rings."),
+    ring allocation for combined rings has become stricter.  A combined
+    ring must now have an rx-tx ring pair.  The pre-set max. for combined
+    rings should now be min(rx, tx).
+    
+    Fixes: d1e7925e6d80 ("bnxt_en: Centralize logic to reserve rings.")
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit cb4d1d6261453677feb54e7a09c23fc7648dd6bc
+Author: Kshitij Soni <kshitij.soni@broadcom.com>
+Date:   Sun Feb 12 19:18:11 2017 -0500
+
+    bnxt_en: Retry failed NVM_INSTALL_UPDATE with defragmentation flag.
+    
+    If the HWRM_NVM_INSTALL_UPDATE command fails with the error code
+    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR, retry the command with
+    a new flag to allow defragmentation.  Since we are checking the
+    response for error code, we also need to take the mutex until
+    we finish reading the response.
+    
+    Signed-off-by: Kshitij Soni <kshitij.soni@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit bac9a7e0f5d6da82478d5e0a2a236158f42d5757
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun Feb 12 19:18:10 2017 -0500
+
+    bnxt_en: Update to firmware interface spec 1.7.0.
+    
+    The new spec has NVRAM defragmentation support which will be used in
+    the next patch to improve ethtool flash operation.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 38413406277fd060f46855ad527f6f8d4cf2652d
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:43 2017 -0500
+
+    bnxt_en: Add support for XDP_TX action.
+    
+    Add dedicated transmit function and transmit completion handler for
+    XDP.  The XDP transmit logic and completion logic are different than
+    regular TX ring.  The TX buffer is recycled back to the RX ring when
+    it completes.
+    
+    v3: Improved the buffer recycling scheme for XDP_TX.
+    
+    v2: Add trace_xdp_exception().
+        Add dma_sync.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Tested-by: Andy Gospodarek <gospo@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c6d30e8391b85e00eb544e6cf047ee0160ee9938
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:42 2017 -0500
+
+    bnxt_en: Add basic XDP support.
+    
+    Add basic ndo_xdp support to setup and query program, configure the NIC
+    to run in rx page mode, and support XDP_PASS, XDP_DROP, XDP_ABORTED
+    actions only.
+    
+    v3: Pass modified offset and length to stack for XDP_PASS.
+        Remove Kconfig option.
+    
+    v2: Added trace_xdp_exception()
+        Added dma_syncs.
+        Added XDP headroom support.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Tested-by: Andy Gospodarek <gospo@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit fa3e93e86cc3d1809fba67cb138883ed4bb74a5f
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:41 2017 -0500
+
+    bnxt_en: Refactor tx completion path.
+    
+    XDP_TX requires a different function to handle completion.  Add a
+    function pointer to handle tx completion logic.  Regular TX rings
+    will be assigned the current bnxt_tx_int() for the ->tx_int()
+    function pointer.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 5f4492493e75dafc5cbb96eabe0f146c2ffb1e3d
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:40 2017 -0500
+
+    bnxt_en: Add a set of TX rings to support XDP.
+    
+    Add logic for an extra set of TX rings for XDP.  If enabled, this
+    set of TX rings equals the number of RX rings and shares the same
+    IRQ as the RX ring set.  A new field bp->tx_nr_rings_xdp is added
+    to keep track of these TX XDP rings.  Adjust all other relevant functions
+    to handle bp->tx_nr_rings_xdp.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a960dec98861b009b4227d2ae3b94a142c83eb96
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:39 2017 -0500
+
+    bnxt_en: Add tx ring mapping logic.
+    
+    To support XDP_TX, we need to add a set of dedicated TX rings, each
+    associated with the NAPI of an RX ring.  To assign XDP rings and regular
+    rings in a flexible way, we add a bp->tx_ring_map[] array to do the
+    remapping.  The netdev txq index is stored in the new field txq_index
+    so that we can retrieve the netdev txq when handling TX completions.
+    In this patch, before we introduce XDP_TX, the mapping is 1:1.
+    
+    v2: Fixed a bug in bnxt_tx_int().
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit d1e7925e6d80ce5f9ef6deb8f3cec7526f5c443c
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:38 2017 -0500
+
+    bnxt_en: Centralize logic to reserve rings.
+    
+    Currently, bnxt_setup_tc() and bnxt_set_channels() have similar and
+    duplicated code to check and reserve rx and tx rings.  Add a new
+    function bnxt_reserve_rings() to centralize the logic.  This will
+    make it easier to add XDP_TX support which requires allocating a
+    new set of TX rings.
+    
+    Also, the tx ring checking logic in bnxt_setup_msix() can be removed.
+    The rings have been reserved before hand.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4e5dbbda4c40a239e2ed4bbc98f2aa320e4dcca2
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:37 2017 -0500
+
+    bnxt_en: Use event bit map in RX path.
+    
+    In the current code, we have separate rx_event and agg_event parameters
+    to keep track of rx and aggregation events.  Combine these events into
+    an u8 event mask with different bits defined for different events.  This
+    way, it is easier to expand the logic to include XDP tx events.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c61fb99cae51958a9096d8540c8c05e74cfa7e59
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:36 2017 -0500
+
+    bnxt_en: Add RX page mode support.
+    
+    This mode is to support XDP.  In this mode, each rx ring is configured
+    with page sized buffers for linear placement of each packet.  MTU will be
+    restricted to what the page sized buffers can support.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b3dba77cf0acb6e44b368979026df975658332bc
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:35 2017 -0500
+
+    bnxt_en: Parameterize RX buffer offsets.
+    
+    Convert the global constants BNXT_RX_OFFSET and BNXT_RX_DMA_OFFSET to
+    device parameters.  This will make it easier to support XDP with
+    headroom support which requires different RX buffer offsets.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 745fc05c9db1f17da076861c7f57507e13f28a3a
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:34 2017 -0500
+
+    bnxt_en: Add bp->rx_dir field for rx buffer DMA direction.
+    
+    When driver is running in XDP mode, rx buffers are DMA mapped as
+    DMA_BIDIRECTIONAL.  Add a field so the code will map/unmap rx buffers
+    according to this field.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 11cd119d31a71b37c2362fc621f225e2aa12aea1
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:33 2017 -0500
+
+    bnxt_en: Don't use DEFINE_DMA_UNMAP_ADDR to store DMA address in RX path.
+    
+    To support XDP_TX, we need the RX buffer's DMA address to transmit the
+    packet.  Convert the DMA address field to a permanent field in
+    bnxt_sw_rx_bd.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 6bb19474391d17954fee9a9997ecca25b35dfd46
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 6 16:55:32 2017 -0500
+
+    bnxt_en: Refactor rx SKB function.
+    
+    Minor refactoring of bnxt_rx_skb() so that it can easily be replaced by
+    a new function that handles packets in a single page.  Also, use a
+    function pointer bp->rx_skb_func() to switch to a new function when
+    we add the new mode in the next patch.
+    
+    Add a new field data_ptr that points to the packet data in the
+    bnxt_sw_rx_bd structure.  The original data field is changed to void
+    pointer so that it can either hold the kmalloc'ed data or a page
+    pointer.
+    
+    The last parameter of bnxt_rx_skb() which was the length parameter is
+    changed to include the payload offset of the packet in the upper 16 bit.
+    The offset is needed to support the rx page mode and is not used in
+    this existing function.
+    
+    v3: Added a new data_ptr parameter to bp->rx_skb_func().  The caller
+    has the option to modify the starting address of the packet.  This
+    will be needed when XDP with headroom support is added.
+    
+    v2: Changed the name of the last parameter to offset_and_len to make the
+    code more clear.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 6ad20165d376fa07919a70e4f43dfae564601829
+Author: Eric Dumazet <edumazet@google.com>
+Date:   Mon Jan 30 08:22:01 2017 -0800
+
+    drivers: net: generalize napi_complete_done()
+    
+    napi_complete_done() allows to opt-in for gro_flush_timeout,
+    added back in linux-3.19, commit 3b47d30396ba
+    ("net: gro: add a per device gro flush timer")
+    
+    This allows for more efficient GRO aggregation without
+    sacrificing latencies.
+    
+    Signed-off-by: Eric Dumazet <edumazet@google.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 90c694bb71819fb5bd3501ac397307d7e41ddeca
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Jan 25 02:55:09 2017 -0500
+
+    bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status().
+    
+    bnxt_get_port_module_status() calls bnxt_update_link() which expects
+    RTNL to be held.  In bnxt_sp_task() that does not hold RTNL, we need to
+    call it with a prior call to bnxt_rtnl_lock_sp() and the call needs to
+    be moved to the end of bnxt_sp_task().
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 0eaa24b971ae251ae9d3be23f77662a655532063
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Jan 25 02:55:08 2017 -0500
+
+    bnxt_en: Fix RTNL lock usage on bnxt_update_link().
+    
+    bnxt_update_link() is called from multiple code paths.  Most callers,
+    such as open, ethtool, already hold RTNL.  Only the caller bnxt_sp_task()
+    does not.  So it is a bug to take RTNL inside bnxt_update_link().
+    
+    Fix it by removing the RTNL inside bnxt_update_link().  The function
+    now expects the caller to always hold RTNL.
+    
+    In bnxt_sp_task(), call bnxt_rtnl_lock_sp() before calling
+    bnxt_update_link().  We also need to move the call to the end of
+    bnxt_sp_task() since it will be clearing the BNXT_STATE_IN_SP_TASK bit.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a551ee94ea723b4af9b827c7460f108bc13425ee
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Jan 25 02:55:07 2017 -0500
+
+    bnxt_en: Fix bnxt_reset() in the slow path task.
+    
+    In bnxt_sp_task(), we set a bit BNXT_STATE_IN_SP_TASK so that bnxt_close()
+    will synchronize and wait for bnxt_sp_task() to finish.  Some functions
+    in bnxt_sp_task() require us to clear BNXT_STATE_IN_SP_TASK and then
+    acquire rtnl_lock() to prevent race conditions.
+    
+    There are some bugs related to this logic. This patch refactors the code
+    to have common bnxt_rtnl_lock_sp() and bnxt_rtnl_unlock_sp() to handle
+    the RTNL and the clearing/setting of the bit.  Multiple functions will
+    need the same logic.  We also need to move bnxt_reset() to the end of
+    bnxt_sp_task().  Functions that clear BNXT_STATE_IN_SP_TASK must be the
+    last functions to be called in bnxt_sp_task().  The common scheme will
+    handle the condition properly.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 719ca8111402aa6157bd83a3c966d184db0d8956
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Jan 17 22:07:19 2017 -0500
+
+    bnxt_en: Fix "uninitialized variable" bug in TPA code path.
+    
+    In the TPA GRO code path, initialize the tcp_opt_len variable to 0 so
+    that it will be correct for packets without TCP timestamps.  The bug
+    caused the SKB fields to be incorrectly set up for packets without
+    TCP timestamps, leading to these packets being rejected by the stack.
+    
+    Reported-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
+    Acked-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 2f5938467bd7f34e59a1d6d3809f5970f62e194b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jan 13 01:32:04 2017 -0500
+
+    bnxt_en: Add the ulp_sriov_cfg hooks for bnxt_re RDMA driver.
+    
+    Add the ulp_sriov_cfg callbacks when the number of VFs is changing.  This
+    allows the RDMA driver to provision RDMA resources for the VFs.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 5ad2cbeed74bd1e89ac4ba14288158ec7eb167da
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jan 13 01:32:03 2017 -0500
+
+    bnxt_en: Add support for ethtool -p.
+    
+    Add LED blinking code to support ethtool -p on the PF.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit f183886c0d798ca3cf0a51e8cab3c1902fbd1e8b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jan 13 01:32:02 2017 -0500
+
+    bnxt_en: Update firmware interface spec to 1.6.1.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 341138c3e6afa8e77f9f3e773d72b37022dbcee8
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jan 13 01:32:01 2017 -0500
+
+    bnxt_en: Clear TPA flags when BNXT_FLAG_NO_AGG_RINGS is set.
+    
+    Commit bdbd1eb59c56 ("bnxt_en: Handle no aggregation ring gracefully.")
+    introduced the BNXT_FLAG_NO_AGG_RINGS flag.  For consistency,
+    bnxt_set_tpa_flags() should also clear TPA flags when there are no
+    aggregation rings.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b742995445fbac874f5fe19ce2afc76c7a6ac2cf
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jan 13 01:32:00 2017 -0500
+
+    bnxt_en: Fix compiler warnings when CONFIG_RFS_ACCEL is not defined.
+    
+    CC [M]  drivers/net/ethernet/broadcom/bnxt/bnxt.o
+    drivers/net/ethernet/broadcom/bnxt/bnxt.c:4947:21: warning: ‘bnxt_get_max_func_rss_ctxs’ defined but not used [-Wunused-function]
+     static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
+                         ^
+      CC [M]  drivers/net/ethernet/broadcom/bnxt/bnxt.o
+    drivers/net/ethernet/broadcom/bnxt/bnxt.c:4956:21: warning: ‘bnxt_get_max_func_vnics’ defined but not used [-Wunused-function]
+     static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
+                         ^
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 5944701df90d9577658e2354cc27c4ceaeca30fe
+Author: stephen hemminger <stephen@networkplumber.org>
+Date:   Fri Jan 6 19:12:53 2017 -0800
+
+    net: remove useless memset's in drivers get_stats64
+    
+    In dev_get_stats() the statistic structure storage has already been
+    zeroed. Therefore network drivers do not need to call memset() again.
+    
+    Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221
+Author: stephen hemminger <stephen@networkplumber.org>
+Date:   Fri Jan 6 19:12:52 2017 -0800
+
+    net: make ndo_get_stats64 a void function
+    
+    The network device operation for reading statistics is only called
+    in one place, and it ignores the return value. Having a structure
+    return value is potentially confusing because some future driver could
+    incorrectly assume that the return value was used.
+    
+    Fix all drivers with ndo_get_stats64 to have a void function.
+    
+    Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit bdbd1eb59c565c56a74d21076e2ae8706de00ecd
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:43 2016 -0500
+
+    bnxt_en: Handle no aggregation ring gracefully.
+    
+    The current code assumes that we will always have at least 2 rx rings, 1
+    will be used as an aggregation ring for TPA and jumbo page placements.
+    However, it is possible, especially on a VF, that there is only 1 rx
+    ring available.  In this scenario, the current code will fail to initialize.
+    To handle it, we need to properly set up only 1 ring without aggregation.
+    Set a new flag BNXT_FLAG_NO_AGG_RINGS for this condition and add logic to
+    set up the chip to place RX data linearly into a single buffer per packet.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 486b5c22ea1d35e00e90dd79a32a9ee530b18915
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:42 2016 -0500
+
+    bnxt_en: Set default completion ring for async events.
+    
+    With the added support for the bnxt_re RDMA driver, both drivers can be
+    allocating completion rings in any order.  The firmware does not know
+    which completion ring should be receiving async events.  Add an
+    extra step to tell firmware the completion ring number for receiving
+    async events after bnxt_en allocates the completion rings.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 391be5c2736456f032fe0265031ecfe17aee84a0
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:41 2016 -0500
+
+    bnxt_en: Implement new scheme to reserve tx rings.
+    
+    In order to properly support TX rate limiting in SRIOV VF functions or
+    NPAR functions, firmware needs better control over tx ring allocations.
+    The new scheme requires the driver to reserve the number of tx rings
+    and to query to see if the requested number of tx rings is reserved.
+    The driver will use the new scheme when the firmware interface spec is
+    1.6.1 or newer.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit dda0e7465f040ed814d4a5c98c6bf042e59cba69
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:40 2016 -0500
+
+    bnxt_en: Add IPV6 hardware RFS support.
+    
+    Accept ipv6 flows in .ndo_rx_flow_steer() and support ETHTOOL_GRXCLSRULE
+    ipv6 flows.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadocm.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8427af811a2fcbbf0c71a4b1f904f2442abdcf39
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:39 2016 -0500
+
+    bnxt_en: Assign additional vnics to VFs.
+    
+    Assign additional vnics to VFs whenever possible so that NTUPLE can be
+    supported on the VFs.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit ae10ae740ad2befd92b6f5b2ab39220bce6e5da2
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:38 2016 -0500
+
+    bnxt_en: Add new hardware RFS mode.
+    
+    The existing hardware RFS mode uses one hardware RSS context block
+    per ring just to calculate the RSS hash.  This is very wasteful and
+    prevents VF functions from using it.  The new hardware mode shares
+    the same hardware RSS context for RSS placement and RFS steering.
+    This allows VFs to enable RFS.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8079e8f107bf02e1e5ece89239dd2fb475a4735f
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:37 2016 -0500
+
+    bnxt_en: Refactor code that determines RFS capability.
+    
+    Add function bnxt_rfs_supported() that determines if the chip supports
+    RFS.  Refactor the existing function bnxt_rfs_capable() that determines
+    if run-time conditions support RFS.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8fdefd63c203d9b2955d679704f4ed92bf40752c
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:36 2016 -0500
+
+    bnxt_en: Add function to get vnic capability.
+    
+    The new vnic RSS capability will enhance NTUPLE support, to be added
+    in subsequent patches.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 5910906ca9ee32943f67db24917f78a9ad1087db
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:35 2016 -0500
+
+    bnxt_en: Refactor TPA code path.
+    
+    Call tcp_gro_complete() in the common code path instead of the chip-
+    specific method.  The newer 5731x method is missing the call.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcmo.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 68515a186cf8a8f97956eaea5829277752399f58
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:34 2016 -0500
+
+    bnxt_en: Fix and clarify link_info->advertising.
+    
+    The advertising field is closely related to the auto_link_speeds field.
+    The former is the user setting while the latter is the firmware setting.
+    Both should be u16.  We should use the advertising field in
+    bnxt_get_link_ksettings because the auto_link_speeds field may not
+    be updated with the latest from the firmware yet.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 9d8bc09766f1a229b2d204c713a1cfc6c7fa1bb1
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:33 2016 -0500
+
+    bnxt_en: Improve the IRQ disable sequence during shutdown.
+    
+    The IRQ is disabled by writing to the completion ring doorbell.  This
+    should be done before the hardware completion ring is freed for correctness.
+    The current code disables IRQs after all the completion rings are freed.
+    
+    Fix it by calling bnxt_disable_int_sync() before freeing the completion
+    rings.  Rearrange the code to avoid forward declaration.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadocm.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit e7b9569102995ebc26821789628eef45bd9840d8
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:32 2016 -0500
+
+    bnxt_en: Use napi_complete_done()
+    
+    For better busy polling and GRO support.  Do not re-arm IRQ if
+    napi_complete_done() returns false.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b356a2e729cec145a648d22ba5686357c009da25
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Thu Dec 29 12:13:31 2016 -0500
+
+    bnxt_en: Remove busy poll logic in the driver.
+    
+    Use native NAPI polling instead.  The next patch will complete the work
+    by switching to use napi_complete_done()
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a588e4580a7ecb715dab8bf09725b97aa0e0e3a0
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Dec 7 00:26:21 2016 -0500
+
+    bnxt_en: Add interface to support RDMA driver.
+    
+    Since the network driver and RDMA driver operate on the same PCI function,
+    we need to create an interface to allow the RDMA driver to share resources
+    with the network driver.
+    
+    1. Create a new bnxt_en_dev struct which will be returned by
+    bnxt_ulp_probe() upon success.  After that, all calls from the RDMA driver
+    to bnxt_en will pass a pointer to this struct.
+    
+    2. This struct contains additional function pointers to register, request
+    msix, send fw messages, register for async events.
+    
+    3. If the RDMA driver wants to enable RDMA on the function, it needs to
+    call the function pointer bnxt_register_device().  A ulp_ops structure
+    is passed for RCU protected upcalls from bnxt_en to the RDMA driver.
+    
+    4. The RDMA driver can call firmware APIs using the bnxt_send_fw_msg()
+    function pointer.
+    
+    5. 1 stats context is reserved when the RDMA driver registers.  MSIX
+    and completion rings are reserved when the RDMA driver calls
+    bnxt_request_msix() function pointer.
+    
+    6. When the RDMA driver calls bnxt_unregister_device(), all RDMA resources
+    will be cleaned up.
+    
+    v2: Fixed 2 uninitialized variable warnings.
+    
+    Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a1653b13f14c714f9bfd5e10c603a37c3bcba7b6
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Dec 7 00:26:20 2016 -0500
+
+    bnxt_en: Refactor the driver registration function with firmware.
+    
+    The driver register function with firmware consists of passing version
+    information and registering for async events.  To support the RDMA driver,
+    the async events that we need to register may change.  Separate the
+    driver register function into 2 parts so that we can just update the
+    async events for the RDMA driver.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit e4060d306b5196966d74e05dee48e6c3a52aaad4
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Dec 7 00:26:19 2016 -0500
+
+    bnxt_en: Reserve RDMA resources by default.
+    
+    If the device supports RDMA, we'll setup network default rings so that
+    there are enough minimum resources for RDMA, if possible.  However, the
+    user can still increase network rings to the max if he wants.  The actual
+    RDMA resources won't be reserved until the RDMA driver registers.
+    
+    v2: Fix compile warning when BNXT_CONFIG_SRIOV is not set.
+    
+    Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 7b08f661ab80e87dcdba7ab9a460fe2c9d08bf5b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Dec 7 00:26:18 2016 -0500
+
+    bnxt_en: Improve completion ring allocation for VFs.
+    
+    All available remaining completion rings not used by the PF should be
+    made available for the VFs so that there are enough rings in the VF to
+    support RDMA.  The earlier workaround code of capping the rings by the
+    statistics context is removed.
+    
+    When SRIOV is disabled, call a new function bnxt_restore_pf_fw_resources()
+    to restore FW resources.  Later on we need to add some logic to account
+    for RDMA resources.
+    
+    Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit aa8ed021ab515a93f2a052e9cc80320882889698
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Dec 7 00:26:17 2016 -0500
+
+    bnxt_en: Move function reset to bnxt_init_one().
+    
+    Now that MSIX is enabled in bnxt_init_one(), resources may be allocated by
+    the RDMA driver before the network device is opened.  So we cannot do
+    function reset in bnxt_open() which will clear all the resources.
+    
+    The proper place to do function reset now is in bnxt_init_one().
+    If we get AER, we'll do function reset as well.
+    
+    Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 7809592d3e2ec79cd1feab0cc96169d22f6ffee1
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Dec 7 00:26:16 2016 -0500
+
+    bnxt_en: Enable MSIX early in bnxt_init_one().
+    
+    To better support the new RDMA driver, we need to move pci_enable_msix()
+    from bnxt_open() to bnxt_init_one().  This way, MSIX vectors are available
+    to the RDMA driver whether the network device is up or down.
+    
+    Part of the existing bnxt_setup_int_mode() function is now refactored into
+    a new bnxt_init_int_mode().  bnxt_init_int_mode() is called during
+    bnxt_init_one() to enable MSIX.  The remaining logic in
+    bnxt_setup_int_mode() to map the IRQs to the completion rings is called
+    during bnxt_open().
+    
+    v2: Fixed compile warning when CONFIG_BNXT_SRIOV is not set.
+    
+    Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 33c2657eb688a063ab9cbe11fd4d18c93c7945e1
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Dec 7 00:26:15 2016 -0500
+
+    bnxt_en: Add bnxt_set_max_func_irqs().
+    
+    By refactoring existing code into this new function.  The new function
+    will be used in subsequent patches.
+    
+    v2: Fixed compile warning when CONFIG_BNXT_SRIOV is not set.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 89aa8445cd4e8c2556c40d42dd0ceb2cbb96ba78
+Author: Pan Bian <bianpan2016@163.com>
+Date:   Sat Dec 3 17:56:17 2016 +0800
+
+    netdev: broadcom: propagate error code
+    
+    Function bnxt_hwrm_stat_ctx_alloc() always returns 0, even if the call
+    to _hwrm_send_message() fails. It may be better to propagate the errors
+    to the caller of bnxt_hwrm_stat_ctx_alloc().
+    
+    Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188661
+    
+    Signed-off-by: Pan Bian <bianpan2016@163.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c77192f2042537b1e0e5f520db91e4d28778195f
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Dec 2 21:17:18 2016 -0500
+
+    bnxt_en: Add PFC statistics.
+    
+    Report PFC statistics to ethtool -S and DCBNL.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 7df4ae9fe85567a1710048da8229bd85e0da9df7
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Dec 2 21:17:17 2016 -0500
+
+    bnxt_en: Implement DCBNL to support host-based DCBX.
+    
+    Support only IEEE DCBX initially.  Add IEEE DCBNL ops and functions to
+    get and set the hardware DCBX parameters.  The DCB code is conditional on
+    Kconfig CONFIG_BNXT_DCB.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 87c374ded0b2cfe50bb1e7648a4ca06df13fa399
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Dec 2 21:17:16 2016 -0500
+
+    bnxt_en: Update firmware header file to latest 1.6.0.
+    
+    Latest interface has the latest DCB command structs.  Get and store the
+    max number of lossless TCs the hardware can support.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c5e3deb8a38453037b89e0b0485d3b031896e8eb
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Dec 2 21:17:15 2016 -0500
+
+    bnxt_en: Re-factor bnxt_setup_tc().
+    
+    Add a new function bnxt_setup_mq_tc() to handle MQPRIO.  This new function
+    will be called during ETS setup when we add DCBNL in the next patch.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 57aac71b3e9ed890cf2219dd980c36f859b43d6a
+Author: Christophe Jaillet <christophe.jaillet@wanadoo.fr>
+Date:   Tue Nov 22 06:14:40 2016 +0100
+
+    bnxt_en: Fix a VXLAN vs GENEVE issue
+    
+    Knowing that:
+      #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN        (0x1UL << 0)
+      #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       (0x5UL << 0)
+    and that 'bnxt_hwrm_tunnel_dst_port_alloc()' is only called with one of
+    these 2 constants, the TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE can not
+    trigger.
+    
+    Replace the bit test that overlap by an equality test, just as in
+    'bnxt_hwrm_tunnel_dst_port_free()' above.
+    
+    Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit e5f6f564fd191d365fcd775c06a732a488205588
+Author: Eric Dumazet <edumazet@google.com>
+Date:   Wed Nov 16 06:31:52 2016 -0800
+
+    bnxt: add a missing rcu synchronization
+    
+    Add a missing synchronize_net() call to avoid potential use after free,
+    since we explicitly call napi_hash_del() to factorize the RCU grace
+    period.
+    
+    Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.")
+    Signed-off-by: Eric Dumazet <edumazet@google.com>
+    Cc: Michael Chan <michael.chan@broadcom.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a011952a1a465258ab006a8613a41aa5367d2274
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Nov 16 21:13:10 2016 -0500
+
+    bnxt_en: Add ethtool -n|-N rx-flow-hash support.
+    
+    To display and modify the RSS hash.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 87da7f796d5e44311ea69afb6f4220d43a89382e
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Nov 16 21:13:09 2016 -0500
+
+    bnxt_en: Add UDP RSS support for 57X1X chips.
+    
+    The newer chips have proper support for 4-tuple UDP RSS.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 286ef9d64ea7435a1e323d12b44a309e15cbff0e
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Nov 16 21:13:08 2016 -0500
+
+    bnxt_en: Enhance autoneg support.
+    
+    On some dual port NICs, the speed setting on one port can affect the
+    available speed on the other port.  Add logic to detect these changes
+    and adjust the advertised speed settings when necessary.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 16d663a69f4a1f3534e780e35d50142b98cf1279
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed Nov 16 21:13:07 2016 -0500
+
+    bnxt_en: Update firmware interface spec to 1.5.4.
+    
+    Use the new FORCE_LINK_DWN bit to shutdown link during close.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 73b9bad63ae3c902ce64221d10a0d371d059748d
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Nov 11 00:11:43 2016 -0500
+
+    bnxt_en: Fix VF virtual link state.
+    
+    If the physical link is down and the VF virtual link is set to "enable",
+    the current code does not always work.  If the link is down but the
+    cable is attached, the firmware returns LINK_SIGNAL instead of
+    NO_LINK.  The current code is treating LINK_SIGNAL as link up.
+    The fix is to treat link as down when the link_status != LINK.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3ffb6a39b751b635a0c50b650064c38b8d371ef2
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Nov 11 00:11:42 2016 -0500
+
+    bnxt_en: Fix ring arithmetic in bnxt_setup_tc().
+    
+    The logic is missing the check on whether the tx and rx rings are sharing
+    completion rings or not.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit ef8d759b5251ee9d6784fe53d90220bd91ee477f
+Author: Eric Dumazet <edumazet@google.com>
+Date:   Tue Nov 8 11:06:53 2016 -0800
+
+    bnxt_en: do not call napi_hash_add()
+    
+    This is automatically done from netif_napi_add(), and we want to not
+    export napi_hash_add() anymore in the following patch.
+    
+    Signed-off-by: Eric Dumazet <edumazet@google.com>
+    Cc: Michael Chan <michael.chan@broadcom.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit e1c6dccaf3af291488fbad155d7ee6bc29db262a
+Author: Jarod Wilson <jarod@redhat.com>
+Date:   Mon Oct 17 15:54:04 2016 -0400
+
+    ethernet/broadcom: use core min/max MTU checking
+    
+    tg3: min_mtu 60, max_mtu 9000/1500
+    
+    bnxt: min_mtu 60, max_mtu 9000
+    
+    bnx2x: min_mtu 46, max_mtu 9600
+    - Fix up ETH_OVREHEAD -> ETH_OVERHEAD while we're in here, remove
+      duplicated defines from bnx2x_link.c.
+    
+    bnx2: min_mtu 46, max_mtu 9000
+    - Use more standard ETH_* defines while we're at it.
+    
+    bcm63xx_enet: min_mtu 46, max_mtu 2028
+    - compute_hw_mtu was made largely pointless, and thus merged back into
+      bcm_enet_change_mtu.
+    
+    b44: min_mtu 60, max_mtu 1500
+    
+    CC: netdev@vger.kernel.org
+    CC: Michael Chan <michael.chan@broadcom.com>
+    CC: Sony Chacko <sony.chacko@qlogic.com>
+    CC: Ariel Elior <ariel.elior@qlogic.com>
+    CC: Dept-HSGLinuxNICDev@qlogic.com
+    CC: Siva Reddy Kallam <siva.kallam@broadcom.com>
+    CC: Prashant Sreedharan <prashant@broadcom.com>
+    Signed-off-by: Jarod Wilson <jarod@redhat.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 79aab093a0b5370d7fc4e99df75996f4744dc03f
+Author: Moshe Shemesh <moshe@mellanox.com>
+Date:   Thu Sep 22 12:11:15 2016 +0300
+
+    net: Update API for VF vlan protocol 802.1ad support
+    
+    Introduce new rtnl UAPI that exposes a list of vlans per VF, giving
+    the ability for user-space application to specify it for the VF, as an
+    option to support 802.1ad.
+    We adjusted IP Link tool to support this option.
+    
+    For future use cases, the new UAPI supports multiple vlans. For now we
+    limit the list size to a single vlan in kernel.
+    Add IFLA_VF_VLAN_LIST in addition to IFLA_VF_VLAN to keep backward
+    compatibility with older versions of IP Link tool.
+    
+    Add a vlan protocol parameter to the ndo_set_vf_vlan callback.
+    We kept 802.1Q as the drivers' default vlan protocol.
+    Suitable ip link tool command examples:
+      Set vf vlan protocol 802.1ad:
+        ip link set eth0 vf 1 vlan 100 proto 802.1ad
+      Set vf to VST (802.1Q) mode:
+        ip link set eth0 vf 1 vlan 100 proto 802.1Q
+      Or by omitting the new parameter
+        ip link set eth0 vf 1 vlan 100
+    
+    Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
+    Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 878786d95e07ce2f5fb6e3cd8a6c2ed320339196
+Author: Rob Swindell <swindell@broadcom.com>
+Date:   Tue Sep 20 03:36:33 2016 -0400
+
+    bnxt_en: Fix build error for kernels without RTC-LIB
+    
+    bnxt_hwrm_fw_set_time() now returns -EOPNOTSUPP when built for kernel
+    without RTC_LIB.  Setting the firmware time is not critical to the
+    successful completion of the firmware update process.
+    
+    Signed-off-by: Rob Swindell <Rob.Swindell@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 350a714960eb8a980c913c9be5a96bb18b2fe9da
+Author: Eddie Wai <eddie.wai@broadcom.com>
+Date:   Mon Sep 19 03:58:09 2016 -0400
+
+    bnxt_en: Fixed the VF link status after a link state change
+    
+    The VF link state can be changed via the 'ip link set' cmd.
+    Currently, the new link state does not take effect immediately.
+    
+    The fix is for the PF to send a link change async event to the
+    designated VF after a VF link state change.  This async event will
+    trigger the VF to update the link status.
+    
+    Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit ae8e98a6fa7a73917196c507e43414ea96b6a0fc
+Author: Deepak Khungar <deepak.khungar@broadcom.com>
+Date:   Mon Sep 19 03:58:08 2016 -0400
+
+    bnxt_en: Support for "ethtool -r" command
+    
+    Restart autoneg if autoneg is enabled.
+    
+    Signed-off-by: Deepak Khungar <deepak.khungar@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4ffcd582301bd020b1f9d00c55473af305ec19b5
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Sep 19 03:58:07 2016 -0400
+
+    bnxt_en: Pad TX packets below 52 bytes.
+    
+    The hardware has a limitation that it won't pass host to BMC loopback
+    packets below 52-bytes.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 001154eb242b5a6667b74e5cf20873fb75f1b9d3
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Sep 19 03:58:06 2016 -0400
+
+    bnxt_en: Call firmware to approve the random VF MAC address.
+    
+    After generating the random MAC address for VF, call the firmware to
+    approve it.  This step serves 2 purposes.  Some hypervisor (e.g. ESX)
+    wants to approve the MAC address.  2nd, the call will setup the
+    proper forwarding database in the internal switch.
+    
+    We need to unlock the hwrm_cmd_lock mutex before calling bnxt_approve_mac().
+    We can do that because we are at the end of the function and all the
+    previous firmware response data has been copied.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 7cc5a20e38fcaf395ac59e7ed6c3decb575a0dc7
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Sep 19 03:58:05 2016 -0400
+
+    bnxt_en: Re-arrange bnxt_hwrm_func_qcaps().
+    
+    Re-arrange the code so that the generation of the random MAC address for
+    the VF is at the end of the function.  The next patch will add one more step
+    to call bnxt_approve_mac() to get the firmware to approve the random MAC
+    address.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 47f8e8b9bbbbe00740786bd1da0d5097d45ba46b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Sep 19 03:58:04 2016 -0400
+
+    bnxt_en: Fix ethtool -l|-L inconsistent channel counts.
+    
+    The existing code is inconsistent in reporting and accepting the combined
+    channel count.  bnxt_get_channels() reports maximum combined as the
+    maximum rx count.  bnxt_set_channels() accepts combined count that
+    cannot be bigger than max rx or max tx.
+    
+    For example, if max rx = 2 and max tx = 1, we report max supported
+    combined to be 2.  But if the user tries to set combined to 2, it will
+    fail because 2 is bigger than max tx which is 1.
+    
+    Fix the code to be consistent.  Max allowed combined = max(max_rx, max_tx).
+    We will accept a combined channel count <= max(max_rx, max_tx).
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 5ac67d8bc753b122175e682274599338b3ee7d42
+Author: Rob Swindell <Rob.Swindell@broadcom.com>
+Date:   Mon Sep 19 03:58:03 2016 -0400
+
+    bnxt_en: Added support for Secure Firmware Update
+    
+    Using Ethtool flashdev command, entire NVM package (*.pkg) files
+    may now be staged into the "update" area of the NVM and subsequently
+    verified and installed by the firmware using the newly introduced
+    command: NVM_INSTALL_UPDATE.
+    
+    We also introduce use of the new firmware command FW_SET_TIME so that the
+    NVM-resident package installation log contains valid time-stamps.
+    
+    Signed-off-by: Rob Swindell <Rob.Swindell@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 441cabbbf1bd0b99e283c9116fe430e53ee67a4a
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Sep 19 03:58:02 2016 -0400
+
+    bnxt_en: Update to firmware interface spec 1.5.1.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit adbc830545003c4b7494c903654bea22e5a66bb4
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Sep 19 03:58:01 2016 -0400
+
+    bnxt_en: Simplify PCI device names and add additional PCI IDs.
+    
+    Remove "Single-port/Dual-port" from the device names.  Dual-port devices
+    will appear as 2 separate devices, so no need to call each a dual-port
+    device.  Use a more generic name for VF devices belonging to the same
+    chip family.  Add some remaining NPAR device IDs.
+    
+    Signed-off-by: David Christensen <david.christensen@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8d6be8b627389c6dc7e0ea2455a7542c8a2a16a7
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Sep 19 03:58:00 2016 -0400
+
+    bnxt_en: Use RSS flags defined in the bnxt_hsi.h file.
+    
+    And remove redundant definitions of the same flags.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 9d13744bb75078175ab49408f2abb980e4dbccc9
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Sep 5 01:57:35 2016 -0400
+
+    bnxt_en: Fix TX push operation on ARM64.
+    
+    There is a code path where we are calling __iowrite64_copy() on
+    an address that is not 64-bit aligned.  This causes an exception on
+    some architectures such as arm64.  Fix that code path by using
+    __iowrite32_copy().
+    
+    Reported-by: JD Zheng <jiandong.zheng@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 1f681688aaf1126df981615064a68a0dced458ef
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jul 25 12:33:37 2016 -0400
+
+    bnxt_en: Add new NPAR and dual media device IDs.
+    
+    Add 5741X/5731X NPAR device IDs and dual media SFP/10GBase-T device IDs.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a23049091d57f4bdc47f16fce01c371647d15dd7
+Author: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date:   Mon Jul 25 12:33:36 2016 -0400
+
+    bnxt_en: Log a message, if enabling NTUPLE filtering fails.
+    
+    If there are not enough resources to enable ntuple filtering,
+    log a warning message.
+    
+    v2: Use single message and add missing newline.
+    
+    Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a54c4d74989b769014b359e5b66f3e571d903d25
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jul 25 12:33:35 2016 -0400
+
+    bnxt_en: Improve ntuple filters by checking destination MAC address.
+    
+    Include the destination MAC address in the ntuple filter structure.  The
+    current code assumes that the destination MAC address is always the MAC
+    address of the NIC.  This may not be true if there are macvlans, for
+    example.  Add destination MAC address checking and configure the filter
+    correctly using the correct index for the destination MAC address.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit cbce91cad4ee39070bf3c7873767194e4be88e16
+Author: Florian Fainelli <f.fainelli@gmail.com>
+Date:   Mon Jul 18 13:02:47 2016 -0700
+
+    bnxt_en: Remove locking around txr->dev_state
+    
+    txr->dev_state was not consistently manipulated with the acquisition of
+    the per-queue lock, after further inspection the lock does not seem
+    necessary, either the value is read as BNXT_DEV_STATE_CLOSING or 0.
+    
+    Reported-by: coverity (CID 1339583)
+    Fixes: c0c050c58d840 ("bnxt_en: New Broadcom ethernet driver.")
+    Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit fa853dda19a1878d2a586de19f02bc9fed052425
+Author: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+Date:   Mon Jul 18 07:15:25 2016 -0400
+
+    bnxt_en: Add BCM58700 PCI device ID for NS2 Nitro.
+    
+    A bridge device in NS2 has the same device ID as the ethernet controller.
+    Add check to avoid probing the bridge device.
+    
+    Signed-off-by: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+    Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit dc52c6c70e0066e9cef886907f820411bebe8e07
+Author: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+Date:   Mon Jul 18 07:15:24 2016 -0400
+
+    bnxt_en: Workaround Nitro A0 RX hardware bug (part 4).
+    
+    Allocate special vnic for dropping packets not matching the RX filters.
+    First vnic is for normal RX packets and the driver will drop all
+    packets on the 2nd vnic.
+    
+    Signed-off-by: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 10bbdaf5e4879fd7fc51f25c84d7b10de16cbe0e
+Author: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+Date:   Mon Jul 18 07:15:23 2016 -0400
+
+    bnxt_en: Workaround Nitro A0 hardware RX bug (part 3).
+    
+    Allocate napi for special vnic, packets arriving on this
+    napi will simply be dropped and the buffers will be replenished back
+    to the HW.
+    
+    Signed-off-by: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+    Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 765951938e2fe2e30571ef4a7de6a46659ce4c68
+Author: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+Date:   Mon Jul 18 07:15:22 2016 -0400
+
+    bnxt_en: Workaround Nitro A0 hardware RX bug (part 2).
+    
+    The hardware is unable to drop rx packets not matching the RX filters.  To
+    workaround it, we create a special VNIC and configure the hardware to
+    direct all packets not matching the filters to it.  We then setup the
+    driver to drop packets received on this VNIC.
+    
+    This patch creates the infrastructure for this VNIC, reserves a
+    completion ring, and rx rings.  Only shared completion ring mode is
+    supported.  The next 2 patches add a NAPI to handle packets from this
+    VNIC and the setup of the VNIC.
+    
+    Signed-off-by: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 94ce9caa0f75b0d56e69550e84d7a1653f0ef3b0
+Author: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+Date:   Mon Jul 18 07:15:21 2016 -0400
+
+    bnxt_en: Workaround Nitro A0 hardware RX bug (part 1).
+    
+    Nitro A0 has a hardware bug in the rx path.  The workaround is to create
+    a special COS context as a path for non-RSS (non-IP) packets.  Without this
+    workaround, the chip may stall when receiving RSS and non-RSS packets.
+    
+    Add infrastructure to allow 2 contexts (RSS and CoS) per VNIC.  Allocate
+    and configure the CoS context for Nitro A0.
+    
+    Signed-off-by: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3e8060fa837630f6fb4acbf59ba588c6df5b2f50
+Author: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+Date:   Mon Jul 18 07:15:20 2016 -0400
+
+    bnxt_en: Add basic support for Nitro in North Star 2.
+    
+    Nitro is the embedded version of the ethernet controller in the North
+    Star 2 SoC.  Add basic code to recognize the chip ID and disable
+    the features (ntuple, TPA, ring and port statistics) not supported on
+    Nitro A0.
+    
+    Signed-off-by: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+    Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit f3ea3119ad75dde0ba3e8da4653dbd5a189688e5
+Author: Colin Ian King <colin.king@canonical.com>
+Date:   Fri Jul 8 16:42:48 2016 +0100
+
+    bnxt_en: initialize rc to zero to avoid returning garbage
+    
+    rc is not initialized so it can contain garbage if it is not
+    set by the call to bnxt_read_sfp_module_eeprom_info. Ensure
+    garbage is not returned by initializing rc to 0.
+    
+    Signed-off-by: Colin Ian King <colin.king@canonical.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 09a7636a5b151670072de60767ddf096dc7bd12e
+Author: Dan Carpenter <dan.carpenter@oracle.com>
+Date:   Thu Jul 7 11:23:09 2016 +0300
+
+    bnxt: fix a condition
+    
+    This code generates as static checker warning because htons(ETH_P_IPV6)
+    is always true.  From the context it looks like the && was intended to
+    be !=.
+    
+    Fixes: 94758f8de037 ('bnxt_en: Add GRO logic for BCM5731X chips.')
+    Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 51f307856b60e6b10975654e15bc236aa87b53d7
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jul 1 18:46:29 2016 -0400
+
+    bnxt_en: Allow statistics DMA to be configurable using ethtool -C.
+    
+    The allowable range is 0.25 seconds to 1 second interval.  Default is
+    1 second.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 87027db19c30aafb8ff8d98e1c8802bc920f7b32
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jul 1 18:46:28 2016 -0400
+
+    bnxt_en: Assign netdev->dev_port with port ID.
+    
+    This is useful for multi-function devices.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 17c71ac38134c3369479e34911b2035a85566caf
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jul 1 18:46:27 2016 -0400
+
+    bnxt_en: Allow promiscuous mode for VF if default VLAN is enabled.
+    
+    With a default VLAN, the VF has its own VLAN domain and it can receive
+    all traffic within that domain.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit dc7aadb5133846f738c59da7af3261335af35ad3
+Author: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date:   Fri Jul 1 18:46:26 2016 -0400
+
+    bnxt_en: Increase maximum supported MTU to 9500.
+    
+    Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 550feebf5cb075f7576b3cfe9bcf05abc1ffb8cd
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jul 1 18:46:25 2016 -0400
+
+    bnxt_en: Enable MRU enables bit when configuring VNIC MRU.
+    
+    For correctness, the MRU enables bit must be set when passing the
+    MRU to firmware during vnic configuration.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 93e0b4feb90cc651f7fbdfe07c257a969c51d1bb
+Author: Rob Swindell <rob.swindell@broadcom.com>
+Date:   Fri Jul 1 18:46:24 2016 -0400
+
+    bnxt_en: Add support for firmware updates for additional processors.
+    
+    Add support to the Ethtool FLASHDEV command handler for additional
+    firmware types to cover all the on-chip processors.
+    
+    Signed-off-by: Rob Swindell <rob.swindell@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 08141e0bf4f6cb82d51930e34e6a8e4af46c776f
+Author: Rob Swindell <rob.swindell@broadcom.com>
+Date:   Fri Jul 1 18:46:23 2016 -0400
+
+    bnxt_en: Request firmware reset after successful firmware update
+    
+    Upon successful mgmt processor firmware update, request a self
+    reset upon next PCIe reset (e.g. system reboot).
+    
+    Signed-off-by: Rob Swindell <rob.swindell@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a4c363471f2fa2b0f0abbd9f0563b034340585c3
+Author: Rob Swindell <rob.swindell@broadcom.com>
+Date:   Fri Jul 1 18:46:22 2016 -0400
+
+    bnxt_en: Add support for updating flash more securely
+    
+    To support Secure Firmware Update, we must be able to allocate
+    a staging area in the Flash.  This patch adds support for the
+    "update" type to tell firmware to do that.
+    
+    Signed-off-by: Rob Swindell <rob.swindell@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 2a5bedfa674cf81d60a20a76f456778834bd2123
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jul 1 18:46:21 2016 -0400
+
+    bnxt_en: Do function reset on the 1st PF open only.
+    
+    Calling the firmware to do function reset on the PF will kill all the VFs.
+    To prevent that, we call function reset on the 1st PF open before any VF
+    can be activated.  On subsequent PF opens (with possibly some active VFs),
+    a bit has been set and we'll skip the function reset.  VF driver will
+    always do function reset on every open.  If there is an AER event, we will
+    always do function reset.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a58a3e68037647de78e3461194239a1104f76003
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jul 1 18:46:20 2016 -0400
+
+    bnxt_en: Update firmware spec. to 1.3.0.
+    
+    And update driver version to 1.3.0.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 75362a3fd4e37ff8af1ef5e3d9f2d9d5ccf2f3ab
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Jul 1 18:46:19 2016 -0400
+
+    bnxt_en: VF/NPAR should return -EOPNOTSUPP for unsupported ethtool ops.
+    
+    Returning 0 for doing nothing is confusing to the user.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 7cdd5fc376a51cdf191895c23badd699eddbc901
+Author: Alexander Duyck <aduyck@mirantis.com>
+Date:   Thu Jun 16 12:21:36 2016 -0700
+
+    bnxt: Move GENEVE support from hard-coded port to using port notifier
+    
+    The port number for GENEVE is hard coded into the bnxt driver.  This is the
+    kind of thing we want to avoid going forward.  For now I will integrate
+    this back into the port notifier so that we can change the GENEVE port
+    number if we need to in the future.
+    
+    Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit ad51b8e9f9f4f8172eb7a6219d3005861bfb9a57
+Author: Alexander Duyck <aduyck@mirantis.com>
+Date:   Thu Jun 16 12:21:19 2016 -0700
+
+    bnxt: Update drivers to support unified UDP encapsulation offload functions
+    
+    This patch ends up doing several things.  First it updates the driver to
+    make use of the new unified UDP tunnel offload notifier functions.  In
+    addition I updated the code so that we can work around the bits that were
+    checking for if VXLAN was enabled since we are now using a notifier based
+    setup.
+    
+    Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
+    Acked-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 00c04a928572991d30b2473a7e992c1be8e646f3
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:38 2016 -0400
+
+    bnxt_en: Support new ETHTOOL_{G|S}LINKSETTINGS API.
+    
+    To fully support 25G and 50G link settings.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 93ed8117336485af2cedb069d28f3d4270fb90a1
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:37 2016 -0400
+
+    bnxt_en: Don't allow autoneg on cards that don't support it.
+    
+    Some cards do not support autoneg.  The current code does not prevent the
+    user from enabling autoneg with ethtool on such cards, causing confusion.
+    Firmware provides the autoneg capability information and we just need to
+    store it in the support_auto_speeds field in bnxt_link_info struct.
+    The ethtool set_settings() call will check this field before proceeding
+    with autoneg.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b24eb6ae7058ca1a42b0532489e5f5796c107d65
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:36 2016 -0400
+
+    bnxt_en: Add BCM5731X and BCM5741X device IDs.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 94758f8de037cf5c62eb56287f5d5e937cda8c9b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:35 2016 -0400
+
+    bnxt_en: Add GRO logic for BCM5731X chips.
+    
+    Add bnxt_gro_func_5731x() to handle GRO packets for this chip.  The
+    completion structures used in the new chip have new data to help determine
+    the header offsets.  The offsets can be off by 4 if the packet is an
+    internal loopback packet (e.g. from one VF to another VF).  Some additional
+    logic is added to adjust the offsets if it is a loopback packet.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 309369c9b3f6a8665e581d9014f222b602f6845a
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:34 2016 -0400
+
+    bnxt_en: Refactor bnxt_gro_skb().
+    
+    Newer chips require different logic to handle GRO packets.  So refactor
+    the code so that we can call different functions depending on the chip.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 659c805cc01b3c5a6d972db0408164371a2bab4b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:33 2016 -0400
+
+    bnxt_en: Define the supported chip numbers.
+    
+    Define all the supported chip numbers and chip categories.  Store the
+    chip_num returned by firmware.  If the call to get the version and chip
+    number fails, we should abort.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit ebcd4eeb2a0b4859d7aaa3308b222a30d51a643f
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:32 2016 -0400
+
+    bnxt_en: Add PCI device ID for 57404 NPAR devices.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 567b2abe68551781b725b3b739672da41cb92ef0
+Author: Satish Baddipadige <sbaddipa@broadcom.com>
+Date:   Mon Jun 13 02:25:31 2016 -0400
+
+    bnxt_en: Enable NPAR (NIC Partitioning) Support.
+    
+    NPAR type is read from bnxt_hwrm_func_qcfg.  Do not allow changing link
+    parameters if in NPAR mode since the port is shared among multiple
+    partitions.  The link parameters are set up by firmware.
+    
+    Signed-off-by: Satish Baddipadige <sbaddipa@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit fc0f19294d1ffaf9366b10d966f86e6cf13335a4
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:30 2016 -0400
+
+    bnxt_en: Handle VF_CFG_CHANGE event from firmware.
+    
+    When the VF driver gets this event, the VF configuration has changed (such
+    as default VLAN).  The VF driver will initiate a silent reset to pick up
+    the new configuration.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 6988bd920c6ea53497ed15db947408b7488c9e36
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:29 2016 -0400
+
+    bnxt_en: Add new function bnxt_reset().
+    
+    When a default VLAN is added to the VF, the VF driver needs to reset to
+    pick up the default VLAN ID.  We can use the same tx timeout reset logic
+    to do that, without the debug output.  This new function, with the
+    silent parameter to suppress debug output will now serve both purposes.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit cf6645f8ebc69775a857b7c51928f3ad9e37aa66
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 13 02:25:28 2016 -0400
+
+    bnxt_en: Add function for VF driver to query default VLAN.
+    
+    The PF can setup a default VLAN for a VF.  The default VLAN tag is
+    automatically inserted and stripped without the knowledge of the
+    stack running on the VF.  The VF driver needs to know that default
+    VLAN is enabled as VLAN acceleration on the RX side is no longer
+    supported.  Call netdev_update_features() to fix up the VLAN features
+    as necessary.  Also, VLAN strip mode must be enabled to strip out
+    the default VLAN tag.
+    
+    Only allow VF default VLAN to be set if the firmware spec is >= 1.2.1.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8852ddb4dcdfe6f877a02f79bf2bca9ae63c039a
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 6 02:37:16 2016 -0400
+
+    bnxt_en: Simplify VLAN receive logic.
+    
+    Since both CTAG and STAG rx acceleration must be enabled together, we
+    only need to check one feature flag (NETIF_F_HW_VLAN_CTAG_RX) before
+    calling __vlan_hwaccel_put_tag().
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 5a9f6b238e59bc05afb4cdeaf3672990bf2a5309
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 6 02:37:15 2016 -0400
+
+    bnxt_en: Enable and disable RX CTAG and RX STAG VLAN acceleration together.
+    
+    The hardware can only be set to strip or not strip both the VLAN CTAG and
+    STAG.  It cannot strip one and not strip the other.  Add logic to
+    bnxt_fix_features() to toggle both feature flags when the user is toggling
+    one of them.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b9a8460a08a1e0150073cda3e7a0dd23cb888052
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Jun 6 02:37:14 2016 -0400
+
+    bnxt_en: Fix tx push race condition.
+    
+    Set the is_push flag in the software BD before the tx data is pushed to
+    the chip.  It is possible to get the tx interrupt as soon as the tx data
+    is pushed.  The tx handler will not handle the event properly if the
+    is_push flag is not set and it will crash.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 7e13318daa4a67bff2f800923a993ef3818b3c53
+Author: Tom Herbert <tom@herbertland.com>
+Date:   Wed May 18 09:06:10 2016 -0700
+
+    net: define gso types for IPx over IPv4 and IPv6
+    
+    This patch defines two new GSO definitions SKB_GSO_IPXIP4 and
+    SKB_GSO_IPXIP6 along with corresponding NETIF_F_GSO_IPXIP4 and
+    NETIF_F_GSO_IPXIP6. These are used to described IP in IP
+    tunnel and what the outer protocol is. The inner protocol
+    can be deduced from other GSO types (e.g. SKB_GSO_TCPV4 and
+    SKB_GSO_TCPV6). The GSO types of SKB_GSO_IPIP and SKB_GSO_SIT
+    are removed (these are both instances of SKB_GSO_IPXIP4).
+    SKB_GSO_IPXIP6 will be used when support for GSO with IP
+    encapsulation over IPv6 is added.
+    
+    Signed-off-by: Tom Herbert <tom@herbertland.com>
+    Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b67daab033293b3882ba4dc926ffb084d70044e0
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun May 15 03:04:51 2016 -0400
+
+    bnxt_en: Use dma_rmb() instead of rmb().
+    
+    Use the weaker but more appropriate dma_rmb() to order the reading of
+    the completion ring.
+    
+    Suggested-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 5049e33b559a44e9f216d86c58c7c7fce6f5df2f
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun May 15 03:04:50 2016 -0400
+
+    bnxt_en: Add BCM57314 device ID.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 10289bec0072b13f629a654d94faf1dadd44f335
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun May 15 03:04:49 2016 -0400
+
+    bnxt_en: Simplify and improve unsupported SFP+ module reporting.
+    
+    The current code is more complicated than necessary and can only report
+    unsupported SFP+ module if it is plugged in after the device is up.
+    
+    Rename bnxt_port_module_event() to bnxt_get_port_module_status().  We
+    already have the current module_status in the link_info structure, so
+    just check that and report any unsupported SFP+ module status.  Delete
+    the unnecessary last_port_module_event.  Call this function at the
+    end of bnxt_open to report unsupported module already plugged in.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8578d6c19a308dea3daf3d03acdf18724ec05590
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun May 15 03:04:48 2016 -0400
+
+    bnxt_en: Fix length value in dmesg log firmware error message.
+    
+    The len value in the hwrm error message is wrong.  Use the properly adjusted
+    value in the variable len.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a11fa2be6d1564375dc57530680268ad569c2632
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun May 15 03:04:47 2016 -0400
+
+    bnxt_en: Improve the delay logic for firmware response.
+    
+    The current code has 2 problems:
+    
+    1. The maximum wait time is not long enough.  It is about 60% of the
+    duration specified by the firmware.  It is calling usleep_range(600, 800)
+    for every 1 msec we are supposed to wait.
+    
+    2. The granularity of the delay is too coarse.  Many simple firmware
+    commands finish in 25 usec or less.
+    
+    We fix these 2 issues by multiplying the original 1 msec loop counter by
+    40 and calling usleep_range(25, 40) for each iteration.
+    
+    There is also a second delay loop to wait for the last DMA word to
+    complete.  This delay loop should be a very short 5 usec wait.
+    
+    This change results in much faster bring-up/down time:
+    
+    Before the patch:
+    
+    time ip link set p4p1 up
+    
+    real    0m0.120s
+    user    0m0.001s
+    sys     0m0.009s
+    
+    After the patch:
+    
+    time ip link set p4p1 up
+    
+    real    0m0.030s
+    user    0m0.000s
+    sys     0m0.010s
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit d0a42d6fc8eaf1b64f62b0bbc3b829b756eacf57
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Sun May 15 03:04:46 2016 -0400
+
+    bnxt_en: Reduce maximum ring pages if page size is 64K.
+    
+    The chip supports 4K/8K/64K page sizes for the rings and we try to
+    match it to the CPU PAGE_SIZE.  The current page size limits for the rings
+    are based on 4K/8K page size. If the page size is 64K, these limits are
+    too large.  Reduce them appropriately.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 90c4f788f6c08aaa52edbb47a817403376523375
+Author: Ajit Khaparde <ajit.khaparde@broadcom.com>
+Date:   Sun May 15 03:04:45 2016 -0400
+
+    bnxt_en: Report PCIe link speed and width during driver load
+    
+    Add code to log a message during driver load indicating PCIe link
+    speed and width.
+    
+    The log message will look like this:
+    bnxt_en 0000:86:00.0 eth0: PCIe: Speed 8.0GT/s Width x8
+    
+    Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 42ee18fe4ca2a12b8370bb1c53fa6b9f9300c70c
+Author: Ajit Khaparde <ajit.khaparde@broadcom.com>
+Date:   Sun May 15 03:04:44 2016 -0400
+
+    bnxt_en: Add Support for ETHTOOL_GMODULEINFO and ETHTOOL_GMODULEEEPROM
+    
+    Add support to fetch the SFP EEPROM settings from the firmware
+    and display it via the ethtool -m command.  We support SFP+ and QSFP
+    modules.
+    
+    v2: Fixed a bug in bnxt_get_module_eeprom() found by Ben Hutchings.
+    
+    Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 18d6e4e2d800cbd44a7d7d215a49f99c6508e4a5
+Author: Satish Baddipadige <sbaddipa@broadcom.com>
+Date:   Sun May 15 03:04:43 2016 -0400
+
+    bnxt_en: Fix invalid max channel parameter in ethtool -l.
+    
+    When there is only 1 MSI-X vector or in INTA mode, tx and rx pre-set
+    max channel parameters are shown incorrectly in ethtool -l.  With only 1
+    vector, bnxt_get_max_rings() will return -ENOMEM.  bnxt_get_channels
+    should check this return value, and set max_rx/max_tx to 0 if it is
+    non-zero.
+    
+    Signed-off-by: Satish Baddipadige <sbaddipa@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit fa7e28127a5ad9fd55ac9c7707d8c8b835113a7c
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue May 10 19:18:00 2016 -0400
+
+    bnxt_en: Add workaround to detect bad opaque in rx completion (part 2)
+    
+    Add detection and recovery code when the hardware returned opaque value
+    does not match the expected consumer index.  Once the issue is detected,
+    we skip the processing of all RX and LRO/GRO packets.  These completion
+    entries are discarded without sending the SKB to the stack and without
+    producing new buffers.  The function will be reset from a workqueue.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 376a5b8647d6c56cb8f104d7ad0390b4f4057e70
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue May 10 19:17:59 2016 -0400
+
+    bnxt_en: Add workaround to detect bad opaque in rx completion (part 1)
+    
+    There is a rare hardware bug that can cause a bad opaque value in the RX
+    or TPA completion.  When this happens, the hardware may have used the
+    same buffer twice for 2 rx packets.  In addition, the driver will also
+    crash later using the bad opaque as the index into the ring.
+    
+    The rx opaque value is predictable and is always monotonically increasing.
+    The workaround is to keep track of the expected next opaque value and
+    compare it with the one returned by hardware during RX and TPA start
+    completions.  If they miscompare, we will not process any more RX and
+    TPA completions and exit NAPI.  We will then schedule a workqueue to
+    reset the function.
+    
+    This patch adds the logic to keep track of the next rx consumer index.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 7d2837dd7a3239e8201d9bef75c1a708e451e123
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed May 4 16:56:44 2016 -0400
+
+    bnxt_en: Setup multicast properly after resetting device.
+    
+    The multicast/all-multicast internal flags are not properly restored
+    after device reset.  This could lead to unreliable multicast operations
+    after an ethtool configuration change for example.
+    
+    Call bnxt_mc_list_updated() and setup the vnic->mask in bnxt_init_chip()
+    to fix the issue.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 67a95e2022c7f0405408fb1f910283785ece354a
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Wed May 4 16:56:43 2016 -0400
+
+    bnxt_en: Need memory barrier when processing the completion ring.
+    
+    The code determines if the next ring entry is valid before proceeding
+    further to read the rest of the entry.  The CPU can re-order and read
+    the rest of the entry first, possibly reading a stale entry, if DMA
+    of a new entry happens right after reading it.  This issue can be
+    readily seen on a ppc64 system, causing it to crash.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 152971ee75fddbc43fb6cf7e3ada96c1324df2af
+Author: Alexander Duyck <aduyck@mirantis.com>
+Date:   Mon May 2 09:38:55 2016 -0700
+
+    bnxt: Add support for segmentation of tunnels with outer checksums
+    
+    This patch assumes that the bnxt hardware will ignore existing IPv4/v6
+    header fields for length and checksum as well as the length and checksum
+    fields for outer UDP and GRE headers.
+    
+    I have been told by Michael Chan that this is working.  Though this might
+    be somewhat redundant for IPv6 as they are forcing the checksum to be
+    computed for all IPv6 frames that are offloaded.  A follow-up patch may be
+    necessary in order to fix this as it is essentially mangling the outer IPv6
+    headers to add a checksum where none was requested.
+    
+    Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 89d0a06c516339c0a2b3d02677f5d6310b3319fb
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Apr 25 02:30:51 2016 -0400
+
+    bnxt_en: Divide a page into 32K buffers for the aggregation ring if necessary.
+    
+    If PAGE_SIZE is bigger than BNXT_RX_PAGE_SIZE, that means the native CPU
+    page is bigger than the maximum length of the RX BD.  Divide the page
+    into multiple 32K buffers for the aggregation ring.
+    
+    Add an offset field in the bnxt_sw_rx_agg_bd struct to keep track of the
+    page offset of each buffer.  Since each page can be referenced by multiple
+    buffer entries, call get_page() as needed to get the proper reference
+    count.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 2839f28bd5bf8fd2ab4a1ea3a5589c8f94364cbb
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Apr 25 02:30:50 2016 -0400
+
+    bnxt_en: Limit RX BD pages to be no bigger than 32K.
+    
+    The RX BD length field of this device is 16-bit, so the largest buffer
+    size is 65535.  For LRO and GRO, we allocate native CPU pages for the
+    aggregation ring buffers.  It won't work if the native CPU page size is
+    64K or bigger.
+    
+    We fix this by defining BNXT_RX_PAGE_SIZE to be native CPU page size
+    up to 32K.  Replace PAGE_SIZE with BNXT_RX_PAGE_SIZE in all appropriate
+    places related to the rx aggregation ring logic.
+    
+    The next patch will add additional logic to divide the page into 32K
+    chunks for aggregation ring buffers if PAGE_SIZE is bigger than
+    BNXT_RX_PAGE_SIZE.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 1fa72e29e14d97fbda15437c648d7cc4eb00bff8
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Apr 25 02:30:49 2016 -0400
+
+    bnxt_en: Don't fallback to INTA on VF.
+    
+    Only MSI-X can be used on a VF.  The driver should fail initialization
+    if it cannot successfully enable MSI-X.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8cbde1175e3c8565edbb777cd09cbfdb93c78397
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Apr 11 04:11:14 2016 -0400
+
+    bnxt_en: Add async event handling for speed config changes.
+    
+    On some dual port cards, link speeds on both ports have to be compatible.
+    Firmware will inform the driver when a certain speed is no longer
+    supported if the other port has linked up at a certain speed.  Add
+    logic to handle this event by logging a message and getting the
+    updated list of supported speeds.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 84c33dd342ad596a271a61da0119bf34e80bb1c5
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Apr 11 04:11:13 2016 -0400
+
+    bnxt_en: Call firmware to approve VF MAC address change.
+    
+    Some hypervisors (e.g. ESX) require the VF MAC address to be forwarded to
+    the PF for approval.  In Linux PF, the call is not forwarded and the
+    firmware will simply check and approve the MAC address if the PF has not
+    previously administered a valid MAC address for this VF.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 33f7d55f07ab964055d73d38774346f8d4821f00
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Apr 11 04:11:12 2016 -0400
+
+    bnxt_en: Shutdown link when device is closed.
+    
+    Let firmware know that the driver is giving up control of the link so that
+    it can be shutdown if no management firmware is running.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 03efbec03198a0f505c2a6c93268c3c5df321c90
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Apr 11 04:11:11 2016 -0400
+
+    bnxt_en: Disallow forced speed for 10GBaseT devices.
+    
+    10GBaseT devices must autonegotiate to determine master/slave clocking.
+    Disallow forced speed in ethtool .set_settings() for these devices.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 29c262fed4067c52977ba279cf71520f9991a050
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:09:03 2016 -0400
+
+    bnxt_en: Improve ethtool .get_settings().
+    
+    If autoneg is off, we should always report the speed and duplex settings
+    even if it is link down so the user knows the current settings.  The
+    unknown speed and duplex should only be used for autoneg when link is
+    down.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 9d9cee08fc9f5c4df84ef314158fd19c013bcec6
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:09:02 2016 -0400
+
+    bnxt_en: Check for valid forced speed during ethtool -s.
+    
+    Check that the forced speed is a valid speed supported by firmware.
+    If not supported, return -EINVAL.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4bb13abf208cb484a9b9d1af9233b0ef850c2fe7
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:09:01 2016 -0400
+
+    bnxt_en: Add unsupported SFP+ module warnings.
+    
+    Add the PORT_CONN_NOT_ALLOWED async event handling logic.  The driver
+    will print an appropriate warning to reflect the SFP+ module enforcement
+    policy done in the firmware.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 25be862370031056989ee76e3c48c3ac8ff67fd4
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:09:00 2016 -0400
+
+    bnxt_en: Set async event bits when registering with the firmware.
+    
+    Currently, the driver only sets bit 0 of the async_event_fwd fields.
+    To be compatible with the latest spec, we need to set the
+    appropriate event bits handled by the driver.  We should be handling
+    link change and PF driver unload events, so these 2 bits should be
+    set.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 72b34f04e0b00956dd679ae18bf2163669df8b56
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:08:59 2016 -0400
+
+    bnxt_en: Add get_eee() and set_eee() ethtool support.
+    
+    Allow users to get|set EEE parameters.
+    
+    v2: Added comment for preserving the tx_lpi_timer value in get_eee.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 939f7f0ca442187db2a4ec7a40979c711b0c939e
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:08:58 2016 -0400
+
+    bnxt_en: Add EEE setup code.
+    
+    1. Add bnxt_hwrm_set_eee() function to setup EEE firmware parameters based
+    on the bp->eee settings.
+    2. The new function bnxt_eee_config_ok() will check if EEE parameters need
+    to be modified due to autoneg changes.
+    3. bnxt_hwrm_set_link() has added a new parameter to update EEE.  If the
+    parameter is set, it will call bnxt_hwrm_set_eee().
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 170ce01301a2a1a87808765531d938fa0b023641
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:08:57 2016 -0400
+
+    bnxt_en: Add basic EEE support.
+    
+    Get EEE capability and the initial EEE settings from firmware.
+    Add "EEE is active | not active" to link up dmesg.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c9ee9516c161da2d072e035907aa35a35dfa68a8
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:08:56 2016 -0400
+
+    bnxt_en: Improve flow control autoneg with Firmware 1.2.1 interface.
+    
+    Make use of the new AUTONEG_PAUSE bit in the new interface to better
+    control autoneg flow control settings, independent of RX and TX
+    advertisement settings.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 11f15ed394782dd018d60a0bb550616a8571b43c
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Tue Apr 5 14:08:55 2016 -0400
+
+    bnxt_en: Update to Firmware 1.2.2 spec.
+    
+    Use new field names in API structs and stop using deprecated fields
+    auto_link_speed and auto_duplex in phy_cfg/phy_qcfg structs.
+    
+    Update copyright year to 2016.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3c02d1bb32347d0674714ee170772d771d513469
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 28 19:46:07 2016 -0400
+
+    bnxt_en: Fix ethtool -a reporting.
+    
+    To report flow control tx/rx settings accurately regardless of autoneg
+    setting, we should use link_info->req_flow_ctrl.  Before this patch,
+    the reported settings were only correct when autoneg was on.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 49b5c7a125201bb42c25831fda3a50305c29ef50
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 28 19:46:06 2016 -0400
+
+    bnxt_en: Fix typo in bnxt_hwrm_set_pause_common().
+    
+    The typo caused the wrong flow control bit to be set.
+    
+    Reported-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit e6ef26991a46e20879bebb8298080eb7ceed4ae8
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 28 19:46:05 2016 -0400
+
+    bnxt_en: Implement proper firmware message padding.
+    
+    The size of every padded firmware message is specified in the first
+    HWRM_VER_GET response message.  Use this value to pad every message
+    after that.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 33e52d888d0c84a0c66f13357a53113fd9710bd6
+Author: Prashant Sreedharan <prashant@broadcom.com>
+Date:   Mon Mar 28 19:46:04 2016 -0400
+
+    bnxt_en: Initialize CP doorbell value before ring allocation
+    
+    The existing code does the following:
+        allocate completion ring
+        initialize completion ring doorbell
+        disable interrupts on this completion ring by writing to the doorbell
+    
+    We can have a race where firmware sends an asynchronous event to the host
+    after completion ring allocation and before doorbell is initialized.
+    When this happens driver can crash while ringing the doorbell using
+    uninitialized value as part of handling the IRQ/napi request.
+    
+    Signed-off-by: Prashant Sreedharan <prashant.sreedharan@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 6316ea6db93d875df333e7ab205bf1aa3b3616d7
+Author: Satish Baddipadige <sbaddipa@broadcom.com>
+Date:   Mon Mar 7 15:38:48 2016 -0500
+
+    bnxt_en: Enable AER support.
+    
+    Add pci_error_handler callbacks to support for pcie advanced error
+    recovery.
+    
+    Signed-off-by: Satish Baddipadige <sbaddipa@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8ddc9aaa725a9337fc7bbe95fe1d1499769fb9b2
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 7 15:38:47 2016 -0500
+
+    bnxt_en: Include hardware port statistics in ethtool -S.
+    
+    Include the more useful port statistics in ethtool -S for the PF device.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 9947f83fb79ca501f5ab24c370211bfb78b6b364
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 7 15:38:46 2016 -0500
+
+    bnxt_en: Include some hardware port statistics in ndo_get_stats64().
+    
+    Include some of the port error counters (e.g. crc) in ->ndo_get_stats64()
+    for the PF device.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3bdf56c47dfcd819ab1e73644c2eb9c72c08f29e
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 7 15:38:45 2016 -0500
+
+    bnxt_en: Add port statistics support.
+    
+    Gather periodic port statistics if the device is PF and link is up.  This
+    is triggered in bnxt_timer() every one second to request firmware to DMA
+    the counters.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit f1a082a6f79fd5f06b27ef05a5ba7ec8d6e83b4c
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 7 15:38:44 2016 -0500
+
+    bnxt_en: Extend autoneg to all speeds.
+    
+    Allow all autoneg speeds supported by firmware to be advertised.  If
+    the advertising parameter is 0, then all supported speeds will be
+    advertised.
+    
+    Remove BNXT_ALL_COPPER_ETHTOOL_SPEED which is no longer used as all
+    supported speeds can be advertised.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4b32cacca28fe8b29bf266feff19b6fc2180402e
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 7 15:38:43 2016 -0500
+
+    bnxt_en: Use common function to get ethtool supported flags.
+    
+    The supported bits and advertising bits in ethtool have the same
+    definitions.  The same is true for the firmware bits.  So use the
+    common function to handle the conversion for both supported and
+    advertising bits.
+    
+    v2: Don't use parentheses on function return.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3277360eb29c6e482391975717d983060ecbd28d
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 7 15:38:42 2016 -0500
+
+    bnxt_en: Add reporting of link partner advertisement.
+    
+    And report actual pause settings to ETHTOOL_GPAUSEPARAM to let ethtool
+    resolve the actual pause settings.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 27c4d578600c401c119c012a90920805fab05cc9
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Mar 7 15:38:41 2016 -0500
+
+    bnxt_en: Refactor bnxt_fw_to_ethtool_advertised_spds().
+    
+    Include the conversion of pause bits and add one extra call layer so
+    that the same refactored function can be reused to get the link partner
+    advertisement bits.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 90e209213096110bce06ef580e1c73702fe4a288
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Feb 26 04:00:08 2016 -0500
+
+    bnxt_en: Add hwrm_send_message_silent().
+    
+    This is used to send NVM_FIND_DIR_ENTRY messages which can return error
+    if the entry is not found.  This is normal and the error message will
+    cause unnecessary alarm, so silence it.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit fbfbc4851dd709cf1327afc283f9cca00235dcb3
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Feb 26 04:00:07 2016 -0500
+
+    bnxt_en: Refactor _hwrm_send_message().
+    
+    Add a new function bnxt_do_send_msg() to do essentially the same thing
+    with an additional parameter to silence error response messages.  All
+    current callers will set silent to false.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3ebf6f0a09a284adef62111c7cfca29f56d6cce7
+Author: Rob Swindell <swindell@broadcom.com>
+Date:   Fri Feb 26 04:00:06 2016 -0500
+
+    bnxt_en: Add installed-package firmware version reporting via Ethtool GDRVINFO
+    
+    For everything to fit, we remove the PHY microcode version and replace it
+    with the firmware package version in the fw_version string.
+    
+    Signed-off-by: Rob Swindell <swindell@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit a8643e1604c1f39a675c6b10a7f84260fa13590c
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Feb 26 04:00:05 2016 -0500
+
+    bnxt_en: Fix dmesg log firmware error messages.
+    
+    Use appropriate firmware request header structure to prepare the
+    firmware messages.  This avoids the unnecessary conversion of the
+    fields to 32-bit fields.  Add appropriate endian conversion when
+    printing out the message fields in dmesg so that they appear correct
+    in the log.
+    
+    Reported-by: Rob Swindell <swindell@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit ff4fe81d2d49e3cad3bb45c8c5b9a49ca90ee10b
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Feb 26 04:00:04 2016 -0500
+
+    bnxt_en: Use firmware provided message timeout value.
+    
+    Before this patch, we used a hardcoded value of 500 msec as the default
+    value for firmware message response timeout.  For better portability with
+    future hardware or debug platforms, use the value provided by firmware in
+    the first response and store it for all subsequent messages.  Redefine the
+    macro HWRM_CMD_TIMEOUT to the stored value.  Since we don't have the
+    value yet in the first message, use the 500 ms default if the stored value
+    is zero.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit dfc9c94a83909f4be80e5d0c67e79793830aa312
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Feb 26 04:00:03 2016 -0500
+
+    bnxt_en: Add coalescing support for tx rings.
+    
+    When tx and rx rings don't share the same completion ring, tx coalescing
+    parameters can be set differently from the rx coalescing parameters.
+    Otherwise, use rx coalescing parameters on shared completion rings.
+    
+    Adjust rx coalescing default values to lower interrupt rate.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit bb053f52a54d66a6057c2220458349f7d39ce0d2
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Feb 26 04:00:02 2016 -0500
+
+    bnxt_en: Refactor bnxt_hwrm_set_coal().
+    
+    Add a function to set all the coalescing parameters.  The function can
+    be used later to set both rx and tx coalescing parameters.
+    
+    v2: Fixed function parameters formatting requested by DaveM.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit dfb5b894f87cb78168e04283e8d15626dc3e6d5a
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Fri Feb 26 04:00:01 2016 -0500
+
+    bnxt_en: Store irq coalescing timer values in micro seconds.
+    
+    Don't convert these to internal hardware tick values before storing
+    them.  This avoids the confusion of ethtool -c returning slightly
+    different values than the ones set using ethtool -C when we convert
+    hardware tick values back to micro seconds.  Add better comments for
+    the hardware settings.
+    
+    Also, rename the current set of coalescing fields with rx_ prefix.
+    The next patch will add support of tx coalescing values.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 19241368443ff976b1924019d29eef8e972158e7
+Author: Jeffrey Huang <huangjw@broadcom.com>
+Date:   Fri Feb 26 04:00:00 2016 -0500
+
+    bnxt_en: Send PF driver unload notification to all VFs.
+    
+    During remove_one() when SRIOV is enabled, the PF driver
+    should broadcast PF driver unload notification to all
+    VFs that are attached to VMs. Upon receiving the PF
+    driver unload notification, the VF driver should print
+    a warning message to message log.  Certain operations on the
+    VF may not succeed after the PF has unloaded.
+    
+    Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3874d6a8b61966a77aa743b4160ba96bf3081ce5
+Author: Jeffrey Huang <huangjw@broadcom.com>
+Date:   Fri Feb 26 03:59:59 2016 -0500
+
+    bnxt_en: Improve bnxt_vf_update_mac().
+    
+    Allow the VF to setup its own MAC address if the PF has not administratively
+    set it for the VF.  To do that, we should always store the MAC address
+    from the firmware.  There are 2 cases:
+    
+    1. The MAC address is valid.  This MAC address is assigned by the PF and
+    it needs to override the current VF MAC address.
+    
+    2. The MAC address is zero.  The VF will use a random MAC address by default.
+    By storing this 0 MAC address in the VF structure, it will allow the VF
+    user to change the MAC address later using ndo_set_mac_address() when
+    it sees that the stored MAC address is 0.
+    
+    v2: Expanded descriptions and added more comments.
+    
+    Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit fbb0fa8b48892a3db8f5b89fb591c741fbd2fe7a
+Author: Michael Chan <michael.chan@broadcom.com>
+Date:   Mon Feb 22 02:10:26 2016 -0500
+
+    bnxt_en: Fix zero padding of tx push data.
+    
+    The arithmetic to zero pad the last 64-bit word in the push buffer is not
+    correct.
+    
+    1. It should be pdata + length to get to the end.
+    2. 'pdata' is void pointer and passing it to PTR_ALIGN() will cast the
+    aligned pointer to void.  Pass 'end' which is u64 pointer to PTR_ALIGN()
+    instead so that the aligned pointer - 1 is the last 64-bit pointer to data.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit ba41d46fe03223279054e58d570069fdc62fb768
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Fri Feb 19 19:43:21 2016 -0500
+
+    bnxt_en: Failure to update PHY is not fatal condition.
+    
+    If we fail to update the PHY, we should print a warning and continue.
+    The current code to exit is buggy as it has not freed up the NIC
+    resources yet.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit de73018fb5474b33dc4f6d6b8d889e40232e325b
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Fri Feb 19 19:43:20 2016 -0500
+
+    bnxt_en: Remove unnecessary call to update PHY settings.
+    
+    Fix bnxt_update_phy_setting() to check the correct parameters when
+    determining whether to update the PHY.  Requested line speed/duplex should
+    only be checked for forced speed mode.  This avoids unnecessary link
+    interruptions when loading the driver.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 035a1539ab63bfdb284bdf6e8459e35897c60564
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Fri Feb 19 19:43:19 2016 -0500
+
+    bnxt_en: Poll link at the end of __bnxt_open_nic().
+    
+    When shutting down the NIC, we shutdown async event processing before
+    freeing all the rings.  If there is a link change event during reset, the
+    driver may miss it and the link state may be incorrect after the NIC is
+    re-opened.  Poll the link at the end of __bnxt_open_nic() to get the
+    correct link status.
+    
+    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+    
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 51dd55b5688e81f9f13fb520a59900d4c3959a9a
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Feb 10 17:33:50 2016 -0500
+
+    bnxt_en: Reduce default ring sizes.
+    
+    The current default tx ring size of 512 causes an extra page to be
+    allocated for the tx ring with only 1 entry in it.  Reduce it to
+    511.  The default rx ring size is also reduced to 511 to use less
+    memory by default.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4419dbe6a0f031ddb2df4cd993805546a566d20e
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Feb 10 17:33:49 2016 -0500
+
+    bnxt_en: Fix implementation of tx push operation.
+    
+    tx push is supported for small packets to reduce DMA latency.  The
+    following bugs are fixed in this patch:
+    
+    1. Fix the definition of the push BD which is different from the DMA BD.
+    2. The push buffer has to be zero padded to the next 64-bit word boundary
+    or tx checksum won't be correct.
+    3. Increase the tx push packet threshold to 164 bytes (192 bytes with the BD)
+    so that small tunneled packets are within the threshold.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 1c49c421f3ec446f1e0eda6d965a6cb23214d7a1
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Feb 10 17:33:48 2016 -0500
+
+    bnxt_en: Remove 20G support and advertise only 40GbaseCR4.
+    
+    20G is not supported by production hardware and only the 40GbaseCR4 standard
+    is supported.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 0d8abf020199b0cbc5fb3aa309d36f0ac1b91631
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Feb 10 17:33:47 2016 -0500
+
+    bnxt_en: Cleanup and Fix flow control setup logic
+    
+    Cleanup bnxt_probe_phy() to cleanly separate 2 code blocks for autoneg
+    on and off.  Autoneg flow control is possible only if autoneg is enabled.
+    
+    In bnxt_get_settings(), Pause and Asym_Pause are always supported.
+    Only the advertisement bits change depending on the ethtool -A setting
+    in auto mode.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b763499ee16b74707af0fb26ab0a26bd9719870b
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Feb 10 17:33:46 2016 -0500
+
+    bnxt_en: Fix ethtool autoneg logic.
+    
+    1. Determine autoneg on|off setting from link_info->autoneg.  Using the
+    firmware returned setting can be misleading if autoneg is changed and
+    there hasn't been a phy update from the firmware.
+    
+    2. If autoneg is disabled, link_info->autoneg should be set to 0 to
+    indicate both speed and flow control autoneg are disabled.
+    
+    3. To enable autoneg flow control, speed autoneg must be enabled.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit d612a579771385e08f7b665063b36bfa52c03ea3
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Thu Jan 28 03:11:22 2016 -0500
+
+    bnxt_en: Fix crash in bnxt_free_tx_skbs() during tx timeout.
+    
+    The ring index j is not wrapped properly at the end of the ring, causing
+    it to reference pointers past the end of the ring.  For proper loop
+    termination and to access the ring properly, we need to increment j and
+    mask it before referencing the ring entry.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 8a4d4c8dde7a4119bce3fd8287dca193ff6356da
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Thu Jan 28 03:11:21 2016 -0500
+
+    bnxt_en: Exclude rx_drop_pkts hw counter from the stack's rx_dropped counter.
+    
+    This hardware counter is misleading as it counts dropped packets that
+    don't match the hardware filters for unicast/broadcast/multicast.  We
+    will still report this counter in ethtool -S for diagnostics purposes.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 74608fc98d2856fa7201a498b61c9dd9455b504a
+Author: Prashant Sreedharan <prashant@broadcom.com>
+Date:   Thu Jan 28 03:11:20 2016 -0500
+
+    bnxt_en: Ring free response from close path should use completion ring
+    
+    Use completion ring for ring free response from firmware.  The response
+    will be the last entry in the ring and we can free the ring after getting
+    the response.  This will guarantee no spurious DMA to freed memory.
+    
+    Signed-off-by: Prashant Sreedharan <prashant@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 415b6f19e87e350b13585591859d4fdf50772229
+Author: Arnd Bergmann <arnd@arndb.de>
+Date:   Tue Jan 12 16:05:08 2016 +0100
+
+    net: bnxt: always return values from _bnxt_get_max_rings
+    
+    Newly added code in the bnxt driver uses a couple of variables that
+    are never initialized when CONFIG_BNXT_SRIOV is not set, and gcc
+    correctly warns about that:
+    
+    In file included from include/linux/list.h:8:0,
+                     from include/linux/module.h:9,
+                     from drivers/net/ethernet/broadcom/bnxt/bnxt.c:10:
+    drivers/net/ethernet/broadcom/bnxt/bnxt.c: In function 'bnxt_get_max_rings':
+    include/linux/kernel.h:794:26: warning: 'cp' may be used uninitialized in this function [-Wmaybe-uninitialized]
+    include/linux/kernel.h:794:26: warning: 'tx' may be used uninitialized in this function [-Wmaybe-uninitialized]
+    drivers/net/ethernet/broadcom/bnxt/bnxt.c:5730:11: warning: 'rx' may be used uninitialized in this function [-Wmaybe-uninitialized]
+    drivers/net/ethernet/broadcom/bnxt/bnxt.c:5736:6: note: 'rx' was declared here
+    
+    This changes the condition so that we fall back to using the PF
+    data if VF is not available, and always initialize the variables
+    to something useful.
+    
+    Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+    Fixes: 6e6c5a57fbe1 ("bnxt_en: Modify bnxt_get_max_rings() to support shared or non shared rings.")
+    Acked-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit d2d6318cb996f39112ba24ff23abe67578a611bc
+Author: Rob Swindell <swindell@broadcom.com>
+Date:   Thu Jan 7 19:56:58 2016 -0500
+
+    bnxt_en: Reset embedded processor after applying firmware upgrade
+    
+    Use HWRM_FW_RESET command to request a self-reset of the embedded
+    processor(s) after successfully applying a firmware update. For boot
+    processor, the self-reset is currently deferred until the next PCIe reset.
+    
+    Signed-off-by: Rob Swindell <swindell@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit d79979a103f7820d3107cdc04096e87b37f90008
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Thu Jan 7 19:56:57 2016 -0500
+
+    bnxt_en: Zero pad firmware messages to 128 bytes.
+    
+    For future compatibility, zero pad all messages that the driver sends
+    to the firmware to 128 bytes.  If these messages are extended in the
+    future with new byte enables, zero padding these messages now will
+    guarantee future compatibility.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 068c9ec62906b626a30526638fd36189b80b6464
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sat Jan 2 23:45:04 2016 -0500
+
+    bnxt_en: Modify ethtool -l|-L to support combined or rx/tx rings.
+    
+    The driver can support either all combined or all rx/tx rings.  The
+    default is combined, but the user can now select rx/tx rings.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 01657bcd078b924e4599a83acd402ea6f85a1e45
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sat Jan 2 23:45:03 2016 -0500
+
+    bnxt_en: Modify init sequence to support shared or non shared rings.
+    
+    Modify ring memory allocation and MSIX setup to support shared or
+    non shared rings and do the proper mapping.  Default is still to
+    use shared rings.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 6e6c5a57fbe1c77c2c55e266f87a83429adc3de7
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sat Jan 2 23:45:02 2016 -0500
+
+    bnxt_en: Modify bnxt_get_max_rings() to support shared or non shared rings.
+    
+    Add logic to calculate how many shared or non shared rings can be
+    supported.  Default is to use shared rings.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b81a90d3028af92da61a61e2efd231a585180044
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sat Jan 2 23:45:01 2016 -0500
+
+    bnxt_en: Re-structure ring indexing and mapping.
+    
+    In order to support dedicated or shared completion rings, the ring
+    indexing and mapping are re-structured as below:
+    
+    1. bp->grp_info[] array index is 1:1 with bp->bnapi[] array index and
+    completion ring index.
+    
+    2. rx rings 0 to n will be mapped to completion rings 0 to n.
+    
+    3. If tx and rx rings share completion rings, then tx rings 0 to m will
+    be mapped to completion rings 0 to m.
+    
+    4. If tx and rx rings use dedicated completion rings, then tx rings 0 to
+    m will be mapped to completion rings n + 1 to n + m.
+    
+    5. Each tx or rx ring will use the corresponding completion ring index
+    for doorbell mapping and MSIX mapping.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 3b2b7d9db74adb95aa0bd029298a741333eb847e
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sat Jan 2 23:45:00 2016 -0500
+
+    bnxt_en: Check for NULL rx or tx ring.
+    
+    Each bnxt_napi structure may no longer have both an rx ring and
+    a tx ring.  Check for a valid ring before using it.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b6ab4b01f53b5f9e17dbd4f91c95fa5049fa2101
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sat Jan 2 23:44:59 2016 -0500
+
+    bnxt_en: Separate bnxt_{rx|tx}_ring_info structs from bnxt_napi struct.
+    
+    Currently, an rx and a tx ring are always paired with a completion ring.
+    We want to restructure it so that it is possible to have a dedicated
+    completion ring for tx or rx only.
+    
+    The bnxt hardware uses a completion ring for rx and tx events.  The driver
+    has to process the completion ring entries sequentially for the rx and tx
+    events.  Using a dedicated completion ring for rx only or tx only has these
+    benefits:
+    
+    1. A burst of rx packets can cause delay in processing tx events if the
+    completion ring is shared.  If tx queue is stopped by BQL, this can cause
+    delay in re-starting the tx queue.
+    
+    2. A completion ring is sized according to the rx and tx ring size rounded
+    up to the nearest power of 2.  When the completion ring is shared, it is
+    sized by adding the rx and tx ring sizes and then rounded to the next power
+    of 2, often with a lot of wasted space.
+    
+    3. Using dedicated completion ring, we can adjust the tx and rx coalescing
+    parameters independently for rx and tx.
+    
+    The first step is to separate the rx and tx ring structures from the
+    bnxt_napi struct.
+    
+    In this patch, an rx ring and a tx ring will point to the same bnxt_napi
+    struct to share the same completion ring.  No change in ring assignment
+    and mapping yet.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 9f5545905fbcc069f6fa8030b866e967ec6a5c73
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sat Jan 2 23:44:58 2016 -0500
+
+    bnxt_en: Refactor bnxt_dbg_dump_states().
+    
+    By adding 3 separate functions to dump the different ring states.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit fbc9a5237a767cada312cb07877d0992b1e34242
+Author: David Christensen <davidch@broadcom.com>
+Date:   Sun Dec 27 18:19:29 2015 -0500
+
+    bnxt_en: Add BCM57301 & BCM57402 devices.
+    
+    Added the PCI IDs for the BCM57301 and BCM57402 controllers.
+    
+    Signed-off-by: David Christensen <davidch@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c193554ecd050e63753aa0ec99c188800843bca2
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Dec 27 18:19:28 2015 -0500
+
+    bnxt_en: Update to Firmware interface spec 1.0.0.
+    
+    This interface will be forward compatible with future changes.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b72d4a68c443e29cb59e15a1a9b2c2f4bf802831
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Dec 27 18:19:27 2015 -0500
+
+    bnxt_en: Keep track of the ring group resource.
+    
+    Newer firmware will return the ring group resource when we call
+    hwrm_func_qcaps().  To be compatible with older firmware, use the
+    number of tx rings as the number of ring groups if the older firmware
+    returns 0.  When determining how many rx rings we can support, take
+    the ring group resource into account as well in _bnxt_get_max_rings().
+    Divide and assign the ring groups to VFs.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4a21b49b34c01137a67bf0fe185c5d0fff747e4d
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Dec 27 18:19:26 2015 -0500
+
+    bnxt_en: Improve VF resource accounting.
+    
+    We need to keep track of all resources, such as rx rings, tx rings,
+    cmpl rings, rss contexts, stats contexts, vnics, after we have
+    divided them for the VFs.  Otherwise, subsequent ring changes on
+    the PF may not work correctly.
+    
+    We adjust all max resources in struct bnxt_pf_info after they have been
+    assigned to the VFs.  There is no need to keep the separate
+    max_pf_tx_rings and max_pf_rx_rings.
+    
+    When SR-IOV is disabled, we call bnxt_hwrm_func_qcaps() to restore the
+    max resources for the PF.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 92268c328a8dae4635b3deaca52a8ed329642219
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Dec 27 18:19:25 2015 -0500
+
+    bnxt_en: Cleanup bnxt_hwrm_func_cfg().
+    
+    1. Use local variable pf for repeated access to this pointer.
+    
+    2.  The 2nd argument num_vfs was unnecessarily declared as pointer to int.
+    This function doesn't change num_vfs so change the argument to int.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 2bcfa6f6e7cf867e4aa623f84caea4bc413d38c9
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Dec 27 18:19:24 2015 -0500
+
+    bnxt_en: Check hardware resources before enabling NTUPLE.
+    
+    The hardware resources required to enable NTUPLE varies depending on
+    how many rx channels are configured.  We need to make sure we have the
+    resources before we enable NTUPLE.  Add bnxt_rfs_capable() to do the
+    checking.
+    
+    In addition, we need to do the same checking in ndo_fix_features().  As
+    the rx channels are changed using ethtool -L, we call
+    netdev_update_features() to make the necessary adjustment for NTUPLE.
+    
+    Calling netdev_update_features() in netif_running() state but before
+    calling bnxt_open_nic() would be a problem.  To make this work,
+    bnxt_set_features() has to be modified to test for BNXT_STATE_OPEN for
+    the true hardware state instead of checking netif_running().
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 27e241896f2e21c96200df711659117923dec8a2
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Dec 27 18:19:23 2015 -0500
+
+    bnxt_en: Don't treat single segment rx frames as GRO frames.
+    
+    If hardware completes single segment rx frames, don't bother setting
+    up all the GRO related fields.  Pass the SKB up as a normal frame.
+    
+    Reviewed-by: vasundhara volam <vvolam@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 45019a180358c3cf290c3f3dc953c44f978d5527
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Dec 27 18:19:22 2015 -0500
+
+    bnxt_en: Allocate rx_cpu_rmap only if Accelerated RFS is enabled.
+    
+    Also, no need to check for bp->rx_nr_rings as it is always >= 1.  If the
+    allocation fails, it is not a fatal error and we can still proceed.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 665e350ddbfde88c5c18142dfd7b8c64556bc964
+Author: Satish Baddipadige <sbaddipa@broadcom.com>
+Date:   Sun Dec 27 18:19:21 2015 -0500
+
+    bnxt_en: Increment checksum error counter only if NETIF_F_RXCSUM is set.
+    
+    rx_l4_csum_error is now incremented only when offload is enabled
+    
+    Signed-off-by: Satish Baddipadige <sbaddipa@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 2731d70fa9cbb62e45743171bf979784fb36778c
+Author: Rob Swindell <swindell@broadcom.com>
+Date:   Sun Dec 27 18:19:20 2015 -0500
+
+    bnxt_en: Add support for upgrading APE/NC-SI firmware via Ethtool FLASHDEV
+    
+    NC-SI firmware of type apeFW (10) is now supported.
+    
+    Signed-off-by: Rob Swindell <swindell@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit edd0c2cc2b73ff21f356d6cbd3b5bf83e692ea9d
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Dec 27 18:19:19 2015 -0500
+
+    bnxt_en: Optimize ring alloc and ring free functions.
+    
+    Remove the unnecessary "if" statement before the "for" statement:
+    
+    if (x) {
+            for (i = 0; i < x; i++)
+    ...
+    }
+    
+    Also, change the ring free function to return void as it only returns 0.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit be58a0da1672391b246880450e990fe36d7ba24d
+Author: Jeffrey Huang <huangjw@broadcom.com>
+Date:   Sun Dec 27 18:19:18 2015 -0500
+
+    bnxt_en: support hwrm_func_drv_unrgtr command
+    
+    During remove_one, the driver should issue hwrm_func_drv_unrgtr
+    command to inform firmware that this function has been unloaded.
+    This is to let firmware keep track of driver present/absent state
+    when driver is gracefully unloaded. A keep alive timer is needed
+    later to keep track of driver state during abnormal shutdown.
+    
+    Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 028de140ffdf481d4948de663b33dae78e1e9cc8
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Dec 9 19:35:44 2015 -0500
+
+    bnxt_en: Implement missing tx timeout reset logic.
+    
+    The reset logic calls bnxt_close_nic() and bnxt_open_nic() under rtnl_lock
+    from bnxt_sp_task.  BNXT_STATE_IN_SP_TASK must be cleared before calling
+    bnxt_close_nic() to avoid deadlock.
+    
+    v2: Fixed white space error.  Thanks Dave.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4cebdcec0933bf39c0ab42e8ce8c9d72f803fbe9
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Dec 9 19:35:43 2015 -0500
+
+    bnxt_en: Don't cancel sp_task from bnxt_close_nic().
+    
+    When implementing driver reset from tx_timeout in the next patch,
+    bnxt_close_nic() will be called from the sp_task workqueue.  Calling
+    cancel_work() on sp_task will hang the workqueue.
+    
+    Instead, set a new bit BNXT_STATE_IN_SP_TASK when bnxt_sp_task() is running.
+    bnxt_close_nic() will wait for BNXT_STATE_IN_SP_TASK to clear before
+    proceeding.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit caefe526d7b5af11d9b5977b2862eb144fa45537
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Dec 9 19:35:42 2015 -0500
+
+    bnxt_en: Change bp->state to bitmap.
+    
+    This allows multiple independent bits to be set for various states.
+    Subsequent patches to implement tx timeout reset will require this.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit de68f5de56512a2ff5d5810ef4d54c53470c3c45
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Dec 9 19:35:41 2015 -0500
+
+    bnxt_en: Fix bitmap declaration to work on 32-bit arches.
+    
+    The declaration of the bitmap vf_req_snif_bmap using fixed array of
+    unsigned long will only work on 64-bit archs.  Use DECLARE_BITMAP instead
+    which will work on all archs.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit b664f008b0d885db1d5617ed1c51d29a8c04da93
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Wed Dec 2 01:54:08 2015 -0500
+
+    bnxt_en: Setup uc_list mac filters after resetting the chip.
+    
+    Call bnxt_cfg_rx_mode() in bnxt_init_chip() to setup uc_list and
+    mc_list mac address filters.  Before the patch, uc_list is not
+    setup again after chip reset (such as ethtool ring size change)
+    and macvlans don't work any more after that.
+    
+    Modify bnxt_cfg_rx_mode() to return error codes appropriately so
+    that the init chip sequence can detect any failures.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit bdd4347b33f480187b44699cf1caac9400496d6d
+Author: Jeffrey Huang <huangjw@broadcom.com>
+Date:   Wed Dec 2 01:54:07 2015 -0500
+
+    bnxt_en: enforce proper storing of MAC address
+    
+    For PF, the bp->pf.mac_addr always holds the permanent MAC
+    addr assigned by the HW.  For VF, the bp->vf.mac_addr always
+    holds the administrator assigned VF MAC addr. The random
+    generated VF MAC addr should never get stored to bp->vf.mac_addr.
+    This way, when the VF wants to change the MAC address, we can tell
+    if the administrator has already set it and disallow the VF from
+    changing it.
+    
+    v2: Fix compile error if CONFIG_BNXT_SRIOV is not set.
+    
+    Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 1fc2cfd03bbf8f1f8b6b90f0858faba8bd6631c4
+Author: Jeffrey Huang <huangjw@broadcom.com>
+Date:   Wed Dec 2 01:54:06 2015 -0500
+
+    bnxt_en: Fixed incorrect implementation of ndo_set_mac_address
+    
+    The existing ndo_set_mac_address only copies the new MAC addr
+    and didn't set the new MAC addr to the HW. The correct way is
+    to delete the existing default MAC filter from HW and add
+    the new one. Because of RFS filters are also dependent on the
+    default mac filter l2 context, the driver must go through
+    close_nic() to delete the default MAC and RFS filters, then
+    open_nic() to set the default MAC address to HW.
+    
+    Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 93d05d4a320cb16712bb3d57a9658f395d8cecb9
+Author: Eric Dumazet <edumazet@google.com>
+Date:   Wed Nov 18 06:31:03 2015 -0800
+
+    net: provide generic busy polling to all NAPI drivers
+    
+    NAPI drivers no longer need to observe a particular protocol
+    to benefit from busy polling (CONFIG_NET_RX_BUSY_POLL=y)
+    
+    napi_hash_add() and napi_hash_del() are automatically called
+    from core networking stack, respectively from
+    netif_napi_add() and netif_napi_del()
+    
+    This patch depends on free_netdev() and netif_napi_del() being
+    called from process context, which seems to be the norm.
+    
+    Drivers might still prefer to call napi_hash_del() on their
+    own, since they might combine all the rcu grace periods into
+    a single one, knowing their NAPI structures lifetime, while
+    core networking stack has no idea of a possible combining.
+    
+    Once this patch proves to not bring serious regressions,
+    we will cleanup drivers to either remove napi_hash_del()
+    or provide appropriate rcu grace periods combining.
+    
+    Signed-off-by: Eric Dumazet <edumazet@google.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 4bb6cdce386d620d10d2588ea5bf4093a3b21ab9
+Author: Jeffrey Huang <huangjw@broadcom.com>
+Date:   Thu Nov 5 16:25:51 2015 -0500
+
+    bnxt_en: More robust SRIOV cleanup sequence.
+    
+    Instead of always calling pci_sriov_disable() in remove_one(),
+    the driver should detect whether VFs are currently assigned
+    to the VMs. If the VFs are active in VMs, then it should not
+    disable SRIOV as it is catastrophic to the VMs. Instead,
+    it just leaves the VFs alone and continues to unload the PF.
+    The user can then cleanup the VMs even after the PF driver
+    has been unloaded.
+    
+    Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 84e86b98f6515aaeaac053b234be158b25457184
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Thu Nov 5 16:25:50 2015 -0500
+
+    bnxt_en: Fix comparison of u16 sw_id against negative value.
+    
+    Assign the return value from bitmap_find_free_region() to an integer
+    variable and check for negative error codes first, before assigning
+    the bit ID to the unsigned sw_id field.
+    
+    Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+    Cc: Dan Carpenter <dan.carpenter@oracle.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 11809490ac17810cff90c12e9f2f3e0303a72121
+Author: Jeffrey Huang <huangjw@broadcom.com>
+Date:   Thu Nov 5 16:25:49 2015 -0500
+
+    bnxt_en: map CAG_REG_LEGACY_INT_STATUS_MASK to GRC window #4
+    
+    In order to use offset 0x4014 for reading CAG interrupt status,
+    the actual CAG register must be mapped to GRC bar0 window #4.
+    Otherwise, the driver is reading garbage. This patch corrects
+    this issue.
+    
+    Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 614388ce39f3d61ad7f95db65f409d35d5943616
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Thu Nov 5 16:25:48 2015 -0500
+
+    bnxt_en: Determine tcp/ipv6 RSS hash type correctly.
+    
+    The profile ID in the completion record needs to be ANDed with the
+    profile ID mask of 0x1f.  This bug was causing the SKB hash type
+    and the gso_type to be wrong in some cases.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c5d7774db350e77f2506e36e1797c958d1b118c8
+Author: Jeffrey Huang <huangjw@broadcom.com>
+Date:   Thu Nov 5 16:25:47 2015 -0500
+
+    bnxt_en: Change sp events definitions to represent bit position.
+    
+    Fix the sp event bits to be bit positions instead of bit values since
+    the bit helper functions are expecting the former.
+    
+    Signed-off-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit d1611c3aba11ffa281bdd027aace52f5a370b8c5
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Sun Oct 25 22:27:57 2015 -0400
+
+    bnxt_en: Fix compile warnings when CONFIG_INET is not set.
+    
+    bnxt_gro_skb() has unused variables when CONFIG_INET is not set.  We
+    really cannot support hardware GRO if CONFIG_INET is not set, so
+    compile out bnxt_gro_skb() completely and define BNXT_FLAG_GRO to be 0
+    if CONFIG_INET is not set.  This will effectively always disable
+    hardware GRO if CONFIG_INET is not set.
+    
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit 379a80a1d048dcacfc2011d5d32e16d5c804b9f4
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Fri Oct 23 15:06:19 2015 -0400
+
+    bnxt_en: Fix compile errors when CONFIG_BNXT_SRIOV is not set.
+    
+    struct bnxt_pf_info needs to be always defined.  Move bnxt_update_vf_mac()
+    to bnxt_sriov.c and add some missing #ifdef CONFIG_BNXT_SRIOV.
+    
+    Reported-by: Jim Hull <jim.hull@hpe.com>
+    Tested-by: Jim Hull <jim.hull@hpe.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
+
+commit c0c050c58d840994ba842ad1c338a98e7c12b764
+Author: Michael Chan <mchan@broadcom.com>
+Date:   Thu Oct 22 16:01:17 2015 -0400
+
+    bnxt_en: New Broadcom ethernet driver.
+    
+    Broadcom ethernet driver for the new family of NetXtreme-C/E
+    ethernet devices.
+    
+    v5:
+      - Removed empty blank lines at end of files (noted by David Miller).
+      - Moved busy poll helper functions to bnxt.h to at least make the
+        .c file look less cluttered with #ifdef (noted by Stephen Hemminger).
+    
+    v4:
+      - Broke up 2 long message strings with "\n" (suggested by John Linville)
+      - Constify an array of strings (suggested by Stephen Hemminger)
+      - Improve bnxt_vf_pciid() (suggested by Stephen Hemminger)
+      - Use PCI_VDEVICE() to populate pci_device_id table for more compact
+        source.
+    
+    v3:
+      - Fixed 2 more sparse warnings.
+      - Removed some unused structures in .h files.
+    
+    v2:
+      - Fixed all kbuild test robot reported warnings.
+      - Fixed many of the checkpatch.pl errors and warnings.
+      - Fixed the Kconfig description (noted by Dmitry Kravkov).
+    
+    Acked-by: Eddie Wai <eddie.wai@broadcom.com>
+    Acked-by: Jeffrey Huang <huangjw@broadcom.com>
+    Signed-off-by: Prashant Sreedharan <prashant@broadcom.com>
+    Signed-off-by: Michael Chan <mchan@broadcom.com>
+    Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/ubuntu/bnxt/Makefile b/ubuntu/bnxt/Makefile
new file mode 100644 (file)
index 0000000..3531e72
--- /dev/null
@@ -0,0 +1,497 @@
+#!/usr/bin/make
+# Makefile for building Linux Broadcom Gigabit ethernet driver as a module.
+# $id$
+KVER=
+ifeq ($(KVER),)
+  KVER=$(shell uname -r)
+endif
+
+__ARCH=$(shell uname -m)
+
+# PREFIX may be set by the RPM build to set the effective root.
+PREFIX=
+ifeq ($(shell ls /lib/modules/$(KVER)/build > /dev/null 2>&1 && echo build),)
+# SuSE source RPMs
+  _KVER=$(shell echo $(KVER) | cut -d "-" -f1,2)
+  _KFLA=$(shell echo $(KVER) | cut -d "-" -f3)
+  _ARCH=$(shell file -b /lib/modules/$(shell uname -r)/build | cut -d "/" -f5)
+  ifeq ($(_ARCH),)
+    _ARCH=$(__ARCH)
+  endif
+  ifeq ($(shell ls /usr/src/linux-$(_KVER)-obj > /dev/null 2>&1 && echo linux),)
+    ifeq ($(shell ls /usr/src/kernels/$(KVER)-$(__ARCH) > /dev/null 2>&1 && echo linux),)
+      LINUX=
+    else
+      LINUX=/usr/src/kernels/$(KVER)-$(__ARCH)
+      LINUXSRC=$(LINUX)
+    endif
+  else
+    LINUX=/usr/src/linux-$(_KVER)-obj/$(_ARCH)/$(_KFLA)
+    LINUXSRC=/usr/src/linux-$(_KVER)
+  endif
+else
+  LINUX=/lib/modules/$(KVER)/build
+  ifeq ($(shell ls /lib/modules/$(KVER)/source > /dev/null 2>&1 && echo source),)
+    LINUXSRC=$(LINUX)
+  else
+    LINUXSRC=/lib/modules/$(KVER)/source
+  endif
+endif
+
+ifeq ($(shell ls $(LINUXSRC)/include/uapi/linux > /dev/null 2>&1 && echo uapi),)
+  UAPI=
+else
+  UAPI=uapi
+endif
+
+ifeq ($(BCMMODDIR),)
+  ifeq ($(shell ls /lib/modules/$(KVER)/updates > /dev/null 2>&1 && echo 1),1)
+    BCMMODDIR=/lib/modules/$(KVER)/updates
+  else
+    ifeq ($(shell grep -q "search.*[[:space:]]updates" /etc/depmod.conf > /dev/null 2>&1 && echo 1),1)
+      BCMMODDIR=/lib/modules/$(KVER)/updates
+    else
+      ifeq ($(shell grep -q "search.*[[:space:]]updates" /etc/depmod.d/* > /dev/null 2>&1 && echo 1),1)
+        BCMMODDIR=/lib/modules/$(KVER)/updates
+      else
+        BCMMODDIR=/lib/modules/$(KVER)/kernel/drivers/net
+      endif
+    endif
+  endif
+endif
+
+ifneq ($(shell grep -o "pci_enable_msix_range" $(LINUXSRC)/include/linux/pci.h),)
+  DISTRO_CFLAG = -DHAVE_MSIX_RANGE
+else
+  DISTRO_CFLAG =
+endif
+
+ifneq ($(shell grep -o "msix_cap" $(LINUXSRC)/include/linux/pci.h),)
+  ifeq ($(shell grep -o "pci_dev_rh1" $(LINUXSRC)/include/linux/pci.h),)
+    DISTRO_CFLAG += -DHAVE_MSIX_CAP
+  endif
+endif
+
+ifneq ($(shell grep -o "module_pci_driver" $(LINUXSRC)/include/linux/pci.h),)
+  DISTRO_CFLAG += -DHAVE_MODULE_PCI_DRIVER
+endif
+
+ifneq ($(shell grep "hlist_for_each_entry_safe" $(LINUXSRC)/include/linux/list.h | grep "tpos" > /dev/null 2>&1 && echo tpos),)
+  DISTRO_CFLAG += -DHAVE_OLD_HLIST
+endif
+
+ifneq ($(shell grep -o "csum_level" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DHAVE_CSUM_LEVEL
+endif
+
+ifneq ($(shell grep -o "build_skb" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DHAVE_BUILD_SKB
+  ifneq ($(shell grep "build_skb" $(LINUXSRC)/include/linux/skbuff.h | grep "int frag_size" > /dev/null 2>&1 && echo frag_size),)
+    DISTRO_CFLAG += -DHAVE_NEW_BUILD_SKB
+  endif
+endif
+
+ifneq ($(shell grep -o "inner_network_offset" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DHAVE_INNER_NETWORK_OFFSET
+endif
+
+ifeq ($(shell grep -o "skb_frag_size" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DNO_SKB_FRAG_SIZE
+endif
+
+ifneq ($(shell grep -so "n_proto" $(LINUXSRC)/include/net/flow_keys.h),)
+  DISTRO_CFLAG += -DHAVE_N_PROTO
+endif
+
+ifneq ($(shell grep -so "flow_keys" $(LINUXSRC)/include/net/flow_keys.h),)
+  DISTRO_CFLAG += -DHAVE_FLOW_KEYS
+endif
+
+ifneq ($(shell grep -o "skb_get_hash_raw" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DHAVE_GET_HASH_RAW
+endif
+
+ifneq ($(shell grep -o "PKT_HASH_TYPE" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DHAVE_SKB_HASH_TYPE
+endif
+
+ifneq ($(shell grep -o "SKB_GSO_UDP_TUNNEL_CSUM" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DHAVE_SKB_GSO_UDP_TUNNEL_CSUM
+else
+ifneq ($(shell grep -o "SKB_GSO_UDP_TUNNEL" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DHAVE_SKB_GSO_UDP_TUNNEL
+endif
+endif
+
+ifneq ($(shell grep -o "skb_frag_page" $(LINUXSRC)/include/linux/skbuff.h),)
+  DISTRO_CFLAG += -DHAVE_SKB_FRAG_PAGE
+endif
+
+ifneq ($(shell grep "skb_checksum_none_assert" $(LINUXSRC)/include/linux/skbuff.h > /dev/null 2>&1 && echo skb_cs_none_assert),)
+  DISTRO_CFLAG += -DHAVE_SKB_CHECKSUM_NONE_ASSERT
+endif
+
+ifneq ($(shell grep -o "min_tx_rate" $(LINUXSRC)/include/$(UAPI)/linux/if_link.h),)
+  DISTRO_CFLAG += -DHAVE_IFLA_TX_RATE
+endif
+
+ifneq ($(shell grep -o "IFLA_XDP_PROG_ID" $(LINUXSRC)/include/$(UAPI)/linux/if_link.h),)
+  DISTRO_CFLAG += -DHAVE_IFLA_XDP_PROG_ID
+endif
+
+ifneq ($(shell grep -o "dma_set_mask_and_coherent" $(LINUXSRC)/include/linux/dma-mapping.h),)
+  DISTRO_CFLAG += -DHAVE_SET_MASK_AND_COHERENT
+endif
+
+ifneq ($(shell grep -o "dma_set_coherent_mask" $(LINUXSRC)/include/linux/dma-mapping.h),)
+  DISTRO_CFLAG += -DHAVE_SET_COHERENT_MASK
+endif
+
+ifneq ($(shell grep -o "ndo_add_vxlan_port" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NDO_ADD_VXLAN
+endif
+
+ifneq ($(shell grep -o "struct dev_addr_list" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_DEV_ADDR_LIST
+endif
+
+ifneq ($(shell grep "netif_set_real_num_tx" $(LINUXSRC)/include/linux/netdevice.h > /dev/null 2>&1 && echo real_tx),)
+  DISTRO_CFLAG += -DHAVE_NETIF_SET_REAL_NUM_TX
+else
+  DISTRO_CFLAG += -DVOID_NETIF_SET_NUM_TX
+endif
+
+ifneq ($(shell grep "netif_set_real_num_tx" $(LINUXSRC)/include/linux/netdevice.h | grep void > /dev/null 2>&1 && echo netif_set_real),)
+  DISTRO_CFLAG += -DVOID_NETIF_SET_NUM_TX
+endif
+
+# Kernel-feature detection: each block greps the target kernel's headers in
+# $(LINUXSRC) and, when the symbol is present, appends a -DHAVE_* / -DNO_*
+# define to DISTRO_CFLAG.  bnxt_compat.h keys off these defines to paper
+# over API differences across the many kernel versions this driver builds on.
+ifneq ($(shell grep -o "netdev_tx_sent_queue" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NETDEV_TX_QUEUE_CTRL
+endif
+
+# flow_dissector.h only exists on newer kernels; the nested greps distinguish
+# the successive revisions of the skb_flow_dissect_flow_keys() signature.
+ifneq ($(shell ls $(LINUXSRC)/include/net/flow_dissector.h > /dev/null 2>&1 && echo flow),)
+  DISTRO_CFLAG += -DNEW_FLOW_KEYS -DHAVE_FLOW_KEYS
+  ifneq ($(shell grep -so "static inline bool skb_flow_dissect_flow_keys" $(LINUXSRC)/include/linux/skbuff.h),)
+    ifneq ($(shell grep -A 2 "static inline bool skb_flow_dissect_flow_keys" $(LINUXSRC)/include/linux/skbuff.h | grep -o "unsigned int flags"),)
+      DISTRO_CFLAG += -DHAVE_NEW_FLOW_DISSECTOR_WITH_FLAGS
+    endif
+    ifneq ($(shell grep -o "FLOW_DIS_ENCAPSULATION" $(LINUXSRC)/include/net/flow_dissector.h),)
+      DISTRO_CFLAG += -DHAVE_NEW_FLOW_DISSECTOR
+    endif
+  endif
+endif
+
+ifneq ($(shell ls $(LINUXSRC)/include/net/udp_tunnel.h > /dev/null 2>&1 && echo udp_tunnel),)
+  DISTRO_CFLAG += -DHAVE_UDP_TUNNEL_H
+endif
+
+# etherdevice.h helper availability.
+ifneq ($(shell grep -o "ether_addr_equal" $(LINUXSRC)/include/linux/etherdevice.h),)
+  DISTRO_CFLAG += -DHAVE_ETHER_ADDR_EQUAL
+endif
+
+ifneq ($(shell grep -o "ether_addr_copy" $(LINUXSRC)/include/linux/etherdevice.h),)
+  DISTRO_CFLAG += -DHAVE_ETHER_ADDR_COPY
+endif
+
+ifneq ($(shell grep -o "eth_broadcast_addr" $(LINUXSRC)/include/linux/etherdevice.h),)
+  DISTRO_CFLAG += -DHAVE_ETH_BROADCAST_ADDR
+endif
+
+ifneq ($(shell grep -o "eth_get_headlen" $(LINUXSRC)/include/linux/etherdevice.h),)
+  DISTRO_CFLAG += -DHAVE_ETH_GET_HEADLEN
+endif
+
+ifneq ($(shell grep -o "eth_hw_addr_random" $(LINUXSRC)/include/linux/etherdevice.h),)
+  DISTRO_CFLAG += -DHAVE_ETH_HW_ADDR_RANDOM
+endif
+
+# ethtool_ops probes; the "-A 2 ... void" grep detects the void-returning
+# variant of the get_rxnfc callback.
+ifneq ($(shell grep -o "get_rxnfc" $(LINUXSRC)/include/linux/ethtool.h),)
+  DISTRO_CFLAG += -DHAVE_RXNFC
+  ifneq ($(shell grep -A 2 "get_rxnfc" $(LINUXSRC)/include/linux/ethtool.h | grep -o "void"),)
+    DISTRO_CFLAG += -DHAVE_RXNFC_VOID
+  endif
+endif
+
+ifneq ($(shell grep -o "get_rxfh_key_size" $(LINUXSRC)/include/linux/ethtool.h),)
+  ifneq ($(shell grep -o "ETH_RSS_HASH_TOP" $(LINUXSRC)/include/linux/ethtool.h),)
+    DISTRO_CFLAG += -DHAVE_GET_RXFH_KEY_SIZE
+  endif
+endif
+
+ifneq ($(shell grep -o "get_rxfh_indir_size" $(LINUXSRC)/include/linux/ethtool.h),)
+  DISTRO_CFLAG += -DHAVE_RXFH_INDIR_SIZE
+endif
+
+ifneq ($(shell grep -o "set_phys_id" $(LINUXSRC)/include/linux/ethtool.h),)
+  DISTRO_CFLAG += -DHAVE_SET_PHYS_ID
+endif
+
+# UAPI ethtool definitions (25G link modes, IPv6 flow specs).
+ifneq ($(shell grep -o "ETHTOOL_LINK_MODE_25000baseCR_Full_BIT" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),)
+  DISTRO_CFLAG += -DHAVE_ETHTOOL_GLINKSETTINGS_25G
+endif
+
+ifneq ($(shell grep -o "ethtool_tcpip6_spec" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),)
+  DISTRO_CFLAG += -DHAVE_ETHTOOL_IP6_SPEC
+endif
+
+# CPU rmap (ARFS IRQ affinity) support; also disabled on RHEL kernels that
+# moved the field into net_device_extended or changed the notifier API.
+ifeq ($(shell grep -o "rx_cpu_rmap" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DNO_NETDEV_CPU_RMAP
+else
+  ifneq ($(shell grep -o "struct net_device_extended" $(LINUXSRC)/include/linux/netdevice.h || \
+                grep -o "irq_run_affinity_notifiers" $(LINUXSRC)/include/linux/interrupt.h),)
+    DISTRO_CFLAG += -DNO_NETDEV_CPU_RMAP
+  endif
+endif
+
+ifneq ($(shell grep -o "hw_features" $(LINUXSRC)/include/linux/netdevice.h),)
+  ifeq ($(shell grep -o "get_netdev_hw_features" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DNETDEV_HW_FEATURES
+  endif
+endif
+
+ifneq ($(shell grep -o "hw_enc_features" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DNETDEV_HW_ENC_FEATURES
+endif
+
+ifneq ($(shell grep -o "pci_vfs_assigned" $(LINUXSRC)/include/linux/pci.h),)
+  DISTRO_CFLAG += -DHAVE_PCI_VFS_ASSIGNED
+endif
+
+ifneq ($(shell grep -o "pci_num_vf" $(LINUXSRC)/include/linux/pci.h),)
+  DISTRO_CFLAG += -DHAVE_PCI_NUM_VF
+endif
+
+ifneq ($(shell grep -o "ndo_fix_features" $(LINUXSRC)/include/linux/netdevice.h),)
+  ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DNETDEV_FEATURE_CONTROL
+  endif
+  ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DNETDEV_FEATURE_CONTROL
+  endif
+endif
+
+ifneq ($(shell grep -o "ndo_rx_flow_steer" $(LINUXSRC)/include/linux/netdevice.h),)
+  ifeq ($(shell grep -o "netdev_rfs_info" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DNETDEV_RX_FLOW_STEER
+  endif
+endif
+
+ifneq ($(shell grep -o "ndo_busy_poll" $(LINUXSRC)/include/linux/netdevice.h),)
+  ifeq ($(shell grep -o "net_device_extended" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DNETDEV_BUSY_POLL
+  endif
+endif
+
+ifneq ($(shell grep -o "ndo_get_stats64" $(LINUXSRC)/include/linux/netdevice.h),)
+  ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DNETDEV_GET_STATS64
+  endif
+  ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DNETDEV_GET_STATS64
+  endif
+  ifneq ($(shell grep "ndo_get_stats64" $(LINUXSRC)/include/linux/netdevice.h | grep -o "void"),)
+    DISTRO_CFLAG += -DNETDEV_GET_STATS64_VOID
+  endif
+endif
+
+ifneq ($(shell grep -o "ndo_get_vf_config" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NDO_GET_VF_CONFIG
+endif
+
+ifneq ($(shell grep -A 2 "ndo_bridge_getlink" $(LINUXSRC)/include/linux/netdevice.h | grep -o "nlflags"),)
+  ifneq ($(shell grep -A 3 "ndo_dflt_bridge_getlink" $(LINUXSRC)/include/linux/rtnetlink.h | grep -o "filter_mask"),)
+    DISTRO_CFLAG += -DHAVE_NDO_BRIDGE_GETLINK
+  endif
+endif
+
+ifneq ($(shell grep -o "ndo_set_vf_link_state" $(LINUXSRC)/include/linux/netdevice.h),)
+  ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DHAVE_NDO_SET_VF_LINK_STATE
+  endif
+  ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DHAVE_NDO_SET_VF_LINK_STATE
+  endif
+endif
+
+ifneq ($(shell grep -o "ndo_set_vf_spoofchk" $(LINUXSRC)/include/linux/netdevice.h),)
+  ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DHAVE_VF_SPOOFCHK
+  endif
+  ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DHAVE_VF_SPOOFCHK
+  endif
+endif
+
+ifneq ($(shell grep -A 1 "ndo_set_vf_vlan" $(LINUXSRC)/include/linux/netdevice.h | grep -o "proto"),)
+  ifeq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DNEW_NDO_SET_VF_VLAN
+  endif
+endif
+
+ifneq ($(shell grep -o "ndo_set_vf_vlan_rh73" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NDO_SET_VF_VLAN_RH73
+endif
+
+ifneq ($(shell grep -o "ndo_setup_tc" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_SETUP_TC
+  ifneq ($(shell grep -o "struct tc_to_netdev" $(LINUXSRC)/include/linux/netdevice.h),)
+    DISTRO_CFLAG += -DHAVE_TC_TO_NETDEV
+    ifneq ($(shell grep -o "struct tc_mqprio_qopt" $(LINUXSRC)/include/linux/netdevice.h),)
+      DISTRO_CFLAG += -DHAVE_MQPRIO_QOPT
+    endif
+    ifneq ($(shell grep -A 1 "ndo_setup_tc" $(LINUXSRC)/include/linux/netdevice.h | grep -o "u32 chain_index"),)
+      DISTRO_CFLAG += -DHAVE_CHAIN_INDEX
+    endif
+  endif
+endif
+
+ifneq ($(shell grep -o "netdev_get_num_tc" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_GET_NUM_TC
+endif
+
+ifneq ($(shell grep -so "netdev_features_t" $(LINUXSRC)/include/linux/netdev_features.h ||     \
+        grep -o "netdev_features_t" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NETDEV_FEATURES_T
+endif
+
+ifneq ($(shell grep -o "ndo_fix_features" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NDO_FIX_FEATURES
+endif
+
+ifneq ($(shell grep -o "netif_set_real_num_rx" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NETIF_SET_REAL_NUM_RX
+endif
+
+ifneq ($(shell grep -o "netif_get_num_default_rss_queues" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NETIF_GET_DEFAULT_RSS
+endif
+
+ifneq ($(shell grep -o "ndo_vlan_rx_register" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_VLAN_RX_REGISTER
+endif
+
+ifneq ($(shell grep -o "ndo_xdp" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NDO_XDP
+  ifneq ($(shell ls $(LINUXSRC)/include/net/bpf_trace.h > /dev/null 2>&1 && echo bpf_trace),)
+    DISTRO_CFLAG += -DHAVE_BPF_TRACE
+  endif
+endif
+
+ifneq ($(shell grep -o "netdev_name" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NETDEV_NAME
+endif
+
+ifneq ($(shell grep -o "netdev_update_features" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NETDEV_UPDATE_FEATURES
+endif
+
+ifneq ($(shell grep -o "dev_port" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_DEV_PORT
+endif
+
+ifneq ($(shell grep -o "napi_hash_add" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NAPI_HASH_ADD
+endif
+
+ifneq ($(shell grep -o "napi_hash_del" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_NAPI_HASH_DEL
+endif
+
+ifneq ($(shell grep "napi_complete_done" $(LINUXSRC)/include/linux/netdevice.h | grep -o "bool"),)
+  DISTRO_CFLAG += -DHAVE_NEW_NAPI_COMPLETE_DONE
+endif
+
+ifneq ($(shell grep -o "min_mtu" $(LINUXSRC)/include/linux/netdevice.h),)
+  DISTRO_CFLAG += -DHAVE_MIN_MTU
+endif
+
+ifneq ($(shell grep -o "prandom_bytes" $(LINUXSRC)/include/linux/random.h),)
+  DISTRO_CFLAG += -DHAVE_PRANDOM_BYTES
+endif
+
+ifneq ($(shell grep -o "tcp_v6_check" $(LINUXSRC)/include/net/ip6_checksum.h),)
+  DISTRO_CFLAG += -DHAVE_TCP_V6_CHECK
+endif
+
+ifneq ($(shell grep -o "usleep_range" $(LINUXSRC)/include/linux/delay.h),)
+  DISTRO_CFLAG += -DHAVE_USLEEP_RANGE
+endif
+
+ifneq ($(shell grep -o "vzalloc" $(LINUXSRC)/include/linux/vmalloc.h),)
+  DISTRO_CFLAG += -DHAVE_VZALLOC
+endif
+
+ifneq ($(shell grep -o "pcie_get_minimum_link" $(LINUXSRC)/include/linux/pci.h),)
+  DISTRO_CFLAG += -DHAVE_PCIE_GET_MINIMUM_LINK
+endif
+
+ifneq ($(shell grep -o "pcie_capability_read_word" $(LINUXSRC)/include/linux/pci.h),)
+  DISTRO_CFLAG += -DHAVE_PCIE_CAPABILITY_READ_WORD
+endif
+
+ifneq ($(shell grep -o "PCIE_SPEED_2_5GT" $(LINUXSRC)/include/linux/pci.h),)
+  DISTRO_CFLAG += -DHAVE_PCIE_BUS_SPEED
+endif
+
+ifneq ($(shell grep -o "pci_is_bridge" $(LINUXSRC)/include/linux/pci.h),)
+  DISTRO_CFLAG += -DHAVE_PCI_IS_BRIDGE
+endif
+
+ifneq ($(shell ls $(LINUXSRC)/include/$(UAPI)/linux/net_tstamp.h > /dev/null 2>&1 && echo net_tstamp),)
+  ifneq ($(shell ls $(LINUXSRC)/include/linux/timecounter.h > /dev/null 2>&1 && echo timecounter),)
+    ifneq ($(shell ls $(LINUXSRC)/include/linux/timekeeping.h > /dev/null 2>&1 && echo timekeeping),)
+      ifneq ($(shell grep -o "HWTSTAMP_FILTER_PTP_V2_EVENT" $(LINUXSRC)/include/$(UAPI)/linux/net_tstamp.h),)
+        DISTRO_CFLAG += -DHAVE_IEEE1588_SUPPORT
+      endif
+    endif
+  endif
+endif
+
+# Final compiler flags: feature-detection defines plus the driver's fixed
+# configuration (SR-IOV, DCB, flash, RDMA ULP support all compiled in).
+EXTRA_CFLAGS += ${DISTRO_CFLAG} -g -DCHIMP_FW -D__LINUX -DCONFIG_BNXT_SRIOV -DCONFIG_BNXT_DCB -DCONFIG_BNXT_FLASHDEV -DHSI_DBG_DISABLE -DCONFIG_BNXT_RE
+
+cflags-y += $(EXTRA_CFLAGS)
+
+BCM_DRV = bnxt_en.ko
+# KERNELRELEASE is set when this Makefile is re-read by kbuild; in that case
+# only declare the module objects.  Otherwise (direct invocation) provide the
+# user-facing targets that re-enter the kernel build system.
+ifneq ($(KERNELRELEASE),)
+
+obj-m += bnxt_en.o
+bnxt_en-y := bnxt.o bnxt_ethtool.o bnxt_sriov.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o #decode_hsi.o
+
+else
+
+# NOTE(review): the native branch uses $(LINUX) while the cross branch uses
+# $(LINUXSRC) -- both presumably set earlier in this Makefile; confirm they
+# are both defined.  SUBDIRS= is the legacy kbuild spelling of M=.
+default:
+ifeq ($(CROSS_COMPILE),)
+       make -C $(LINUX) SUBDIRS=$(shell pwd) modules
+else ifneq ($(CROSS_COMPILE),)
+       make -C $(LINUXSRC) SUBDIRS=$(shell pwd) modules CROSS_COMPILE=$(CROSS_COMPILE) ARCH=$(ARCH)
+endif
+
+yocto_all:
+       $(MAKE) -C $(LINUXSRC) M=$(shell pwd)
+
+modules_install:
+       $(MAKE) -C $(LINUXSRC) M=$(shell pwd) modules_install
+
+endif
+
+# Build (via "default") and copy the module into $(PREFIX)/$(BCMMODDIR),
+# then refresh the module dependency database when installing into the
+# live root (empty PREFIX).
+install: default
+       mkdir -p $(PREFIX)/$(BCMMODDIR);
+       install -m 444 $(BCM_DRV) $(PREFIX)/$(BCMMODDIR);
+       @if [ "$(PREFIX)" = "" ]; then /sbin/depmod -a ;\
+       else echo " *** Run '/sbin/depmod -a' to update the module database.";\
+       fi
+
+# Fix: GNU make's special target is ".PHONY"; the original ".PHONEY"
+# spelling is silently ignored, so these targets were never marked phony
+# and a stray file named "install" or "clean" would shadow them.
+.PHONY: all clean install
+
+clean:
+       -rm -f bnxt.o bnxt.mod.c bnxt.mod.o .bnxt.*.cmd *.cmd *.markers *.order *.symvers decode_hsi.o .decode_*
+       -rm -rf .tmp_versions
+       -rm -rf bnxt_en.o bnxt_en.ko bnxt_en.mod.o bnxt_en.mod.c .bnxt_en.* bnxt_sriov.o .bnxt_sriov.* bnxt_ethtool.o .bnxt_ethtool.* bnxt_dcb.o .bnxt_dcb.* bnxt_ulp.o .bnxt_ulp.* bnxt_xdp.o .bnxt_xdp.*
+       -rm -f Module.markers Module.symvers modules.order
+
diff --git a/ubuntu/bnxt/README.TXT b/ubuntu/bnxt/README.TXT
new file mode 100644 (file)
index 0000000..52ec742
--- /dev/null
@@ -0,0 +1,948 @@
+                               README Notes
+                        Broadcom bnxt_en Linux Driver
+                              Version 1.8.1
+                                07/11/2017
+
+                            Broadcom Limited
+                         5300 California Avenue,
+                            Irvine, CA 92617
+
+                 Copyright (c) 2015 - 2016 Broadcom Corporation
+                   Copyright (c) 2016 - 2017 Broadcom Limited
+                           All rights reserved
+
+
+Table of Contents
+=================
+
+  Introduction
+  Limitations
+  Port Speeds
+  BNXT_EN Driver Dependencies
+  BNXT_EN Driver Settings
+  Autoneg
+  Energy Efficient Ethernet
+  Enabling Receive Side Scaling (RSS)
+  Enabling Accelerated Receive Flow Steering (RFS)
+  Enabling Busy Poll Sockets
+  Enabling SR-IOV
+  Virtual Ethernet Bridge (VEB)
+  Hardware QoS
+  PTP Hardware Clock
+  BNXT_EN Driver Parameters
+  BNXT_EN Driver Defaults
+  Statistics
+  Unloading and Removing Driver
+  Updating Firmware for Broadcom NetXtreme-C and NetXtreme-E devices
+  Updating Firmware for Broadcom Nitro device
+
+
+Introduction
+============
+
+This file describes the bnxt_en Linux driver for the Broadcom NetXtreme-C
+and NetXtreme-E BCM573xx and BCM574xx 10/25/40/50 Gbps Ethernet Network
+Controllers and Broadcom Nitro BCM58700 4-port 1/2.5/10 Gbps Ethernet Network
+Controller.
+
+
+Limitations
+===========
+
+1. The current version of the driver will compile on RHEL7.x, RHEL6.x,
+OLE6.x UEK, SLES12, SLES11SP1 and newer, most 3.x/4.x kernels, and some
+2.6 kernels starting from 2.6.32.
+
+2. Laser needs to be brought up for Nitro BCM58700 Ethernet controller
+using the following command, to bring up the Link.
+
+    i2cset -f -y 1 0x70 0 7 && i2cset -f -y 1 0x24 0xff 0x0
+
+3. Each device supports hundreds of MSIX vectors.  The driver will enable
+all MSIX vectors when it loads.  On some systems running on some kernels,
+the system may run out of interrupt descriptors.
+
+
+Port Speeds
+===========
+
+On some dual-port devices, the port speed of each port must be compatible
+with the port speed of the other port.  10Gbps and 25Gbps are not compatible
+speeds.  For example, if one port is set to 10Gbps and link is up, the other
+port cannot be set to 25Gbps.  However, the driver will allow incompatible
+speeds to be set on the two ports if link is not up yet.  Subsequent link up
+on one port will render the incompatible speed on the other port to become
+unsupported.  A console message like this may appear when this scenario
+happens:
+
+   bnxt_en 0000:04:00.0 eth0: Link speed 25000 no longer supported
+
+If the link is up on one port, the driver will not allow the other port to
+be set to an incompatible speed.  An attempt to do that will result in an
+error.  For example, eth0 and eth1 are the 2 ports of the dual-port device,
+eth0 is set to 10Gbps and link is up.
+
+   ethtool -s eth1 speed 25000
+   Cannot set new settings: Invalid argument
+     not setting speed
+
+This operation will only be allowed when the link goes down on eth0 or if
+eth0 is brought down using ifconfig/ip.
+
+On some NPAR (NIC partitioning) devices where one port is shared by multiple
+PCI functions, the port speed is pre-configured and cannot be changed by
+the driver.
+
+See Autoneg section below for additional information.
+
+
+BNXT_EN Driver Dependencies
+===========================
+
+The driver has no dependencies on user-space firmware packages as all necessary
+firmware must be programmed in NVRAM (or QSPI for Nitro BCM58700 devices).
+Starting with driver version 1.0.0, the goal is that the driver will be
+compatible with all future versions of production firmware. All future versions
+of the driver will be backwards compatible with firmware as far back as the
+first production firmware.
+
+The first production firmware is version 20.1.11 using Hardware Resource
+Manager (HWRM) spec. 1.0.0.
+
+ethtool -i displays the firmware versions.  For example:
+
+   ethtool -i eth0
+
+will show among other things:
+
+   firmware-version: 20.1.11/1.0.0 pkg 20.02.00.03
+
+In this example, the first version number (20.1.11) is the firmware version,
+the second version number (1.0.0) is the HWRM spec. version.  The third
+version number (20.02.00.03) is the package version of all the different
+firmware components in NVRAM.  The package version may not be available on
+all devices.
+
+Using kernels older than 4.7, if CONFIG_VLAN_MODULE kernel option is set as a
+module option, the vxlan.ko module must be loaded before the bnxt_en.ko module.
+
+
+BNXT_EN Driver Settings
+=======================
+
+The bnxt_en driver settings can be queried and changed using ethtool. The
+latest ethtool can be downloaded from
+ftp://ftp.kernel.org/pub/software/network/ethtool if it is not already
+installed. The following are some common examples on how to use ethtool. See
+the ethtool man page for more information. ethtool settings do not persist
+across reboot or module reload. The ethtool commands can be put in a startup
+script such as /etc/rc.local to preserve the settings across a reboot. On
+Red Hat distributions, "ethtool -s" parameters can be specified in the
+ifcfg-ethx scripts using the ETHTOOL_OPTS keyword.
+
+Some ethtool examples:
+
+1. Show current speed, duplex, and link status:
+
+   ethtool eth0
+
+2. Set speed:
+
+Example: Set speed to 10Gbps with autoneg off:
+
+   ethtool -s eth0 speed 10000 autoneg off
+
+Example: Set speed to 25Gbps with autoneg off:
+
+   ethtool -s eth0 speed 25000 autoneg off
+
+On some NPAR (NIC partitioning) devices, the port speed and flow control
+settings cannot be changed by the driver.
+
+See Autoneg section below for additional information on configuring
+Autonegotiation.
+
+3. Show offload settings:
+
+   ethtool -k eth0
+
+4. Change offload settings:
+
+Example: Turn off TSO (TCP Segmentation Offload)
+
+   ethtool -K eth0 tso off
+
+Example: Turn off hardware GRO and LRO
+
+   ethtool -K eth0 gro off lro off
+
+Example: Turn on hardware GRO only
+
+   ethtool -K eth0 gro on lro off
+
+Note that if both gro and lro are set, the driver will use hardware GRO.
+
+5. Show ring sizes:
+
+   ethtool -g eth0
+
+6. Change ring sizes:
+
+   ethtool -G eth0 rx N
+
+Note that the RX Jumbo ring size is set automatically when needed and
+cannot be changed by the user.
+
+7. Get statistics:
+
+   ethtool -S eth0
+
+8. Show number of channels (rings):
+
+   ethtool -l eth0
+
+9. Set number of channels (rings):
+
+   ethtool -L eth0 rx N tx N combined 0
+
+   ethtool -L eth0 rx 0 tx 0 combined M
+
+Note that the driver can support either all combined or all rx/tx channels,
+but not a combination of combined and rx/tx channels.  The default is
+combined channels to match the number of CPUs up to 8.  Combined channels
+use less system resources but may have lower performance than rx/tx channels
+under very high traffic stress.  rx and tx channels can have different numbers
+for rx and tx but must both be non-zero.
+
+10. Show interrupt coalescing settings:
+
+    ethtool -c eth0
+
+11. Set interrupt coalescing settings:
+
+    ethtool -C eth0 rx-frames N
+
+    Note that only these parameters are supported:
+    rx-usecs, rx-frames, rx-usecs-irq, rx-frames-irq,
+    tx-usecs, tx-frames, tx-usecs-irq, tx-frames-irq,
+    stats-block-usecs.
+
+12. Show RSS flow hash indirection table and RSS hash key:
+
+    ethtool -x eth0
+
+13. Run self test:
+
+    ethtool -t eth0
+
+    Note that only single function PFs can execute self tests.  If a PF has
+    active VFs, only online tests can be executed.
+
+14. See ethtool man page for more options.
+
+
+Autoneg
+=======
+
+The bnxt_en driver supports Autonegotiation of speed and flow control on
+most devices.  Some dual-port 25G devices do not support Autoneg.  Autoneg
+must be enabled for 10GBase-T devices.
+
+Note that parallel detection is not supported when autonegotiating
+50GBase-CR2, 40GBase-CR4, 25GBase-CR, 10GbE SFP+.  If one side is
+autonegotiating and the other side is not, link will not come up.
+
+25G and 50G advertisements are newer standards first defined in the 4.7
+kernel's ethtool interface.  To fully support these new advertisement speeds
+for autonegotiation, 4.7 (or newer) kernel and a newer ethtool utility are
+required.
+
+Below are some examples to illustrate the limitations when using 4.6 and
+older kernels:
+
+1. Enable Autoneg with all supported speeds advertised when the device
+currently has Autoneg disabled:
+
+   ethtool -s eth0 autoneg on advertise 0x0
+
+Note that to advertise all supported speeds (including 25G and 50G), the
+device must initially have Autoneg disabled.  advertise is a hexadecimal
+value specifying one or more advertised speeds.  0x0 is a special value that
+means all supported speeds.  See ethtool man page.  These advertise values
+are supported by the driver:
+
+0x020           1000baseT Full
+0x1000          10000baseT Full
+0x1000000       40000baseCR4 Full
+
+2. Enable Autoneg with only 10G advertised:
+
+   ethtool -s eth0 autoneg on advertise 0x1000
+
+or:
+
+   ethtool -s eth0 autoneg on speed 10000 duplex full
+
+
+3. Enable Autoneg with only 40G advertised:
+
+   ethtool -s eth0 autoneg on advertise 0x01000000
+
+4. Enable Autoneg with 40G and 10G advertised:
+
+   ethtool -s eth0 autoneg on advertise 0x01001000
+
+Note that the "Supported link modes" and "Advertised link modes" will not
+show 25G and 50G even though they may be supported or advertised.  For
+example, on a device that is supporting and advertising 10G, 25G, 40G, and
+50G, and linking up at 50G, ethtool will show the following:
+
+   ethtool eth0
+   Settings for eth0:
+           Supported ports: [ FIBRE ]
+           Supported link modes:   10000baseT/Full
+                                   40000baseCR4/Full
+           Supported pause frame use: Symmetric Receive-only
+           Supports auto-negotiation: Yes
+           Advertised link modes:  10000baseT/Full
+                                   40000baseCR4/Full
+           Advertised pause frame use: Symmetric
+           Advertised auto-negotiation: Yes
+           Speed: 50000Mb/s
+           Duplex: Full
+           Port: FIBRE
+           PHYAD: 1
+           Transceiver: internal
+           Auto-negotiation: on
+           Current message level: 0x00000000 (0)
+
+           Link detected: yes
+
+Using kernels 4.7 or newer and ethtool version 4.8 or newer, 25G and 50G
+advertisement speeds can be properly configured and displayed, without any
+of the limitations described above.  ethtool version 4.8 has a bug that
+ignores the advertise parameter, so it is recommended to use ethtool 4.10.
+Example ethtool 4.10 output showing 10G/25G/40G/50G advertisement settings:
+
+   ethtool eth0
+   Settings for eth0:
+           Supported ports: [ FIBRE ]
+           Supported link modes:   10000baseT/Full
+                                   40000baseCR4/Full
+                                   25000baseCR/Full
+                                   50000baseCR2/Full
+           Supported pause frame use: Symmetric Receive-only
+           Supports auto-negotiation: Yes
+           Advertised link modes:  10000baseT/Full
+                                   40000baseCR4/Full
+                                   25000baseCR/Full
+                                   50000baseCR2/Full
+           Advertised pause frame use: No
+           Advertised auto-negotiation: Yes
+           Speed: 50000Mb/s
+           Duplex: Full
+           Port: Direct Attach Copper
+           PHYAD: 1
+           Transceiver: internal
+           Auto-negotiation: on
+           Supports Wake-on: d
+           Wake-on: d
+           Current message level: 0x00000000 (0)
+
+           Link detected: yes
+
+These are the complete advertise values supported by the driver using 4.7
+kernel or newer and a compatible version of ethtool supporting the new
+values:
+
+0x020           1000baseT Full
+0x1000          10000baseT Full
+0x1000000       40000baseCR4 Full
+0x80000000      25000baseCR Full
+0x400000000     50000baseCR2 Full
+
+Note that the driver does not make a distinction on the exact physical
+layer encoding and media type for a link speed.  For example, at 50G, the
+device may support 50000baseCR2 and 50000baseSR2 for copper and multimode
+fiber cables respectively.  Regardless of what cabling is used for 50G,
+the driver currently uses only the ethtool value defined for 50000baseCR2
+to cover all variants of the 50G media types.  The same applies to all
+other advertise value for other link speeds listed above.
+
+
+Energy Efficient Ethernet
+=========================
+
+The driver supports Energy Efficient Ethernet (EEE) settings on 10GBase-T
+devices.  If enabled, and connected to a link partner that advertises EEE,
+EEE will become active.  EEE saves power by entering Low Power Idle (LPI)
+state when the transmitter is idle.  The downside is increased latency as
+it takes a few microseconds to exit LPI to start transmitting again.
+
+On a 10GBase-T device that supports EEE, the link up console message will
+include the current state of EEE.  For example:
+
+   bnxt_en 0000:05:00.0 eth0: NIC Link is Up, 10000 Mbps full duplex, Flow control: none
+   bnxt_en 0000:05:00.0 eth0: EEE is active
+
+The active state means that EEE is negotiated to be active during
+autonegotiation.  Additional EEE parameters can be obtained using ethtool:
+
+   ethtool --show-eee eth0
+
+   EEE Settings for eth0:
+           EEE status: enabled - active
+           Tx LPI: 8 (us)
+           Supported EEE link modes:  10000baseT/Full
+           Advertised EEE link modes:  10000baseT/Full
+           Link partner advertised EEE link modes:  10000baseT/Full
+
+The tx LPI timer of 8 microseconds is currently fixed and cannot be adjusted.
+EEE is only supported on 10GBase-T.  1GBase-T does not currently support EEE.
+
+To disable EEE:
+
+   ethtool --set-eee eth0 eee off
+
+To enable EEE, but disable LPI:
+
+   ethtool --set-eee eth0 eee on tx-lpi off
+
+This setting will negotiate EEE with the link partner but the transmitter on
+eth0 will not enter LPI during idle.  The link partner may independently
+choose to enter LPI when its transmitter is idle.
+
+
+Enabling Receive Side Scaling (RSS)
+===================================
+
+By default, the driver enables RSS by allocating receive rings to match
+the number of CPUs (up to 8).  Incoming packets are run through a 4-tuple
+or 2-tuple hash function for TCP/IP packets and IP packets respectively.
+Non fragmented UDP packets are run through a 4-tuple hash function on newer
+devices (2-tuple on older devices).  See below for more information about
+4-tuple and 2-tuple and how to configure it.
+
+The computed hash value will determine the receive ring number for the
+packet.  This way, RSS distributes packets to multiple receive rings while
+guaranteeing that all packets from the same flow will be steered to the same
+receive ring.  The processing of each receive ring can be done in parallel
+by different CPUs to achieve higher performance.  For example, irqbalance
+will distribute the MSIX vector of each RSS receive ring across CPUs.
+However, RSS does not guarantee even distribution or optimal distribution of
+packets.
+
+To disable RSS, set the number of receive channels (or combined channels) to 1:
+
+   ethtool -L eth0 rx 1 combined 0
+
+or
+
+   ethtool -L eth0 combined 1 rx 0 tx 0
+
+To re-enable RSS, set the number of receive channels or (combined channels) to
+a value higher than 1.
+
+The RSS hash can be configured for 4-tuple or 2-tuple for various flow types.
+4-tuple means that the source, destination IP addresses and layer 4 port
+numbers are included in the hash function.  2-tuple means that only the source
+and destination IP addresses are included.  4-tuple generally gives better
+results.  Below are some examples on how to set and display the hash function.
+
+To display the current hash for TCP over IPv4:
+
+   ethtool -u eth0 rx-flow-hash tcp4
+
+To disable 4-tuple (enable 2-tuple) for UDP over IPv4:
+
+   ethtool -U eth0 rx-flow-hash udp4 sd
+
+To enable 4-tuple for UDP over IPv4:
+
+   ethtool -U eth0 rx-flow-hash udp4 sdfn
+
+
+Enabling Accelerated Receive Flow Steering (RFS)
+================================================
+
+RSS distributes packets based on n-tuple hash to multiple receive rings.
+The destination receive ring of a packet flow is solely determined by the
+hash value.  This receive ring may or may not be processed in the kernel by
+the CPU where the sockets application consuming the packet flow is running.
+
+Accelerated RFS will steer incoming packet flows to the ring whose MSI-X
+vector will interrupt the CPU running the sockets application consuming
+the packets.  The benefit is higher cache locality of the packet data from
+the moment it is processed by the kernel until it is consumed by the
+application.
+
+Accelerated RFS requires n-tuple filters to be supported.  On older
+devices, only Physical Functions (PFs, see SR-IOV below) support n-tuple
+filters.  On the latest devices, n-tuple filters are supported and enabled
+by default on all functions.  Use ethtool to disable n-tuple filters:
+
+   ethtool -K eth0 ntuple off
+
+To re-enable n-tuple filters:
+
+   ethtool -K eth0 ntuple on
+
+After n-tuple filters are enabled, Accelerated RFS will be automatically
+enabled when RFS is enabled.  These are example steps to enable RFS on
+a device with 8 rx rings:
+
+echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
+echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
+echo 2048 > /sys/class/net/eth0/queues/rx-1/rps_flow_cnt
+echo 2048 > /sys/class/net/eth0/queues/rx-2/rps_flow_cnt
+echo 2048 > /sys/class/net/eth0/queues/rx-3/rps_flow_cnt
+echo 2048 > /sys/class/net/eth0/queues/rx-4/rps_flow_cnt
+echo 2048 > /sys/class/net/eth0/queues/rx-5/rps_flow_cnt
+echo 2048 > /sys/class/net/eth0/queues/rx-6/rps_flow_cnt
+echo 2048 > /sys/class/net/eth0/queues/rx-7/rps_flow_cnt
+
+These steps will set the global flow table to have 32K entries and each
+receive ring to have 2K entries.  These values can be adjusted based on
+usage.
+
+Note that for Accelerated RFS to be effective, the number of receive channels
+(or combined channels) should generally match the number of CPUs.  Use
+ethtool -L to fine-tune the number of receive channels (or combined channels)
+if necessary.  Accelerated RFS has precedence over RSS.  If a packet matches an
+n-tuple filter rule, it will be steered to the RFS specified receive ring.
+If the packet does not match any n-tuple filter rule, it will be steered
+according to RSS hash.
+
+To display the active n-tuple filters setup for Accelerated RFS:
+
+   ethtool -n eth0
+
+IPv6, GRE and IP-in-IP n-tuple filters are supported on 4.5 and newer kernels.
+
+
+Enabling Busy Poll Sockets
+==========================
+
+Using 3.11 and newer kernels (also backported to some major distributions),
+Busy Poll Sockets are supported by the bnxt_en driver if
+CONFIG_NET_RX_BUSY_POLL is enabled.  Individual sockets can set the
+SO_BUSY_POLL option, or it can be enabled globally using sysctl:
+
+    sysctl -w net.core.busy_read=50
+
+This sets the time to busy read the device's receive ring to 50 usecs.
+For socket applications waiting for data to arrive, using this method
+can decrease latency by 2 or 3 usecs typically at the expense of
+higher CPU utilization.  The value to use depends on the expected
+time the socket will wait for data to arrive.  Use 50 usecs as a
+starting recommended value.
+
+In addition, the following sysctl parameter should also be set:
+
+    sysctl -w net.core.busy_poll=50
+
+This sets the time to busy poll for socket poll and select to 50 usecs.
+50 usecs is a recommended value for a small number of polling sockets.
+
+
+Enabling SR-IOV
+===============
+
+The Broadcom NetXtreme-C and NetXtreme-E devices support Single Root I/O
+Virtualization (SR-IOV) with Physical Functions (PFs) and Virtual Functions
+(VFs) sharing the Ethernet port.  The same bnxt_en driver is used for both
+PFs and VFs under Linux.
+
+Only the PFs are automatically enabled.  If a PF supports SR-IOV, lspci
+will show that it has the SR-IOV capability and the total number of VFs
+supported.  To enable one or more VFs, write the desired number of VFs
+to the following sysfs file:
+
+    /sys/bus/pci/devices/<domain>:<bus>:<device>:<function>/sriov_numvfs
+
+For example, to enable 4 VFs on bus 82 device 0 function 0:
+
+    echo 4 > /sys/bus/pci/devices/0000:82:00.0/sriov_numvfs
+
+To disable the VFs, write 0 to the same sysfs file.  Note that to change
+the number of VFs, 0 must first be written before writing the new number
+of VFs.
+
+On older 2.6 kernels that do not support the sysfs method to enable SR-IOV,
+the driver uses the module parameter "num_vfs" to enable the desired number
+of VFs.  Note that this is a global parameter that applies to all PF
+devices in the system.  For example, to enable 4 VFs on all supported PFs:
+
+    modprobe bnxt_en num_vfs=4
+
+The 4 VFs of each supported PF will be enabled when the PF is brought up.
+
+The VF and the PF operate almost identically under the same Linux driver
+but not all operations supported on the PF are supported on the VF.
+
+The resources needed by each VF are assigned by the PF based on how many
+VFs are requested to be enabled and the resources currently used by the PF.
+It is important to fully configure the PF first with all the desired features,
+such as number of RSS/TSS channels, jumbo MTU, etc, before enabling SR-IOV.
+After enabling SR-IOV, there may not be enough resources left to reconfigure
+the PF.
+
+The resources are evenly divided among the VFs.  Enabling a large number of
+VFs will result in less resources (such as RSS/TSS channels) for each VF.
+
+Refer to other documentation on how to map a VF to a VM or a Linux Container.
+
+Some attributes of a VF can be set using iproute2 through the PF.  SR-IOV
+must be enabled by setting the number of desired VFs before any attributes
+can be set.  Some examples:
+
+1. Set VF MAC address:
+
+   ip link set <pf> vf <vf_index> mac <vf_mac>
+
+Example:
+
+   ip link set eth0 vf 0 mac 00:12:34:56:78:9a
+
+Note that if the VF MAC address is not set as shown, a random MAC address will
+be used for the VF.  If the VF MAC address is changed while the VF driver has
+already brought up the VF, it is necessary to bring down and up the VF before
+the new MAC address will take effect.
+
+2. Set VF link state:
+
+   ip link set <pf> vf <vf_index> state auto|enable|disable
+
+The default is "auto" which reflects the true link state.  Setting the VF
+link to "enable" allows loopback traffic regardless of the true link state.
+
+Example:
+
+   ip link set eth0 vf 0 state enable
+
+3. Set VF default VLAN:
+
+   ip link set <pf> vf <vf_index> vlan <vlan id>
+
+Example:
+
+   ip link set eth0 vf 0 vlan 100
+
+4. Set VF MAC address spoof check:
+
+   ip link set <pf> vf <vf_index> spoofchk on|off
+
+Example:
+
+   ip link set eth0 vf 0 spoofchk on
+
+Note that spoofchk is only effective if a VF MAC address has been set as
+shown in #1 above.
+
+
+Virtual Ethernet Bridge (VEB)
+=============================
+
+The NetXtreme-C/E devices contain an internal hardware Virtual Ethernet
+Bridge (VEB) to bridge traffic between virtual ports enabled by SR-IOV.
+VEB is normally turned on by default.  VEB can be switched to VEPA
+(Virtual Ethernet Port Aggregator) mode if an external VEPA switch is used
+to provide bridging between the virtual ports.
+
+Use the bridge command to switch between VEB/VEPA mode.  Note that only
+the PF driver will accept the command for all virtual ports belonging to the
+same physical port.  The bridge mode cannot be changed if there are multiple
+PFs sharing the same physical port (e.g. NPAR or Multi-Host).
+
+To set the bridge mode:
+
+   bridge link set dev <pf> hwmode {veb/vepa}
+
+To show the bridge mode:
+
+   bridge link show dev <pf>
+
+Example:
+
+   bridge link set dev eth0 hwmode vepa
+
+Note that older firmware does not support VEPA mode.
+
+
+Hardware QoS
+============
+
+The NetXtreme-C/E devices support hardware QoS.  The hardware has multiple
+internal queues, each can be configured to support different QoS attributes,
+such as latency, bandwidth, lossy or lossless data delivery.  These QoS
+attributes are specified in the IEEE Data Center Bridging (DCB) standard
+extensions to Ethernet.  DCB parameters include Enhanced Transmission
+Selection (ETS) and Priority-based Flow Control (PFC).  In a DCB network,
+all traffic will be classified into multiple Traffic Classes (TCs), each
+of which is assigned different DCB parameters.
+
+Typically, all traffic is VLAN tagged with a 3-bit priority in the VLAN
+tag.  The VLAN priority is mapped to a TC.  For example, a network with
+3 TCs may have the following priority to TC mapping:
+
+0:0,1:0,2:0,3:2,4:1,5:0,6:0,7:0
+
+This means that priorities 0,1,2,5,6,7 are mapped to TC0, priority 3 to TC2,
+and priority 4 to TC1.  ETS allows bandwidth assignment for the TCs.  For
+example, the ETS bandwidth assignment may be 40%, 50%, and 10% to TC0, TC1,
+and TC2 respectively.  PFC provides link level flow control for each VLAN
+priority independently.  For example, if PFC is enabled on VLAN priority 4,
+then only TC1 will be subject to flow control without affecting the other
+two TCs.
+
+Typically, DCB parameters are automatically configured using the DCB
+Capabilities Exchange protocol (DCBX).  The bnxt_en driver currently
+supports the Linux lldpad DCBX agent.  lldpad supports all versions of
+DCBX but the bnxt_en driver currently only supports the IEEE DCBX version.
+Typically, the DCBX enabled switch will convey the DCB parameters to lldpad
+which will then send the hardware QoS parameters to bnxt_en to configure
+the device.  Refer to the lldpad(8) and lldptool(8) man pages for further
+information on how to setup the lldpad DCBX agent.
+
+To support hardware TCs, the proper Linux qdisc must be used to classify
+outgoing traffic into their proper hardware TCs.  For example, the mqprio
+qdisc may be used.  A simple example using mqprio qdisc is illustrated below.
+Refer to the tc-mqprio(8) man page for more information.
+
+   tc qdisc add dev eth0 root mqprio num_tc 3 map 0 0 0 2 1 0 0 0 hw 1
+
+The above command creates the mqprio qdisc with 3 hardware TCs.  The priority
+to TC mapping is the same as the example at the beginning of the section.
+The bnxt_en driver will create 3 groups of tx rings, with each group mapping
+to an internal hardware TC.
+
+Once this is created, SKBs with different priorities will be mapped to the
+3 TCs according to the specified map above.  Note that this SKB priority
+is only used to direct packets within the kernel stack to the proper hardware
+ring.  If the outgoing packets are VLAN tagged, the SKB priority does not
+automatically map to the VLAN priority of the packet.  The VLAN egress map
+has to be set up to have the proper VLAN priority for each packet.
+
+In the current example, if VLAN 100 is used for all traffic, the VLAN egress
+map can be set up like this:
+
+   ip link add link eth0 name eth0.100 type vlan id 100 \
+      egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+
+This creates a one-to-one mapping of SKB priority to VLAN egress priority.
+In other words, SKB priority 0 maps VLAN priority 0, SKB priority 1 maps to
+VLAN priority 1, etc.  This one-to-one mapping should generally be used.
+
+If each TC has more than one ring, TSS will be performed to select a tx ring
+within the TC.
+
+To display the current qdisc configuration:
+
+   tc qdisc show
+
+Example output:
+
+    qdisc mqprio 8010: dev eth0 root  tc 3 map 0 0 0 2 1 0 0 0 0 0 0 0 0 0 0 0
+                 queues:(0:3) (4:7) (8:11)
+
+The example above shows that bnxt_en has allocated 4 tx rings for each of the
+3 TCs.  SKBs with priorities 0,1,2,5,6,7 will be transmitted using tx rings
+0 to 3 (TC0).  SKBs with priority 4 will be transmitted using rings 4 to 7
+(TC1).  SKBs with priority 3 will be transmitted using rings 8 to 11 (TC2).
+
+Next, SKB priorities have to be set for different applications so that the
+packets from the different applications will be mapped to the proper TCs.
+By default, the SKB priority is set to 0.  There are multiple methods to set
+SKB priorities.  net_prio cgroup is a convenient way to do this.  Refer to the
+link below for more information:
+
+https://www.kernel.org/doc/Documentation/cgroup-v1/net_prio.txt
+
+As mentioned previously, the DCB attributes of each TC are normally configured
+by the DCBX agent in lldpad.  It is also possible to set the DCB attributes
+manually in a simple network or for test purposes.  The following example
+will manually set up eth0 with the example DCB local parameters mentioned at
+the beginning of the section.
+
+   lldpad -d
+   lldptool -T -i eth0 -V ETS-CFG tsa=0:ets,1:ets,2:ets \
+            up2tc=0:0,1:0,2:0,3:2,4:1,5:0,6:0,7:0 \
+            tcbw=40,50,10
+   lldptool -T -i eth0 -V PFC enabled=4
+
+Note that the ETS bandwidth distribution will only be evident when all
+traffic classes are transmitting and reaching the link capacity.
+
+See lldptool-ets(8) and lldptool-pfc(8) man pages for more information.
+
+On an NPAR device with multiple partitions sharing the same network port,
+DCBX cannot be run on more than one partition.  In other words, the lldpad
+adminStatus can be set to rxtx on no more than one partition.  The same is
+true for SRIOV virtual functions.  DCBX cannot be run on the VFs.
+
+On these multi-function devices, the hardware TCs are generally shared
+between all the functions.  The DCB parameters negotiated and setup on
+the main function (NPAR or PF function) will be the same on the other
+functions sharing the same port.  Note that the standard lldptool will
+not be able to show the DCB parameters on the other functions which have
+adminStatus disabled.
+
+
+PTP Hardware Clock
+==================
+
+The NetXtreme-C/E devices support PTP Hardware Clock which provides hardware
+timestamps for PTP v2 packets.  The Linux PTP project contains more
+information about this feature.  A newer 4.x kernel and newer firmware
+(2.6.134 or newer) are required to use this feature.  Only the first PF
+of the network port has access to the hardware PTP feature.  Use ethtool -T
+to check if PTP Hardware Clock is supported.
+
+
+BNXT_EN Module Parameters
+=========================
+
+On newer 3.x/4.x kernels, the driver does not support any driver parameters.
+Please use standard tools (sysfs, ethtool, iproute2, etc) to configure the
+driver.
+
+The only exception is the "num_vfs" module parameter supported on older 2.6
+kernels to enable SR-IOV.  Please see the SR-IOV section above.
+
+
+BNXT_EN Driver Defaults
+=======================
+
+Speed :                    1G/2.5G/10G/25G/40G/50G depending on the board.
+
+Flow control :             None
+
+MTU :                      1500 (range 60 - 9500)
+
+Rx Ring Size :             511 (range 0 - 2047)
+
+Rx Jumbo Ring Size :       2044 (range 0 - 8191) automatically adjusted by the
+                           driver.
+
+Tx Ring Size :             511 (range (MAX_SKB_FRAGS+1) - 2047)
+
+                           MAX_SKB_FRAGS varies on different kernels and
+                           different architectures. On a 2.6/3.x kernel for
+                           x86, MAX_SKB_FRAGS is 18.
+
+Number of RSS/TSS channels: Up to 8 combined channels to match the number of
+                           CPUs
+
+TSO :                      Enabled
+
+GRO (hardware) :           Enabled
+
+LRO :                      Disabled
+
+Coalesce rx usecs :        12 usec
+
+Coalesce rx usecs irq :    1 usec
+
+Coalesce rx frames :       15 frames
+
+Coalesce rx frames irq :   1 frame
+
+Coalesce tx usecs :        25 usec
+
+Coalesce tx usecs irq :    2 usec
+
+Coalesce tx frames :       30 frames
+
+Coalesce tx frames irq :   2 frames
+
+Coalesce stats usecs :     1000000 usec (range 250000 - 1000000, 0 to disable)
+
+
+Statistics
+==========
+
+The driver reports all major standard network counters to the stack.  These
+counters are reported in /proc/net/dev or by other standard tools such as
+netstat -i.
+
+Note that the counters are updated every second by the firmware by
+default.  To increase the frequency of these updates, ethtool -C can
+be used to increase the frequency to 0.25 seconds if necessary.
+
+More detailed statistics are reported by ethtool -S.  Some of the counters
+reported by ethtool -S are for diagnostics purposes only.  For example,
+the "rx_drops" counter reported by ethtool -S includes dropped packets
+that don't match the unicast and multicast filters in the hardware.  A
+non-zero count is normal and does not generally reflect any error conditions.
+This counter should not be confused with the "RX-DRP" counter reported by
+netstat -i.  The latter reflects dropped packets due to buffer overflow
+conditions.
+
+Another example is the "tpa_aborts" counter reported by ethtool -S.  It
+counts the LRO (Large Receive Offload) aggregation aborts due to normal
+TCP conditions.  A high tpa_aborts count is generally not an indication
+of any errors.
+
+The "rx_ovrsz_frames" counter reported by ethtool -S may count all
+packets bigger than 1518 bytes when using earlier versions of the firmware.
+Newer versions of the firmware have reprogrammed the counter to count
+packets bigger than 9600 bytes.
+
+
+Unloading and Removing Driver
+=============================
+
+rmmod bnxt_en
+
+Note that if SR-IOV is enabled and there are active VFs running in VMs, the
+PF driver should never be unloaded.  It can cause catastrophic failures such
+as kernel panics or reboots.  The only time the PF driver can be unloaded
+with active VFs is when all the VFs and the PF are running in the same host
+kernel environment with one driver instance controlling the PF and all the
+VFs.  Using Linux Containers is one such example where the PF driver can be
+unloaded to gracefully shutdown the PF and all the VFs.
+
+
+Updating Firmware for Broadcom NetXtreme-C and NetXtreme-E devices
+==================================================================
+
+Controller firmware may be updated using the Linux request_firmware interface
+in conjunction with the ethtool "flash device" interface.
+
+Using the ethtool utility, the controller's boot processor firmware may be
+updated by copying the 2 "boot code" firmware files to the local /lib/firmware/
+directory:
+
+    cp bc1_cm_a.bin bc2_cm_a.bin /lib/firmware
+
+and then issuing the following 2 ethtool commands (both are required):
+
+    ethtool -f <device> bc1_cm_a.bin 4
+
+    ethtool -f <device> bc2_cm_a.bin 18
+
+NVM packages (*.pkg files) containing controller firmware, microcode, 
+pre-boot software and configuration data may be installed into a controller's
+NVRAM using the ethtool utility by first copying the .pkg file to the local
+/lib/firmware/ directory and then executing a single command:
+
+    ethtool -f <device> <filename.pkg>
+
+Note: do not specify the full path to the file on the ethtool -f command-line.
+
+Note: root privileges are required to successfully execute these commands.
+
+After "flashing" new firmware into the controller's NVRAM, a cold restart of 
+the system is required for the new firmware to take effect. This requirement
+will be removed in future firmware and driver versions.
+
+Updating Firmware for Broadcom Nitro device
+===========================================
+
+Nitro controller firmware should be updated from the U-Boot prompt by
+following the steps below
+
+    sf probe
+    sf erase 0x50000 0x30000
+    tftpboot 0x85000000 <location>/chimp_xxx.bin
+    sf write 0x85000000 0x50000 <size in hex>
diff --git a/ubuntu/bnxt/RELEASE.TXT b/ubuntu/bnxt/RELEASE.TXT
new file mode 100644 (file)
index 0000000..9e59f2b
--- /dev/null
@@ -0,0 +1,1594 @@
+                              Release Notes
+                       Broadcom bnxt_en Linux Driver
+                              Version 1.8.1
+                                07/11/2017
+
+                            Broadcom Limited
+                         5300 California Avenue,
+                            Irvine, CA 92617
+
+                 Copyright (c) 2015 - 2016 Broadcom Corporation
+                   Copyright (c) 2016 - 2017 Broadcom Limited
+                           All rights reserved
+
+v1.8.1 (July 11, 2017)
+======================
+This version uses HWRM 1.8.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1.  (CTRL-25863) Fix warnings and errors in netpoll mode.
+        2.  (CTRL-25665) Fix uninitialized ethtool -S counters when
+            interface is down on older kernels.
+        3.  (CTRL-25917) Fix crash when ifconfig is retrieving counters
+            while device is closing.
+        4.  (CTRL-26071) Fix occasional failures when changing ethtool -L
+            channels from combined to rx/tx or vice versa.
+        5.  Fix SRIOV on big-endian systems.
+
+Enhancements:
+        1.  Fix compile errors on 4.13 kernel.
+        2.  Allow users to disable periodic counter updates for debugging
+            purposes.
+        3.  Add PTP hardware timestamp support (requires HWRM 1.8.0)
+        4.  Update to HWRM 1.8.0 with better support for link duplex
+            reporting.
+
+v1.7.30 (June 16, 2017)
+=======================
+This version uses HWRM 1.7.8.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1.  Fix potential memory corruption when lots of RFS filters are in
+            use. (backported from 1.7.53)
+        2.  Fixed compile errors on 4.11 kernel. (backported from 1.7.51)
+
+Enhancements:
+        1.  (CTRL-25004) Add VEPA support. (backported from 1.7.54)
+        2.  Fix compile errors on RHEL7.4. (backported from 1.7.53)
+
+v1.7.25 (April 10, 2017)
+========================
+This version uses HWRM 1.7.6.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fixed regression in v1.7.24 on the kernels that the fix was
+           intended for (e.g. 3.10).
+
+v1.7.24 (April 10, 2017)
+=======================
+This version uses HWRM 1.7.6.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fixed VF tx rate attribute on some older kernels (e.g. 3.10).
+
+v1.7.23 (April 8, 2017)
+=======================
+This version uses HWRM 1.7.6.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-25309) Do not allow lldpad to set host based DCBX if
+           firmware DCBX agent is running.
+
+v1.7.22 (April 7, 2017)
+=======================
+This version uses HWRM 1.7.6.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-25309) Report to DCBNL if firmware DCBX agent is enabled
+           so that lldpad will not try to configure the device.
+        2. Fix VF qos attribute reporting.
+
+Enhancements:
+        1. (CTRL-25284) Use up to 8 RSS rings by default on RHEL6.3 and
+           other similar old kernels.
+
+v1.7.21 (Mar 31, 2017)
+=======================
+This version uses HWRM 1.7.6.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-24874) Cap the use of MSIX with max available completion
+           rings.
+
+v1.7.20 (Mar 29, 2017)
+=======================
+This version uses HWRM 1.7.5 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. DMA unmapping bug during shutdown in XDP mode.
+        2. Removed ethtool -d which is unused and buggy.
+        3. Fixed compile errors on 4.12 rc kernels.
+        4. (CTRL-25056) Fixed NULL pointer crash in one open error path.
+
+Enhancements:
+        1. (CTRL-24492) Use short TX BDs for XDP.
+
+v1.7.9 (Mar 08, 2017)
+=====================
+This version uses HWRM 1.7.5 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Do not call firmware to get the list of available selftests
+           unless it is a single PF on a port.
+        2. Do not allow offline tests if there are active VFs.
+
+v1.7.8 (Mar 07, 2017)
+=====================
+This version uses HWRM 1.7.5 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. GPL clean up.  Remove miscellaneous non-GPL files that are not
+           needed.
+        2. Ignore 0 value in autoneg supported speed from firmware.
+
+v1.7.7 (Mar 04, 2017)
+=====================
+This version uses HWRM 1.7.5 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. Updated to HWRM spec 1.7.5.
+
+v1.7.6 (Mar 03, 2017)
+=====================
+This version uses HWRM 1.7.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24746) Fix phy loopback self test failure when link
+           speed is autoneg.
+
+v1.7.5 (Mar 01, 2017)
+=====================
+This version uses HWRM 1.7.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24724) Perform function reset earlier so that reserved
+           rings will not be cleared by firmware.
+
+Enhancements:
+        1. (CTRL-23660) Add ethtool -t selftest supported by HWRM 1.7.4.
+        2. Disallow lldpad if firmware DCBX/LLDP agent is running.
+        3. Notify RDMA driver during tx timeout.
+
+v1.7.4 (Feb 20, 2017)
+=====================
+This version uses HWRM 1.7.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (SOC-5134) Reject driver probe against all bridge devices.
+        2. (CTRL-24802) Fix NULL pointer dereference when open fails.
+        3. (CTRL-24803) Fix PCI cleanup when probe fails.
+
+Enhancements:
+        1. (CTRL-23667) Update to HWRM 1.7.1 spec to support VF MAC address
+           spoof check.
+
+v1.7.3 (Feb 13, 2017)
+=====================
+This version uses HWRM 1.7.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24618) Fix ethtool -l pre-set max combined channels.
+        2. (CTRL-24206) Fix bugs in retry NVRAM install update operation.
+
+Enhancements:
+        1. (CTRL-24491, CTRL-24492) Merge with latest upstream XDP
+           implementation.
+
+v1.7.2 (Feb 6, 2017)
+====================
+This version uses HWRM 1.7.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. Removed -DNEW_TX compile option.  It will always use the new
+           logic if HWRM spec is 1.6.1 or newer.
+
+v1.7.1 (Feb 6, 2017)
+====================
+This version uses HWRM 1.7.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-24206) Retry NVRAM install update with defragmentation.
+
+v1.7.0 (Feb 3, 2017)
+====================
+This version uses HWRM 1.7.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fix compile errors on Ubuntu 4.2 kernel.
+
+Enhancements:
+        1. (CTRL-24646) Add support for 57452 and 57454 devices.
+
+v1.6.10 (Jan 26, 2017)
+======================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24550, CTRL-24540) Fix compile errors on various
+           distributions introduced by the Yocto Makefile changes for SoC.
+
+v1.6.9 (Jan 24, 2017)
+=====================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24522) Fix compile warnings on RHEL6.9.
+        2. (CTRL-24491) Fix compile errors on newer kernels supporting XDP.
+        3. (CTRL-24491) Fix compile errors on early 4.x kernels.
+
+v1.6.8 (Jan 23, 2017)
+=====================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fix a kernel mutex deadlock situation during some link event
+           scenarios.
+        2. (CTRL-24496) Allow NTUPLE to be enabled on VFs.
+        3. (CTRL-24365) Silence warning messages from NPAR and VFs by
+           skipping phy configuration unless it is a single PF.
+        4. Fix poor TPA GRO performance on Cu devices when TCP timestamp
+           is not enabled.
+        5. Fix memory leak when setting DCBX APP TLV.
+        6. Fix ethtool -p kernel compatibility checks.
+
+v1.6.7 (Jan 09, 2017)
+=====================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24308) Fix proper punctuations in bnxt_setup_tc() error
+           message.
+        2. (CTRL-24269) Fix compiler warnings when CONFIG_RFS_ACCEL is
+           not defined.
+
+Enhancements:
+        1. (CTRL-24268) Compile RDMA interface for all supported kernels.
+        2. (CTRL-23664) Add GRE and IP encapsulated NTUPLE filter support.
+        3. Add ethtool -p support.
+        4. Add SRIOV hooks for RDMA interface.
+
+v1.6.6 (Dec 28, 2016)
+=====================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-23746) Reserve TX rings when changing the number of TCs
+           (-DNEW_TX compile option required).
+        2. (CTRL-24283) Handle one available RX ring gracefully by
+           disabling TPA and HDS.
+
+Enhancements:
+        1. Added support for 4.10 kernel's new core MTU structure.
+
+v1.6.5 (Dec 23, 2016)
+=====================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24266) Fix regression on VF driver.
+
+v1.6.4 (Dec 22, 2016)
+=====================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24213) Disable MSIX before PCI shutdown to prevent
+           PCIE unsupported request error.
+        2. (CTRL-23907, CTRL-24172) Set default completion ring for
+           async event, otherwise all async events may end up going
+           to bnxt_re's completion ring.
+
+v1.6.3 (Dec 19, 2016)
+======================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-24084, CTRL-24149, CTRL-24191) Fix Makefile for proper
+           RHEL7.3 compatibility.
+        2. Fixed endian conversion for DCBX protocol_id.
+
+v1.6.2 (Dec 14, 2016)
+======================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-23854) Added support for ipv6 flows on RFS.
+        2. (CTRL-23683) Implemented new TX ring allocation scheme as a compile
+                        option.
+        3. (CTRL-23653) Display FEC settings during link up.
+
+v1.6.1 (Dec 08, 2016)
+======================
+This version uses HWRM 1.6.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fixed hang issue during modprobe introduced in 1.6.0 when
+           running on HWRM 1.6.0 or above firmware.
+
+Enhancements:
+        1. Updated to 1.6.1 firmware HSI.
+
+v1.6.0 (Dec 06, 2016)
+======================
+This version uses HWRM 1.6.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-23711) Fix IRQ disable sequence during shutdown.
+        2. (CTRL-23724) Improve resource allocation for VFs.
+        3. Fix TPA/GRO code path on Cu+ devices.
+
+Enhancements:
+        1. Updated to 1.6.0 firmware HSI.
+        2. (CTRL-22565) Improve interface with RDMA driver.
+        3. Added support for improved Cu+ RFS mode which also supports RFS
+           on VFs (requires new HWRM 1.6.0 firmware).
+        4. DCBNL setapp/delapp implemented according to new spec.
+
+v1.5.13 (Nov 17, 2016)
+======================
+This version uses HWRM 1.5.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-23724) Fix bnxt_re registration failure.
+
+v1.5.12 (Nov 16, 2016)
+=====================
+This version uses HWRM 1.5.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. Updated HSI file to include 1.5.0 RoCE HSI.
+        2. Added ethtool -u|-U to display/set RSS hash (see README.TXT).
+        3. Better DCB support for NPAR/SR-IOV (see README.TXT).
+
+v1.5.11 (Nov 9, 2016)
+=====================
+This version uses HWRM 1.5.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-22565) Improve NULL pointer fix when bnxt_re registers
+           using new MSIX scheme (initial Fix in v1.5.7).
+
+v1.5.10 (Nov 8, 2016)
+=====================
+This version uses HWRM 1.5.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Move function reset from open to probe.  With the new MSIX
+           scheme, doing function reset in open may free all resources
+           allocated by bnxt_re.
+
+v1.5.9 (Nov 4, 2016)
+====================
+This version uses HWRM 1.5.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-22565) Improve early MSIX enablement scheme to prevent
+           race conditions with RoCE driver.
+
+v1.5.8 (Nov 4, 2016)
+====================
+This version uses HWRM 1.5.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-23629) Fixed RoCE registration regression caused by
+           fix for CTRL-23568.
+
+v1.5.7 (Nov 2, 2016)
+====================
+This version uses HWRM 1.5.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-23568) Fixed RoCE MSIX regression seen on VF.
+        2. (CTRL-23475) Fixed VF virtual link state regression since
+           firmware 20.6.0.
+        3.  (CTRL-22565) Fix NULL pointer dereference when bnxt_re loads
+           before network is up.
+
+Enhancements:
+        1. Added per priority PFC statistics.
+        2. Changed egress VLAN priority scheme.
+
+v1.5.6 (Oct 26, 2016)
+====================
+This version uses HWRM 1.5.4 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fixed hang issue when enabling PFC mapped to TC 2 and above.
+        2. (CTRL-23288) Do full function reset after suspend/resume.
+
+Enhancements:
+        1. (CTRL-22565) Enable MSIX early during PCI probe so that MSIX
+           will always be available whether the network interface is up or
+           down.
+
+v1.5.5 (Oct 19, 2016)
+====================
+This version uses HWRM 1.5.3 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-23432) Send DCBX RoCE app TLV data to firmware.
+
+v1.5.4 (Oct 6, 2016)
+====================
+This version uses HWRM 1.5.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. Updated to HWRM 1.5.2 spec to use the new FORCE_LINK_DWN bit.
+
+v1.5.3 (Sep 23, 2016)
+=====================
+This version uses HWRM 1.5.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-23107, CTRL-23121, CTRL-23122) Fixed memory corruption
+           after setting ETS TLV under some conditions.
+        2. Fix compile error on kernels without CONFIG_DCB enabled.
+        3. Fix compile error on kernels without RTC_LIB enabled.
+        4. Fix compile error on SLES11SP4.
+
+v1.5.2 (Sep 16, 2016)
+====================
+This version uses HWRM 1.5.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fixed Compilation issue on 3.7 Debian kernel.
+        2. Fixed autoneg issues on some dual-port devices.
+        3. (CTRL-22897) Reserved some statistics contexts for RoCE.
+
+Enhancements:
+        1. Added UDP RSS for 57X1X (Cu+) devices.
+        2. Added initial hardware QoS support.
+
+v1.5.1 (Sep 2, 2016)
+====================
+This version uses HWRM 1.5.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fixed TX PUSH operation on ARM64 and similar architectures.
+        2. (CTRL-22870) Fixed compile issue on Debian 7.7 (3.2.0-4 kernel)
+
+Enhancements:
+        1. Updated to support HWRM spec 1.5.1.
+        2. (CTRL-22776) Added missing 57407 and 5741X NPAR PCI IDs.
+        3. (CTRL-22886) Improved the setting of VF link state.
+        4. (CTRL-22737) Added ethtool -r support.
+
+v1.5.0 (Aug 25, 2016)
+======================
+This version uses HWRM 1.5.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-22748) Fix ethtool -l|-L minor inconsistency.
+        2. Pad TX packets below 52 bytes to allow traffic to loopback to BMC.
+
+Enhancements:
+        1. Updated to support HWRM spec 1.5.0.
+        2. (CTRL-21966) Added secure firmware update.
+        3. Enable software GRO on 2.6 kernels.
+        4. Removed "Single-port/Dual-port" from device strings.
+
+v1.3.23 (Jul 15, 2016)
+======================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fix 5731X/5741X GRO logic.
+
+Enhancements:
+        1. (CTRL-22449) Added support for RoCE statistics context.
+        2. (CTRL-22525) Added all NPAR and 57416/57417 device IDs.
+        3. (CTRL-22021) Added support for 58700 Nitro devices.
+        4. Improved ntuple filters by including destination MAC in the
+           filter.
+
+v1.3.22 (Jun 30, 2016)
+======================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fix RoCE MSI-X and completion ring reservation logic.  It was
+           causing SRIOV to fail when enabling a large number of VFs.
+
+v1.3.21 (Jun 29, 2016)
+======================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-22163) Added support for changing statistics coalescing
+           using ethtool -C (requires firmware 2.6.11 or newer).
+        2. Added promiscuous mode support on VF if default VLAN is in use.
+
+v1.3.20 (Jun 23, 2016)
+======================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fix hwrm_vnic_cfg with MRU enable bit for correctness.
+
+Enhancements:
+        1. Increased maximum MTU to 9500.
+
+v1.3.2 (Jul 28, 2016)
+=====================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-22663) Removed "Single-port/Dual-port" from device strings.
+
+v1.3.1 (Jul 13, 2016)
+=====================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-22510) Added 5740X NPAR and 57407 device IDs.
+
+v1.3.0 (Jun 29, 2016)
+=====================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-22089) Added support for suspend/resume.
+        2. Set up dev_port sysfs attribute with port ID.
+
+v1.2.22 (Jun 17, 2016)
+======================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. ifup/ifdown on the PF will not cause catastrophic failure on
+           active VFs.
+        2. (CTRL-22235) Add support for Kong and Bono firmware updates.
+        3. Fixed compile issues on 4.6 and 4.7 kernels.
+        4. (CTRL-22014) Added RoCE driver hooks.
+
+v1.2.21 (Jun 03, 2016)
+======================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-22179) Fix tx push race condition causing driver crash.
+
+Enhancements:
+        1. (CTRL-22201) Request APE reset after firmware upgrade.
+        2. (CTRL-21966) Support NVM secure update item.
+
+v1.2.20 (May 25, 2016)
+=====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-22125) Added support for 5731X and 5741X devices.
+
+Known Issues:
+        1. Default VLAN not working on 5731x/5741x (20.6.6 firmware issue).
+        2. Accelerated RFS not working on 5731x/5741x (20.6.6 firmware issue).
+
+v1.2.8 (Jun 17, 2016)
+=====================
+This version uses HWRM 1.3.0 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-22211) Fixed VLAN receive on 3.8 and similar kernels.
+        2. (CTRL-22319) Fix compatibility issue on SRIOV mode which caused
+           the set_vf_link_state code to be disabled.
+
+Enhancements:
+        1. (CTRL-22089) Added magic packet WoL support (shutdown only).
+
+v1.2.7 (Jun 3, 2016)
+====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-22158) Return error code when NPAR/VF functions try to
+           change speed, flow control, etc.
+
+v1.2.6 (May 18, 2016)
+=====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Re-implemented default VLAN properly by disabling RX VLAN
+           acceleration on the VF.  New firmware which fixes CTRL-22105 is
+           also required.
+
+Enhancements:
+        1. (CTRL-21914) Add NPAR support for 57402, 57404, 57406.
+
+v1.2.5 (May 16, 2016)
+=====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Various fixes for ethtool -m.
+        2. (CTRL-21932) Unsupported SFP modules are now detected before device
+           is up.
+        3. (CTRL-22078) Firmware wait time extended to the proper time
+           specified by the firmware.  This may fix some VF firmware timeout
+           issue.
+        4. (CTRL-22019) Fixed powerpc panic issue.
+
+Enhancements:
+        1. (CTRL-21956) Added PCIE link speed and width message during
+           modprobe.
+
+v1.2.4 (Apr 22, 2016)
+=====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-21914) Fix invalid max channels displayed by ethtool -l
+           when one MSIX is assigned to the function.
+        2. Fix rx path on architectures using 64K PAGE_SIZE.
+
+Enhancements:
+        1. (CTRL-21786) Added support for ethtool -m.
+
+v1.2.3 (Apr 15, 2016)
+=====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-21965) Don't allow autoneg on NICs that don't support it.
+
+v1.2.2 (Apr 12, 2016)
+=====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-21776) Add workaround for hardware duplicate rx opaque
+           bug (CUMULUS-7831).
+        2. (CTRL-21934) Don't fall back to INTA if msix allocation fails
+           on the VF.
+
+v1.2.1 (Mar 25, 2016)
+=====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. Fixed incorrect link down state when unsupported optical modules
+           notifications are enabled.
+
+v1.2.0 (Mar 24, 2016)
+=====================
+This version uses HWRM 1.2.2 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-21822) Implemented new HWRM scheme for VF MAC address change
+           for VMWare that also works for Linux PF.
+        2. (CTRL-21859) Fixed flow control reporting when autoneg is off.
+
+v1.0.7 (Mar 16, 2016)
+=====================
+This version uses HWRM 1.2.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-21839) Fixed inconsistent EEE reporting on dmesg.
+        2. Fixed flow control settings logic.  This fixes the
+           update_phy error message when bringing up the VFs with
+           flow control set to autoneg.
+
+v1.0.6 (Mar 14, 2016)
+=====================
+This version uses HWRM 1.2.1 and is compatible with all firmware using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. (CTRL-21729) Provide NVM and FW status information via ethtool
+           GEEPROM.
+        2. Adjusted default coalescing parameters to reduce interrupts.
+        3. Added check for valid forced speed during ethtool -s.
+        4. Improved ethtool forced speed settings display.
+        5. Added default VLAN support for VFs.
+
+Fixes:
+        1. Fixed RHEL6.8 compile error.
+        2. (CTRL-21440) Worked around RHEL6.3 source tree bug that caused
+           issues with IOMMU.
+        3. Disallow forced speed on 10GbaseT/1GBaseT.
+
+v1.0.5 (Mar 2, 2016)
+====================
+
+Enhancements:
+        1. Added unsupported SFP+ module reporting.
+
+v1.0.4 (Mar 1, 2016)
+====================
+
+This version is compatible with all ChiMP FW versions 20.1.x and above using
+HWRM 1.0.0 or above.
+
+Enhancements:
+        1. Updated to support HWRM 1.2.0, backwards compatible to 1.0.0.
+        2. (CTRL-21401) Added EEE for 10GBase-T devices.
+
+v1.0.3 (Feb 16, 2016)
+=====================
+
+This version is compatible with all ChiMP FW version 20.1.x and above using
+HWRM 1.0.0 or above.
+
+Fixes:
+        1. (CTRL-21636) Fixed link down condition when doing rapid speed
+           changes and changing other settings.
+        2. (CTRL-20564) Improve default firmware timeout behavior so that
+           messages requiring long timeouts (such as NVM commands) will work
+           better with older firmware.
+
+Enhancements:
+        1. (CTRL-21405) Add PCIE advanced error reporting.
+       2. Added port statistics for PF.
+        3. (CTRL-21587) Add autoneg support for 25G/40G/50G. See README.TXT.
+
+v1.0.2 (Jan 26, 2016)
+=====================
+
+This version requires ChiMP FW version 20.1.11 (HWRM 1.0.0).
+
+Fixes:
+        1. (CTRL-21271) Reduce default ring sizes and change default to
+           combined channels to reduce memory and DMA mappings.
+        2. (CTRL-21271) Fix crash when freeing tx ring during tx timeout.
+        3. Fix firmware error message logging to print message fields
+           properly.
+        4. Use completion ring to process ring free response
+           from firmware.
+
+Enhancements:
+        1. (CTRL-21288) Add package version information to ethtool -i.
+
+v1.0.1 (Jan 08, 2016)
+=====================
+
+This version requires ChiMP FW version 20.1.9 (HWRM 1.0.0).
+
+Fixes:
+        1. (CTRL-21410, CUMULUS-6643) Exclude hw rx_drop_pkts from the stack's
+           rx_dropped counter.
+
+v1.0.0 (Jan 07, 2016)
+=====================
+
+This version requires ChiMP FW version 20.1.7 (HWRM 1.0.0).
+
+Enhancements:
+        1. Driver is now compatible with all future versions of production
+           firmware using HWRM spec 1.0.0 or newer.
+
+v0.1.32 (Dec 18, 2015)
+======================
+
+This version requires ChiMP FW version 20.1.5 (HWRM 0.8.0).
+
+Enhancements:
+        1. Enhanced ethtool -d with a signature to determine endianness.
+
+v0.1.31 (Dec 16, 2015)
+======================
+
+This version requires ChiMP FW version 20.1.3 (HWRM 0.7.8).
+
+Fixes:
+        1. (CTRL-21319) Fixed compile error on RHEL6 2.6.32-573.el6 kernel.
+        2. (CTRL-20830) Keep track of ring group resources advertised by
+           firmware.
+        3. Check for adequate resources before allowing NTUPLE to be enabled.
+        4. Fixed compile issues on 4.3/4.4 kernels.
+        5. (CTRL-21379) Fixed rmmod hang on older 3.x kernels.
+
+Enhancements:
+        1. Removed all A0 workarounds.
+        2. Added preliminary support for ethtool -d.
+        3. Added reset to tx timeout and reverted back to 5 seconds.
+
+v0.1.30 (Nov 23, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.41 (HWRM 0.7.8).
+
+Fixes:
+        1. Fixed compile error on generic 3.10 kernel.  This was a
+           regression introduced in version 0.1.29.
+
+v0.1.29 (Nov 23, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.41 (HWRM 0.7.8).
+
+Enhancements:
+        1. (CTRL-21302) Added support for upgrading APE/NC-SI firmware using
+           ethtool -f.
+        2. (CTRL-20980) Completed support for all RHEL6.x kernels and generic
+           2.6.32 kernel.
+
+Fixes:
+        1. (CTRL-21203) Increment software checksum error counter only if
+           rx checksum offload is enabled.
+
+v0.1.28 (Nov 17, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.39 (HWRM 0.7.8).
+
+Enhancements:
+        1. (CTRL-21270) Added PCI IDs for 57301 and 57402 devices.
+
+        2. More robust SRIOV cleanup sequence to prevent VF crash when
+           the PF driver is unloaded.
+
+        3. (CTRL-20989, CTRL-20925) Implement rx/tx channels to improve tx
+           performance with dedicated tx and rx completion rings.  See
+           ethtool -L in README.TXT for more information.
+
+Fixes:
+        1. (CTRL-21217) Fixed SRIOV implementation on RHEL6.x kernels with
+           num_vfs module parameter.
+
+        2. (CTRL-21211) Fixed VLAN acceleration for RHEL6.x kernels.
+
+        3. (CTRL-21255) Fixed macvlan issue after driver reset.  The
+           unicast address list needs to be reprogrammed after reset.
+
+        4. Fixed INTA implementation by properly mapping the CAG register.
+
+v0.1.27 (Oct 28, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.38 (HWRM 0.7.8).
+
+Fixes:
+        1. (CTRL-21223) Fixed GRO issue when running ipv6 without TCP
+           timestamps.
+
+v0.1.26 (Oct 23, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.38 (HWRM 0.7.8).
+
+Fixes:
+        1. Fixed MAC address change to take effect immediately.
+
+Enhancements:
+        1. (CTRL-21163) Added support for RHEL6.3 kernel and OLE6.3 UEK.
+        2. (CTRL-21186) Added ChiMP reset after flashing firmware with
+           ethtool -f.
+        3. (CTRL-21165) Added support for flashing AVS firmware.
+
+v0.1.25 (Oct 09, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.36 (HWRM 0.7.8).
+
+Fixes:
+        1. (CTRL-21064) Fix PF max ring calculation to prevent ethtool -L
+           failure on PF when SRIOV is enabled.
+
+v0.1.24 (Oct 07, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.36 (HWRM 0.7.8).
+
+New Features:
+        1. (CTRL-21053) Added support for INTA mode.  The driver will
+           automatically fall back to INTA if MSIX is not available.
+
+        2. (CTRL-20980) Added initial support for RHEL6.7 and some
+           similar kernels.
+
+Fixes:
+        1. Added fixes for ethtool -K ntuple on|off.
+
+        2. Added proper accounting of statistics context resources when
+           changing ethtool channels.
+
+        3. (CTRL-21095) Ensure we have enough RSS contexts before enabling
+           a VF.
+
+        4. (CTRL-21048) Make sure GRO packets have valid hash.  Disable TPA
+           during shutdown.  Close the device if we cannot initialize it
+           during configuration changes.
+
+        5. (CTRL-20989) Partial fix by setting a limit on tx completion
+           processing.
+
+        6. (CTRL-21011) Make sure ring pages don't exceed the maximum.
+           Adjust maximum ring sizes to prevent exceeding the maximum.
+
+        7. (CTRL-21064) Do not do function reset during ring changes
+           so that PF/VFs are not affected during ring changes.
+
+Enhancements:
+        1. Added extra padding for small packets to work in loopback mode
+           using latest firmware that has TXP padding disabled.
+
+        2. Enhanced pause setting changes to trigger link change and
+           print a new link message when appropriate.
+
+Known Issues:
+        1. Known VEB firmware issue of duplicating multicast packets on
+           NIC2.  This would cause ipv6 to detect duplicate address.
+
+v0.1.23 (Sep 22, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.34 (HWRM 0.7.6).
+
+New Features:
+        1. Added support for BCM57302 PCI ID.
+
+Fixes:
+        1. (CTRL-21032) Use modified firmware ring reset to workaround
+           hardware issue CUMULUS-5196.
+
+        2. (CTRL-20989) Partial fix of tx timeout issue by increasing the
+           timeout value from 5 to 10 seconds.  This is a temporary workaround
+           for the occasional tx completions that can take longer than 5
+           seconds while the issue is being investigated.
+
+        3. (CTRL-21048) Fix macvtap BUG condition while doing ring changes
+           with TPA (GRO) enabled.
+
+        4. Fix the msix table size read from the msix capability.  It was off
+           by 1 in previous versions.
+
+        5. (CTRL-21059) Fix RFS on 2nd port.  ChiMP firmware fix in 0.1.33
+           also required.
+
+        6. (CTRL-21033) Limit maximum ethtool channels to no more than the
+           msix table size to prevent failures.
+
+Enhancements:
+        1. Use smaller (256-byte) 1st buffer with HDS mode.
+
+        2. Support asynchronous link events in VF driver.
+
+v0.1.22 (Sep 14, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.32 (HWRM 0.7.6).
+
+Fixes:
+        1. (CTRL-20993) Reject SRIOV configuration from sysfs if PF is down.
+        2. Fixed ethtool -S counter names discard and drop to match exactly
+           hardware counter definitions.
+        3. (CTRL-20705) Fixed kernel oops when running ethtool -L rx 0 tx 0.
+
+Enhancements:
+        1. (CTRL-20686) Call firmware hwrm_ring_reset to perform recovery
+           from duplicate opaque hardware issue (CUMULUS-5196)
+        2. Added support for 3.8 kernel.
+
+v0.1.21 (Sep 10, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.31 (HWRM 0.7.6).
+
+New Features:
+        1. Added autoneg support for 57406 10GBase-T device.
+        2. (CTRL-20619) Added rx_l4_csum_errors software counter.
+
+Fixes:
+        1. (CTRL-20704) ethtool -L now returns error when user tries to set
+           combined or other channels.
+        2. (CTRL-20704) Fix incorrect ethtool -S statistics when device is
+           down.
+        3. Added RSS hash values to skb->hash for RFS packets.
+
+Enhancements:
+        1. Improved ethtool -{L|l} when VFs are configured.
+        2. Added support for 3.9 kernel.
+
+v0.1.20 (Sep 01, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.30 (HWRM 0.7.6).
+
+New Features:
+        1. (CTRL-20800) Added ethtool -x to display RSS indirection table and
+           hash key.
+
+Fixes:
+        1. Fixed the problem of non-functioning Low Latency Polling mode
+           after driver config. changes such as MTU change.
+
+Enhancements:
+        1. Added ability to configure VF MAC address at any time after SR-IOV
+           is enabled.
+
+v0.1.19 (Aug 26, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.28 (HWRM 0.7.6).
+
+New Features:
+        1. Added Accelerated RFS support on PF for TCP/IPv4.
+        2. Enabled Header Data split for TPA and jumbo frames.
+
+Fixes:
+        1. (CTRL-20905) Fixed over-subscription of rx rings, causing failure
+           when enabling a large number of VFs.
+        2. Use correct length hint for TSO packets for correct TSO mbuf usage.
+        3. (CTRL-20930) Enhanced duplicate opaque workaround logic for dual
+           port and multiple functions.
+
+Enhancements:
+        1. Added basic register and state dump during tx timeout, but no
+           recovery logic yet.
+        2. Added support for setting VF link state.
+           
+v0.1.18 (Aug 20, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.27 (HWRM 0.7.6).
+
+New Features:
+        1. Added support for 57406 10GBase-T device.
+
+Fixes:
+        1. (CTRL-20890) Fixed ethtool -L driver bug when increasing the rings
+           beyond 16.
+        2. (CTRL-20905) Fixed vnic allocation in VF to increase the number
+           of supported VF. ChiMP fix is also required to support more
+           than 32 VFs.
+
+Enhancements:
+        1. Modified MSI-X IRQ name to work with affinity scripts.
+        2. Enhanced recovery steps further to include RE_FLUSH and rx doorbell
+           unmap for hw bug CTRL-20686 (CUMULUS-5196).
+
+Known Issues:
+        1. PF cannot be brought down or reconfigured if there are active VFs.
+
+v0.1.17 (Aug 12, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.26 (HWRM 0.7.5).
+
+New Features:
+        1. (CTRL-20869) Add support to ethtool -f for pre-boot components.
+
+Enhancements:
+        1. Speed up the recovery steps for hw bug CTRL-20686 (CUMULUS-5196).
+
+v0.1.16 (Aug 09, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.26 (HWRM 0.7.5).
+
+New Features:
+        1. Added VF device ID 0x16d3 for 57404 VF.
+
+v0.1.15 (Aug 05, 2015)
+======================
+
+This version requires ChiMP FW version 0.1.24 (HWRM 0.7.5).
+
+New Features:
+        1. SR-IOV tested to work on ChiMP FW 0.1.24.
+
+Fixes:
+        1. (CTRL-20804) Added workaround for TPA_FLUSH to make recovery from
+           duplicate agg completion (CTRL-20686) reliable.
+
+        2. (CUMULUS-5361) Use different TPA_TIMERS_CFG to make TPA timeouts
+           reliable.
+
+Known Issues:
+        1. ChiMP FW 0.1.24 has an issue with multicast and SR-IOV.  For
+           example, this problem can affect ipv6 neighbor discovery with
+           VFs enabled.
+
+v0.1.14 (July 30, 2015)
+=======================
+
+This version requires ChiMP FW version 0.1.23 (HWRM 0.7.5).
+
+Fixes:
+        1. (CTRL-20848) Fix failure to create vxlan tunnel.
+        2. (CTRL-20651) Update NVRAM HWRM API to work with ChiMP 0.1.23.
+        3. (CTRL-20823) Fix ethtool -C failure due to out-of-range rx-frames
+           value.
+
+Known Issues:
+        1. (CTRL-20841) MTU may need to be adjusted when creating a tunnel so
+           that the tunnel MTU is divisible by 4 to avoid PCIE errors caused
+           by known issue with TPA enabled (CTRL-20641).
+        2. SRIOV has issues with Firmware 0.1.23.
+
+v0.1.13 (July 28, 2015)
+=======================
+
+This version requires ChiMP FW version 0.1.22 (HWRM 0.7.5).
+
+New Features:
+        1. Geneve tunnel stateless offload.
+        2. VXLAN arbitrary UDP port.
+        3. Support 4.x kernels.
+        4. PCIe and PHY microcode update using ethtool -f.
+
+Fixes:
+        1. (CTRL-20793, CTRL-20796) Fix bridging issues with hardware GRO.
+
+Enhancements:
+        1. Improved workaround for CTRL-20686.
+
+v0.1.12 (July 13, 2015)
+=======================
+
+Fixes:
+       01. Increased TPA concurrent aggregations to max (64).
+       02. Refined IRQ coalescing to only enable RING_IDLE feature if
+           rx-usecs is less than 25 us.
+
+v0.1.11 (July 09, 2015)
+=======================
+
+This version supports ChiMP FW version 0.1.20 (HWRM 0.7.4).
+
+Fixes:
+       01. (CTRL-20686) Implemented workaround to recover from duplicate
+           aggregation ring completions hardware bug.
+
+v0.1.10 (June 30, 2015)
+======================
+
+This version supports ChiMP FW version 0.1.19 (HWRM 0.7.4).
+
+Fixes:
+       01. Disabled HDS (header data split) to workaround CTRL-20686 hardware
+           issue so that standard MTU will not hit the issue.
+
+v0.1.9 (June 30, 2015)
+======================
+
+This version supports ChiMP FW version 0.1.19 (HWRM 0.7.4).
+
+Fixes:
+       01. Added support for generic 4.0 kernel.
+       02. Enabled HDS (header data split) when aggregation ring is used.
+
+v0.1.8 (June 26, 2015)
+======================
+
+This version supports ChiMP FW version 0.1.18 (HWRM 0.7.4).
+
+Fixes:
+       01. Added support for generic 3.10/3.11 kernels.
+
+v0.1.7 (June 24, 2015)
+======================
+
+This version supports ChiMP FW version 0.1.18 (HWRM 0.7.4).
+
+Fixes:
+
+       01. (CTRL-20641) Added SOP (start of frame padding) to workaround
+           PCIE error with TPA enabled.
+
+v0.1.6 (June 20, 2015)
+======================
+
+This version supports ChiMP FW version 0.1.18 (HWRM 0.7.4).
+
+Fixes:
+
+       01. Enabled link change events.
+
+       02. Added support for link pause flow control (preview feature, not
+            well tested).
+
+       03. (CTRL-20630) Fixed most crashes during GRO and LRO settings
+            changes.
+
+v0.1.5 (June 17, 2015)
+======================
+
+This version supports ChiMP FW version 0.1.16 (HWRM 0.7.3).
+
+Fixes:
+
+       01. Fixed low latency (busy poll) socket mode.
+
+       02. (CTRL-20624) Fixed "ethtool -C rx-frames 1" problem.
+
+       03. Added ethtool -f support for upgrading firmware.
+
+       04. Added ethtool -e support to read NVRAM.
+
+       05. Added multiple unicast address support, eliminating the
+           need to always go into promiscuous mode.
+
+       06. Fixed low performance with tunnel packets in GRO mode.
+
+       07. (CTRL-20660) Fixed macvtap crash with GRO enabled.
+
+v0.1.4 (May 29, 2015)
+======================
+
+This version supports ChiMP FW version 0.1.12 (HWRM 0.7.3).
+
+Fixes:
+
+       01. (CTRL-20585) Added workaround for bridge multicast storm issue.
+
+       02. Added interrupt coalescing support with new default settings
+            and configurable using ethtool.
+
+       03. Fixed VLAN issue by configuring chip to strip VLAN tags on RX.
+
+       04. Added page table support for the larger ring sizes.
+
+       05. (CTRL-20599) Fixed transmit queue 8 timeout issue.
+
+       06. Updated Chip names with official marketing names.
+
+v0.1.3 (May 23, 2015)
+======================
+
+This version supports ChiMP FW version 0.1.11 (HWRM 0.7.3).
+
+Fixes:
+
+       01. Fixed TPA setup with large MTUs.  Before this fix, TCP
+            performance was very poor with larger MTUs.
+
+        02. Fixed the problem of statistics counters not working after
+            configuration changes.
+
+v0.1.2 (May 21, 2015)
+======================
+
+Fixes:
+       01. Fixed the "DMA: Out of SW-IOMMU space ..." error by enabling
+            64-bit DMA.  This should also improve performance slightly.
+
+       02. Fixed firmware version in ethtool -i by reporting the ChiMP
+            firmware version as "bc" and the HWRM spec version as "rm".
+            PHY firmware version was added as "ph".
+
+        03. Added module version and description for modinfo.
+
+
+v0.1.1 (May 19, 2015)
+======================
+Features tested
+       01. Cumulus A0 NIC2 and NIC3 at 10/25 Gbps.
+       02. Dual port support on NIC2
+       03. TX/RX Checksum offload (IPv4/IPv6)
+       04. TCP segmentation offload (TSO IPv4/IPv6)
+       05. MultiQ (TSS / RSS)
+       06. Statistics (PF only)
+       07. Ethtool (only options -s -k,-K,-i,-g,-G,-l,-L)
+       08. vlan acceleration
+       09. TPA GRO and LRO with ethtool support
+       10. Broadcast/multicast replication
+       11. Promiscuous mode support
+
+v0.1.0 (May 15, 2015)
+======================
+Features tested
+       01. Cumulus A0 NIC2 at 10 Gbps.
+       02. Dual port support
+       03. TX/RX Checksum offload (IPv4/IPv6)
+       04. TCP segmentation offload (TSO IPv4/IPv6)
+       05. MultiQ (TSS / RSS)
+       06. Statistics (PF only)
+       07. Ethtool (only options -k,-K,-i,-g,-G,-l,-L)
+       08. vlan acceleration
+       09. TPA GRO and LRO with ethtool support
+       10. Broadcast/multicast replication
+       11. Promiscuous mode support
+
+v0.008 (Mar 11, 2015)
+======================
+Features supported
+       01. Dual port support
+       02. TX/RX Checksum offload (IPv4/IPv6)
+       03. TCP segmentation offload (TSO IPv4/IPv6)
+       04. MultiQ (TSS / RSS)
+       05. Change MTU
+       06. Jumbo Frames
+       07. Statistics (PF only)
+       08. Ethtool (only options -k,-K,-i,-g,-G)
+       09. vlan acceleration
+       10. VXLAN stateless offload (only UDP port 4789 is supported)
+       11. GRE stateless offload
+       12. IPinIP stateless offload
+       13. SRIOV
+       14. VEB
+       15. TPA GRO and LRO with ethtool support
+       16. Broadcast/multicast replication fully supported on PF and VF
+       17. TXP padding disabled and XLMAC padding enabled to workaround
+            JIRA-4080
+       18. Added double doorbell to workaround JIRA-3918
+       19. Added promiscuous mode support
+
+Features not supported:
+       01. Ethtool options other than -k,-K,-i,-g,-G,-S
+       02. QoS/DCBX
+
+FPGA:          3.1.27
+Chimp FW:      0.0.18
+
+v0.007 (Feb 25, 2015)
+======================
+Features supported
+       01. Dual port support
+       02. TX/RX Checksum offload (IPv4/IPv6)
+       03. TCP segmentation offload (TSO IPv4/IPv6)
+       04. MultiQ (TSS / RSS)
+       05. Change MTU
+       06. Jumbo Frames
+       07. Statistics (PF only)
+       08. Ethtool (only options -k,-K,-i,-g,-G)
+       09. vlan acceleration
+       10. VXLAN stateless offload (only UDP port 4789 is supported)
+       11. GRE stateless offload
+       12. IPinIP stateless offload
+       13. SRIOV (most previous limitations removed)
+       14. VEB (previous limitations removed)
+       15. TPA GRO and LRO with ethtool support (previous limitations removed)
+       16. Broadcast/multicast replication fully supported on PF and VF
+       17. TXP padding disabled and XLMAC padding enabled to workaround
+            JIRA-4080
+       18. Added double doorbell to workaround JIRA-3918
+
+Features not supported:
+       01. Ethtool options other than -k,-K,-i,-g,-G,-S
+       02. QoS/DCBX
+
+FPGA:          3.1.27
+Chimp FW:      0.0.18
+
+v0.006 (Jan 29, 2015)
+======================
+Features supported
+       01. Dual port support
+       02. TX/RX Checksum offload (IPv4/IPv6)
+       03. TCP segmentation offload (TSO IPv4/IPv6)
+       04. Change MTU
+       05. Jumbo Frames
+       06. Statistics (PF only)
+       07. Ethtool (only options -k,-K,-i,-g,-G)
+       08. vlan acceleration
+       09. VXLAN stateless offload (only UDP port 4789 is supported)
+       10. GRE stateless offload
+       11. IPinIP stateless offload
+       12. SRIOV (limited functionality, check limitations below)
+       13. VEB (limited functionality, check limitations below)
+       14. TPA GRO and LRO with ethtool support (check limitations below)
+
+Features not supported:
+       01. Ethtool options other than -k,-K,-i,-g,-G,-S
+       02. QoS/DCBX
+       03. MultiQ (TSS / RSS)
+
+FPGA:          3.1.21
+Chimp FW:      0.0.17
+
+v0.005 (Jan 21, 2015)
+======================
+Features supported
+       01. Dual port support
+       02. TX/RX Checksum offload (IPv4/IPv6)
+       03. TCP segmentation offload (TSO IPv4/IPv6)
+       04. Change MTU
+       05. Change Mac Address
+       06. Jumbo Frames
+       07. Ethtool (only options -k,-K,-i)
+       08. vlan acceleration
+       09. VXLAN stateless offload (only UDP port 4789 is supported)
+       10. GRE stateless offload
+       11. IPinIP stateless offload
+       12. SRIOV (limited functionality, check limitations below)
+       13. VEB (limited functionality, check limitations below)
+       13. TPA GRO and LRO with ethtool support (check limitations below)
+
+Features not supported:
+       01. Statistics
+       02. Ethtool options other than -k,-K,-i
+       03. QoS/DCBX
+       04. MultiQ (TSS / RSS)
+
+
+v0.004 (Jan 20, 2015)
+======================
+Features supported
+       01. Dual port support
+       02. TX/RX Checksum offload (IPv4/IPv6)
+       03. TCP segmentation offload (TSO IPv4/IPv6)
+       04. Change MTU
+       05. Change Mac Address
+       06. Jumbo Frames
+       07. Ethtool (only options -k,-K,-i)
+       08. vlan acceleration
+       09. VXLAN stateless offload (only UDP port 4789 is supported)
+       10. GRE stateless offload
+       11. IPinIP stateless offload
+       12. SRIOV (limited functionality, check limitations below)
+       13. TPA (known issues, check limitations)
+
+Features not supported:
+       01. Statistics
+       02. Ethtool options other than -k,-K,-i
+       03. QoS/DCBX
+       04. MultiQ (TSS / RSS)
+
+
+v0.003 (Dec 15, 2014)
+======================
+Bug fixes:
+       1. Aggregation ring enablement (JIRA 3359).
+       2. Broadcast/Multicast filters (JIRA's 3315, 3358).
+
+Features supported
+       01. TX/RX Checksum offload (IPv4/IPv6)
+       02. TCP segmentation offload (TSO IPv4/IPv6)
+       03. MultiQ (TSS / RSS)
+       04. Change MTU
+       05. Change Mac Address
+       06. Jumbo Frames
+       07. Ethtool (only options -k,-K,-i)
+       08. vlan acceleration
+       09. TPA (only LRO)
+       10. VXLAN stateless offload (only UDP port 4789 is supported)
+       11. GRE stateless offload
+       12. IPinIP stateless offload
+        
+Features not supported:
+       01. SRIOV
+       02. Statistics
+       03. Ethtool options other than -k,-K,-i
+       04. QoS/DCBX
+
+v0.001 (Oct 31, 2014)
+======================
+Features supported:
+
+       1. TX/RX Checksum offload
+       2. Large segment offload (LSO)
+       3. MultiQ (TSS / RSS)
+       4. Change MTU
+       5. Change Mac Address
+       6. Jumbo Frames
+       7. vlan
+       8. Ethtool (only options -k,-K,-i)
+
+Features not supported:
+       1. Statistics (basic & advanced)
+       2. Ethtool options other than -k,-K,-i
+       3. vlan acceleration
+       4. TPA (LRO/GRO)
+       5. SRIOV
+       6. QoS/DCBX
+       7. Vxlan
+
+========================================================================
+DETAILS:
+
+FPGA version 3.1.27:
+
+       To program FPGA
+       1. RDP to any of pb_sniffer[5-10]
+       2. Map \\pb_file_srvr\drive_e to a drive.
+       3. Change directory to <DRIVE>\ProDesign\<FPGA version>
+       4. Run "prg_fpga.bat" <IP address of FPGA>
+
+Chimp FW version 0.0.18:
+       Location: ~/nseg/rels/bcm5734x/firmware/ChiMP_Firmware/0.0.18
+
+       To program Chimp FW to NVRAM:
+       1.esx-dbg (Administrator/broadcom) has the dos USB key.
+       2.Open your setup idrac and map the usb key (use Mozilla or Chrome)
+       3.Boot to dos
+       4.cd cdiag/0915/
+       5.cdiag.exe -eng
+       6.Enter the following command in cdiag and wait for completion
+               1:> nvm upgrade -cfw ncfc0000.17
+       7.Enter the following command to get fw version
+               1:> nvm dir
+
+Kernel:
+       Standard kernel that comes with RHEL7.0 or above
+       (Development was done using 3.10.0-123.el7.x86_64)
+
+Driver:
+       nseg: ~/nseg/rels/bcm5734x/drivers/linux/0.006
+
+iproute2:
+       https://www.kernel.org/pub/linux/utils/net/iproute2/  OR
+       git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/iproute2.git
+
+============================================================================
+Known Issues/Limitations:
+
+SRIOV:
+       1. Max 2 VF's can be enabled.
+       2. All VF development/testing was done using 'network namespace' on the
+          hypervisor itself, no major issues are expected when VF driver is
+          brought up on a real VM.
+
+       a. To enable 'x' VF's
+       #echo x > /sys/bus/pci/devices/<pci_bus_id of PF0>/sriov_numvfs
+
+       b. To disable VF's
+       #echo 0 > /sys/bus/pci/devices/<pci_bus_id of PF0>/sriov_numvfs
+
+Vlan:
+       1. RX side vlan stripping cannot be disabled on HW. Thus there is no
+          support for non-accelerated vlan on RX. If user disables vlan
+          stripping it will not ping.
+
+       2. On TX side vlan normal/accelerated modes are supported.
+
+       a. To enable/disable TX vlan acceleration
+       #ethtool -K vlan-tx-offload  on/off
+
+       b. To enable/disable RX vlan acceleration
+       #ethtool -K vlan-rx-offload  on/off
+
+============================================================================
diff --git a/ubuntu/bnxt/bnxt.c b/ubuntu/bnxt/bnxt.c
new file mode 100644 (file)
index 0000000..4ce9b64
--- /dev/null
@@ -0,0 +1,9075 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/rtc.h>
+#ifdef HAVE_NDO_XDP
+#include <linux/bpf.h>
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#if defined(HAVE_UDP_TUNNEL_H)
+#include <net/udp_tunnel.h>
+#endif
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+#ifdef HAVE_NDO_ADD_VXLAN
+#include <net/vxlan.h>
+#endif
+#endif
+#if !defined(NEW_FLOW_KEYS) && defined(HAVE_FLOW_KEYS)
+#include <net/flow_keys.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#ifndef NO_NETDEV_CPU_RMAP
+#include <linux/cpu_rmap.h>
+#endif
+#ifdef HAVE_IEEE1588_SUPPORT
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#endif
+
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+#include "bnxt_dcb.h"
+#include "bnxt_xdp.h"
+#include "bnxt_ptp.h"
+#ifndef HSI_DBG_DISABLE
+#include "decode_hsi.h"
+#endif
+
+/* Watchdog timeout for a stuck TX queue, in jiffies. */
+#define BNXT_TX_TIMEOUT                (5 * HZ)
+
+static const char version[] =
+       "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* RX buffer layout: packet data begins NET_SKB_PAD + NET_IP_ALIGN bytes
+ * into the buffer while DMA begins at NET_SKB_PAD.  BNXT_RX_COPY_THRESH
+ * is a small-packet copy threshold (its consumer is in the RX path,
+ * outside this chunk).
+ */
+#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
+#define BNXT_RX_COPY_THRESH 256
+
+/* Upper bound (bytes) for the inline "push" TX path -- presumably seeds
+ * bp->tx_push_thresh tested in bnxt_start_xmit(); confirm at init.
+ */
+#define BNXT_TX_PUSH_THRESH 164
+
+/* Legacy module parameter for enabling SR-IOV VFs on kernels that lack
+ * the pci_driver->sriov_configure hook.
+ */
+#ifndef PCIE_SRIOV_CONFIGURE
+static unsigned int num_vfs;
+module_param(num_vfs, uint, 0);
+MODULE_PARM_DESC(num_vfs, " Number of supported virtual functions (0 means sriov is disabled)");
+#endif
+
+/* Board identifiers.  The order here must match board_info[] below;
+ * bnxt_pci_tbl[] stores these values in .driver_data.
+ */
+enum board_idx {
+       BCM57301,
+       BCM57302,
+       BCM57304,
+       BCM57417_NPAR,
+       BCM58700,
+       BCM57311,
+       BCM57312,
+       BCM57402,
+       BCM57404,
+       BCM57406,
+       BCM57402_NPAR,
+       BCM57407,
+       BCM57412,
+       BCM57414,
+       BCM57416,
+       BCM57417,
+       BCM57412_NPAR,
+       BCM57314,
+       BCM57417_SFP,
+       BCM57416_SFP,
+       BCM57404_NPAR,
+       BCM57406_NPAR,
+       BCM57407_SFP,
+       BCM57407_NPAR,
+       BCM57414_NPAR,
+       BCM57416_NPAR,
+       BCM57452,
+       BCM57454,
+       NETXTREME_E_VF,
+       NETXTREME_C_VF,
+};
+
+/* Human-readable board names, indexed by enum board_idx above --
+ * the two lists must stay in the same order.
+ */
+static const struct {
+       char *name;
+} board_info[] = {
+       { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
+       { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+       { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
+       { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
+       { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
+       { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
+       { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
+       { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
+       { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
+       { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
+       { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+       { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
+       { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
+       { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+       { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+       { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+       { "Broadcom NetXtreme-C Ethernet Virtual Function" },
+};
+
+/* PCI device IDs bound by this driver; .driver_data carries the enum
+ * board_idx for the device.  Virtual-function IDs are only registered
+ * when SR-IOV support is compiled in.
+ */
+static const struct pci_device_id bnxt_pci_tbl[] = {
+       { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
+       { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
+       { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+       { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
+       { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
+       { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
+       { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
+       { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
+       { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+       { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
+       { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
+       { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
+       { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
+       { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
+       { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
+       { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
+       { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
+       { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
+       { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+       { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
+#ifdef CONFIG_BNXT_SRIOV
+       { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+#endif
+       { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+/* HWRM request IDs from VFs that need special handling by the PF --
+ * NOTE(review): the consumer of this table is in the SR-IOV code,
+ * not in this chunk; confirm exact semantics in bnxt_sriov.c.
+ */
+static const u16 bnxt_vf_req_snif[] = {
+       HWRM_FUNC_CFG,
+       HWRM_PORT_PHY_QCFG,
+       HWRM_CFA_L2_FILTER_ALLOC,
+};
+
+/* Async completion event IDs of interest to this driver (the handler
+ * is outside this chunk).
+ */
+static const u16 bnxt_async_events_arr[] = {
+       ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+       ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+       ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+       ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
+       ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+};
+
+/* Return true if @idx identifies a virtual-function (VF) board. */
+static bool bnxt_vf_pciid(enum board_idx idx)
+{
+       return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
+}
+
+/* Completion-ring doorbell values, composed from the DB_* flag bits:
+ *  REARM   - valid index, DB_IRQ_DIS clear
+ *  CP      - valid index with DB_IRQ_DIS set
+ *  IRQ_DIS - DB_IRQ_DIS only, no valid index
+ */
+#define DB_CP_REARM_FLAGS      (DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS            (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define DB_CP_IRQ_DIS_FLAGS    (DB_KEY_CP | DB_IRQ_DIS)
+
+#define BNXT_CP_DB_REARM(db, raw_cons)                                 \
+               writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB(db, raw_cons)                                       \
+               writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB_IRQ_DIS(db)                                         \
+               writel(DB_CP_IRQ_DIS_FLAGS, db)
+
+/* TX BD length-hint flags, indexed by the packet length in 512-byte
+ * units (see "length >>= 9" in bnxt_start_xmit()).
+ */
+const u16 bnxt_lhint_arr[] = {
+       TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+       TX_BD_FLAGS_LHINT_512_TO_1023,
+       TX_BD_FLAGS_LHINT_1024_TO_2047,
+       TX_BD_FLAGS_LHINT_1024_TO_2047,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+       TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
+
+/* ndo_start_xmit handler.
+ *
+ * Two transmit paths:
+ *  - "push": when the ring is completely empty and the packet length is
+ *    within bp->tx_push_thresh, the packet and its BDs are copied
+ *    directly into the doorbell push buffer -- no DMA mapping.
+ *  - normal: the linear head and each fragment are DMA-mapped and
+ *    described by a long TX BD plus an extended BD (txbd1).
+ *
+ * Returns NETDEV_TX_OK (including on drop) or NETDEV_TX_BUSY when the
+ * ring cannot hold the skb plus all of its fragments.
+ */
+static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct tx_bd *txbd;
+       struct tx_bd_ext *txbd1;
+       struct netdev_queue *txq;
+       int i;
+       dma_addr_t mapping;
+       unsigned int length, pad = 0;
+       u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+       u16 prod, last_frag;
+       struct pci_dev *pdev = bp->pdev;
+       struct bnxt_tx_ring_info *txr;
+       struct bnxt_sw_tx_bd *tx_buf;
+       __le32 lflags = 0;
+
+       /* Drop packets mapped to a nonexistent TX ring. */
+       i = skb_get_queue_mapping(skb);
+       if (unlikely(i >= bp->tx_nr_rings)) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+
+       txq = netdev_get_tx_queue(dev, i);
+       txr = &bp->tx_ring[bp->tx_ring_map[i]];
+       prod = txr->tx_prod;
+
+       /* Need one BD per fragment plus the head BD and the extended BD. */
+       free_size = bnxt_tx_avail(bp, txr);
+       if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+               netif_tx_stop_queue(txq);
+               return NETDEV_TX_BUSY;
+       }
+
+       length = skb->len;
+       len = skb_headlen(skb);
+       last_frag = skb_shinfo(skb)->nr_frags;
+
+       txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+       txbd->tx_bd_opaque = prod;
+
+       tx_buf = &txr->tx_buf_ring[prod];
+       tx_buf->skb = skb;
+       tx_buf->nr_frags = last_frag;
+
+       vlan_tag_flags = 0;
+       cfa_action = 0;
+       if (skb_vlan_tag_present(skb)) {
+               vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+                                skb_vlan_tag_get(skb);
+               /* Currently supports 8021Q, 8021AD vlan offloads
+                * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
+                */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+               if (skb->vlan_proto == htons(ETH_P_8021Q))
+#endif
+                       vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+       }
+
+#ifdef HAVE_IEEE1588_SUPPORT
+       /* Reserve a HW TX-timestamp slot; a timestamped packet is forced
+        * onto the normal (non-push) path so lflags carries the STAMP flag.
+        */
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+               struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+               if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
+                   atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
+                       lflags = cpu_to_le32(TX_BD_FLAGS_STAMP);
+                       goto normal_tx;
+               }
+       }
+#endif
+
+       if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+               struct tx_push_buffer *tx_push_buf = txr->tx_push;
+               struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
+               struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
+               void *pdata = tx_push_buf->data;
+               u64 *end;
+               int j, push_len;
+
+               /* Set COAL_NOW to be ready quickly for the next push */
+               tx_push->tx_bd_len_flags_type =
+                       cpu_to_le32((length << TX_BD_LEN_SHIFT) |
+                                       TX_BD_TYPE_LONG_TX_BD |
+                                       TX_BD_FLAGS_LHINT_512_AND_SMALLER |
+                                       TX_BD_FLAGS_COAL_NOW |
+                                       TX_BD_FLAGS_PACKET_END |
+                                       (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       tx_push1->tx_bd_hsize_lflags =
+                                       cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+               else
+                       tx_push1->tx_bd_hsize_lflags = 0;
+
+               tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+               tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+
+               /* Zero-pad the final 8-byte word of the push buffer so the
+                * doorbell copy below never carries stale bytes.
+                */
+               end = pdata + length;
+               end = PTR_ALIGN(end, 8) - 1;
+               *end = 0;
+
+               /* Copy the whole packet (head + frags) into the push buffer;
+                * bail to the DMA path if any fragment address is unusable.
+                */
+               skb_copy_from_linear_data(skb, pdata, len);
+               pdata += len;
+               for (j = 0; j < last_frag; j++) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+                       void *fptr;
+
+                       fptr = skb_frag_address_safe(frag);
+                       if (!fptr)
+                               goto normal_tx;
+
+                       memcpy(pdata, fptr, skb_frag_size(frag));
+                       pdata += skb_frag_size(frag);
+               }
+
+               txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
+               txbd->tx_bd_haddr = txr->data_mapping;
+               prod = NEXT_TX(prod);
+               txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+               memcpy(txbd, tx_push1, sizeof(*txbd));
+               prod = NEXT_TX(prod);
+               tx_push->doorbell =
+                       cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+               txr->tx_prod = prod;
+
+               tx_buf->is_push = 1;
+               netdev_tx_sent_queue(txq, skb->len);
+               wmb();  /* Sync is_push and byte queue before pushing data */
+
+               /* push_len is in 8-byte words; copy at most 16 words with
+                * 64-bit writes, the remainder with 32-bit writes.
+                */
+               push_len = (length + sizeof(*tx_push) + 7) / 8;
+               if (push_len > 16) {
+                       __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
+                       __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+                                        (push_len - 16) << 1);
+               } else {
+                       __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
+                                        push_len);
+               }
+
+               goto tx_done;
+       }
+
+normal_tx:
+       /* Pad short frames up to BNXT_MIN_PKT_SIZE (skb_pad() frees the
+        * skb on failure).
+        */
+       if (length < BNXT_MIN_PKT_SIZE) {
+               pad = BNXT_MIN_PKT_SIZE - length;
+               if (skb_pad(skb, pad)) {
+                       /* SKB already freed. */
+                       tx_buf->skb = NULL;
+                       return NETDEV_TX_OK;
+               }
+               length = BNXT_MIN_PKT_SIZE;
+       }
+
+       mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+       if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
+               dev_kfree_skb_any(skb);
+               tx_buf->skb = NULL;
+               return NETDEV_TX_OK;
+       }
+
+       dma_unmap_addr_set(tx_buf, mapping, mapping);
+       flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+               ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+
+       txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+       prod = NEXT_TX(prod);
+       txbd1 = (struct tx_bd_ext *)
+               &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+       /* GSO packets overwrite lflags below; that is safe because the
+        * PTP path above only sets lflags for non-GSO skbs.
+        */
+       txbd1->tx_bd_hsize_lflags = lflags;
+       if (skb_is_gso(skb)) {
+               u32 hdr_len;
+
+#ifdef HAVE_INNER_NETWORK_OFFSET
+               if (skb->encapsulation)
+                       hdr_len = skb_inner_network_offset(skb) +
+                               skb_inner_network_header_len(skb) +
+                               inner_tcp_hdrlen(skb);
+               else
+#endif
+                       hdr_len = skb_transport_offset(skb) +
+                               tcp_hdrlen(skb);
+
+               txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+                                       TX_BD_FLAGS_T_IPID |
+                                       (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+               length = skb_shinfo(skb)->gso_size;
+               txbd1->tx_bd_mss = cpu_to_le32(length);
+               length += hdr_len;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               txbd1->tx_bd_hsize_lflags |=
+                       cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+               txbd1->tx_bd_mss = 0;
+       }
+
+       /* Convert length to 512-byte units to index the hint table. */
+       length >>= 9;
+       flags |= bnxt_lhint_arr[length];
+       txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
+       txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+       txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+       for (i = 0; i < last_frag; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               prod = NEXT_TX(prod);
+               txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+               len = skb_frag_size(frag);
+               mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+                                          DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+                       goto tx_dma_error;
+
+               tx_buf = &txr->tx_buf_ring[prod];
+               dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+               txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+               flags = len << TX_BD_LEN_SHIFT;
+               txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+       }
+
+       /* Re-stamp the last BD with the pad-adjusted length and end flag. */
+       flags &= ~TX_BD_LEN;
+       txbd->tx_bd_len_flags_type =
+               cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
+                           TX_BD_FLAGS_PACKET_END);
+
+       netdev_tx_sent_queue(txq, skb->len);
+
+#ifdef HAVE_IEEE1588_SUPPORT
+       if (unlikely(lflags))
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+       skb_tx_timestamp(skb);
+#endif
+
+       /* Sync BD data before updating doorbell */
+       wmb();
+
+       prod = NEXT_TX(prod);
+       txr->tx_prod = prod;
+
+       /* NOTE(review): the doorbell is written twice -- kept verbatim
+        * from the vendor code; appears intentional (confirm against HW
+        * errata before changing).
+        */
+       writel(DB_KEY_TX | prod, txr->tx_doorbell);
+       writel(DB_KEY_TX | prod, txr->tx_doorbell);
+
+tx_done:
+
+       mmiowb();
+
+       if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+               netif_tx_stop_queue(txq);
+
+               /* netif_tx_stop_queue() must be done before checking
+                * tx index in bnxt_tx_avail() below, because in
+                * bnxt_tx_int(), we update tx index before checking for
+                * netif_tx_queue_stopped().
+                */
+               smp_mb();
+               if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
+                       netif_tx_wake_queue(txq);
+       }
+       return NETDEV_TX_OK;
+
+tx_dma_error:
+       /* Release the reserved PTP timestamp slot, unmap everything
+        * mapped so far and drop the packet.
+        */
+       if (lflags)
+           atomic_inc(&bp->ptp_cfg->tx_avail);
+
+       last_frag = i;
+
+       /* start back at beginning and unmap skb */
+       prod = txr->tx_prod;
+       tx_buf = &txr->tx_buf_ring[prod];
+       tx_buf->skb = NULL;
+       dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+                        skb_headlen(skb), PCI_DMA_TODEVICE);
+       prod = NEXT_TX(prod);
+
+       /* unmap remaining mapped pages */
+       for (i = 0; i < last_frag; i++) {
+               prod = NEXT_TX(prod);
+               tx_buf = &txr->tx_buf_ring[prod];
+               dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+                              skb_frag_size(&skb_shinfo(skb)->frags[i]),
+                              PCI_DMA_TODEVICE);
+       }
+
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+/* Reclaim up to @nr_pkts completed TX packets on the ring served by
+ * @bnapi: unmap DMA (push packets were never mapped), deliver any
+ * pending PTP TX timestamp, free the skbs, update BQL accounting, and
+ * wake the queue if it was stopped and enough descriptors are free.
+ */
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+       struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+       struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+       u16 cons = txr->tx_cons;
+       struct pci_dev *pdev = bp->pdev;
+       int i;
+       unsigned int tx_bytes = 0;
+
+       for (i = 0; i < nr_pkts; i++) {
+               struct bnxt_sw_tx_bd *tx_buf;
+               struct sk_buff *skb;
+               int j, last;
+
+               tx_buf = &txr->tx_buf_ring[cons];
+               cons = NEXT_TX(cons);
+               skb = tx_buf->skb;
+               tx_buf->skb = NULL;
+
+               /* Push packets were copied, not DMA mapped -- nothing to
+                * unmap.
+                */
+               if (tx_buf->is_push) {
+                       tx_buf->is_push = 0;
+                       goto next_tx_int;
+               }
+
+               dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+                                skb_headlen(skb), PCI_DMA_TODEVICE);
+               last = tx_buf->nr_frags;
+
+               for (j = 0; j < last; j++) {
+                       cons = NEXT_TX(cons);
+                       tx_buf = &txr->tx_buf_ring[cons];
+                       dma_unmap_page(
+                               &pdev->dev,
+                               dma_unmap_addr(tx_buf, mapping),
+                               skb_frag_size(&skb_shinfo(skb)->frags[j]),
+                               PCI_DMA_TODEVICE);
+               }
+
+#ifdef HAVE_IEEE1588_SUPPORT
+               /* Deliver the HW TX timestamp, if one was requested, and
+                * release the single outstanding-timestamp slot.
+                */
+               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+                       u64 ts;
+
+                       if (!bnxt_get_tx_ts(bp, &ts)) {
+                               struct skb_shared_hwtstamps timestamp;
+                               struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+                               u64 ns;
+
+                               memset(&timestamp, 0, sizeof(timestamp));
+                               ns = timecounter_cyc2time(&ptp->tc, ts);
+                               timestamp.hwtstamp = ns_to_ktime(ns);
+                               skb_tstamp_tx(skb, &timestamp);
+                       }
+                       atomic_inc(&bp->ptp_cfg->tx_avail);
+               }
+#endif
+
+next_tx_int:
+               /* Advance past the final BD of this packet. */
+               cons = NEXT_TX(cons);
+
+               tx_bytes += skb->len;
+               dev_kfree_skb_any(skb);
+       }
+
+       netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
+       txr->tx_cons = cons;
+
+       /* Need to make the tx_cons update visible to bnxt_start_xmit()
+        * before checking for netif_tx_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that bnxt_start_xmit()
+        * will miss it and cause the queue to be stopped forever.
+        */
+       smp_mb();
+
+       if (unlikely(netif_tx_queue_stopped(txq)) &&
+           (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+               __netif_tx_lock(txq, smp_processor_id());
+               if (netif_tx_queue_stopped(txq) &&
+                   bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+                   txr->dev_state != BNXT_DEV_STATE_CLOSING)
+                       netif_tx_wake_queue(txq);
+               __netif_tx_unlock(txq);
+       }
+}
+
+#ifdef HAVE_BUILD_SKB
+/* Allocate and DMA-map a full page for page-mode RX.  On success the
+ * returned page's bus address, advanced by bp->rx_dma_offset, is stored
+ * in *mapping.  Returns NULL on allocation or mapping failure.
+ */
+static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+                                        gfp_t gfp)
+{
+       struct device *dev = &bp->pdev->dev;
+       struct page *page;
+
+       page = alloc_page(gfp);
+       if (!page)
+               return NULL;
+
+       *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+       if (dma_mapping_error(dev, *mapping)) {
+               __free_page(page);
+               return NULL;
+       }
+       *mapping += bp->rx_dma_offset;
+       return page;
+}
+
+/* Allocate and DMA-map a kmalloc'ed RX data buffer (build_skb path);
+ * the mapped region starts bp->rx_dma_offset bytes into the buffer.
+ * Returns the buffer, or NULL on failure (with *mapping unset).
+ */
+static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+                                      gfp_t gfp)
+{
+       u8 *data;
+       struct pci_dev *pdev = bp->pdev;
+
+       data = kmalloc(bp->rx_buf_size, gfp);
+       if (!data)
+               return NULL;
+
+       *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
+                                 bp->rx_buf_use_size, bp->rx_dir);
+
+       if (dma_mapping_error(&pdev->dev, *mapping)) {
+               kfree(data);
+               data = NULL;
+       }
+       return data;
+}
+#else
+
+/* Page-mode RX is not supported without build_skb(); always fail. */
+static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+                                        gfp_t gfp)
+{
+       return NULL;
+}
+
+/* Fallback for kernels without build_skb(): allocate a whole skb up
+ * front and DMA-map its data area at bp->rx_dma_offset.  Returns the
+ * skb, or NULL on allocation/mapping failure.
+ */
+static inline struct sk_buff *__bnxt_alloc_rx_data(struct bnxt *bp,
+                                                  dma_addr_t *mapping,
+                                                  gfp_t gfp)
+{
+       struct sk_buff *skb;
+       u8 *data;
+       struct pci_dev *pdev = bp->pdev;
+
+       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       if (skb == NULL)
+               return NULL;
+
+       data = skb->data;
+
+       *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
+                                 bp->rx_buf_use_size, bp->rx_dir);
+
+       if (dma_mapping_error(&pdev->dev, *mapping)) {
+               dev_kfree_skb(skb);
+               skb = NULL;
+       }
+       return skb;
+}
+#endif
+
+/* Fill RX descriptor @prod with a freshly allocated buffer: a page in
+ * page mode, otherwise a data buffer whose type depends on
+ * HAVE_BUILD_SKB.  Records the buffer, its data pointer (offset by
+ * bp->rx_offset) and DMA address in the sw ring and the hw BD.
+ * Returns 0 on success or -ENOMEM.
+ */
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+                      u16 prod, gfp_t gfp)
+{
+       struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+       struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+       dma_addr_t mapping;
+
+       if (BNXT_RX_PAGE_MODE(bp)) {
+               struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
+
+               if (!page)
+                       return -ENOMEM;
+
+               rx_buf->data = page;
+               rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+       } else {
+#ifdef HAVE_BUILD_SKB
+               u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+#else
+               struct sk_buff *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+#endif
+
+               if (!data)
+                       return -ENOMEM;
+
+               rx_buf->data = data;
+#ifdef HAVE_BUILD_SKB
+               rx_buf->data_ptr = data + bp->rx_offset;
+#else
+               rx_buf->data_ptr = data->data + bp->rx_offset;
+#endif
+       }
+       rx_buf->mapping = mapping;
+
+       rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+       return 0;
+}
+
+/* Recycle the RX buffer at @cons into the current producer slot,
+ * carrying over its data pointer, DMA mapping and hw BD address --
+ * no new allocation or remapping is performed.
+ */
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
+{
+       u16 prod = rxr->rx_prod;
+       struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+       struct rx_bd *cons_bd, *prod_bd;
+
+       prod_rx_buf = &rxr->rx_buf_ring[prod];
+       cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+       prod_rx_buf->data = data;
+       prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
+
+       prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+       prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+       cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+
+       prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
+/* Find the next free slot in the aggregation-buffer bitmap at or after
+ * @idx, wrapping to the start of the bitmap if none is found past @idx.
+ */
+static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+       u16 next, max = rxr->rx_agg_bmap_size;
+
+       next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+       if (next >= max)
+               next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+       return next;
+}
+
+/* Allocate and map one aggregation-ring buffer at @prod.  When the
+ * system page size exceeds BNXT_RX_PAGE_SIZE, a page is carved into
+ * BNXT_RX_PAGE_SIZE chunks (an extra page reference is taken per chunk
+ * still outstanding); otherwise a whole page is used per buffer.
+ * Returns 0 on success, -ENOMEM or -EIO on failure.
+ */
+static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+                                    struct bnxt_rx_ring_info *rxr,
+                                    u16 prod, gfp_t gfp)
+{
+       struct rx_bd *rxbd =
+               &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+       struct bnxt_sw_rx_agg_bd *rx_agg_buf;
+       struct pci_dev *pdev = bp->pdev;
+       struct page *page;
+       dma_addr_t mapping;
+       u16 sw_prod = rxr->rx_sw_agg_prod;
+       unsigned int offset = 0;
+
+       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+               page = rxr->rx_page;
+               if (!page) {
+                       page = alloc_page(gfp);
+                       if (!page)
+                               return -ENOMEM;
+                       rxr->rx_page = page;
+                       rxr->rx_page_offset = 0;
+               }
+               offset = rxr->rx_page_offset;
+               rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+               if (rxr->rx_page_offset == PAGE_SIZE)
+                       rxr->rx_page = NULL;
+               else
+                       get_page(page);
+       } else {
+               page = alloc_page(gfp);
+               if (!page)
+                       return -ENOMEM;
+       }
+
+       mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
+                              PCI_DMA_FROMDEVICE);
+       if (dma_mapping_error(&pdev->dev, mapping)) {
+               __free_page(page);
+               return -EIO;
+       }
+
+       /* The nominal slot may still be occupied; take the next free one. */
+       if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+               sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+       __set_bit(sw_prod, rxr->rx_agg_bmap);
+       rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
+       rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+
+       rx_agg_buf->page = page;
+       rx_agg_buf->offset = offset;
+       rx_agg_buf->mapping = mapping;
+       rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+       rxbd->rx_bd_opaque = sw_prod;
+       return 0;
+}
+
+/* Return @agg_bufs aggregation buffers, described by completion-ring
+ * entries starting at @cp_cons, back to the aggregation ring: each
+ * buffer's page, offset and DMA mapping are recycled into successive
+ * producer slots, updating both the sw bitmap and the hw BDs.
+ */
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+                                  u32 agg_bufs)
+{
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       u16 prod = rxr->rx_agg_prod;
+       u16 sw_prod = rxr->rx_sw_agg_prod;
+       u32 i;
+
+       for (i = 0; i < agg_bufs; i++) {
+               u16 cons;
+               struct rx_agg_cmp *agg;
+               struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+               struct rx_bd *prod_bd;
+               struct page *page;
+
+               agg = (struct rx_agg_cmp *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+               cons = agg->rx_agg_cmp_opaque;
+               __clear_bit(cons, rxr->rx_agg_bmap);
+
+               if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+                       sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+               __set_bit(sw_prod, rxr->rx_agg_bmap);
+               prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
+               cons_rx_buf = &rxr->rx_agg_ring[cons];
+
+               /* It is possible for sw_prod to be equal to cons, so
+                * set cons_rx_buf->page to NULL first.
+                */
+               page = cons_rx_buf->page;
+               cons_rx_buf->page = NULL;
+               prod_rx_buf->page = page;
+               prod_rx_buf->offset = cons_rx_buf->offset;
+
+               prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+               prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+               prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+               prod_bd->rx_bd_opaque = sw_prod;
+
+               prod = NEXT_RX_AGG(prod);
+               sw_prod = NEXT_RX_AGG(sw_prod);
+               cp_cons = NEXT_CMP(cp_cons);
+       }
+       rxr->rx_agg_prod = prod;
+       rxr->rx_sw_agg_prod = sw_prod;
+}
+
+#ifdef HAVE_BUILD_SKB
+#ifdef BNXT_RX_PAGE_MODE_SUPPORT
+/* Build an skb for a page-mode RX buffer.  @offset_and_len packs the
+ * HW-reported payload offset in the high 16 bits and the packet length
+ * in the low 16 bits.  The packet headers are copied into a freshly
+ * allocated linear skb, and the page is attached as frag 0 with the
+ * copied header bytes trimmed off.  Returns NULL (after recycling or
+ * freeing the buffer) on allocation failure.
+ */
+static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+                                       struct bnxt_rx_ring_info *rxr,
+                                       u16 cons, void *data, u8 *data_ptr,
+                                       dma_addr_t dma_addr,
+                                       unsigned int offset_and_len)
+{
+       unsigned int payload = offset_and_len >> 16;
+       unsigned int len = offset_and_len & 0xffff;
+       struct skb_frag_struct *frag;
+       struct page *page = data;
+       u16 prod = rxr->rx_prod;
+       struct sk_buff *skb;
+       int off, err;
+
+       /* Refill the ring slot first; on failure recycle this buffer. */
+       err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+       if (unlikely(err)) {
+               bnxt_reuse_rx_data(rxr, cons, data);
+               return NULL;
+       }
+       /* The page was mapped from its start; back out the data offset
+        * before unmapping.
+        */
+       dma_addr -= bp->rx_dma_offset;
+       dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+
+       /* No payload offset from the completion; parse the packet to
+        * find the header length instead.
+        */
+       if (unlikely(!payload))
+               payload = eth_get_headlen(data_ptr, len);
+
+       skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
+       if (!skb) {
+               __free_page(page);
+               return NULL;
+       }
+
+       off = (void *)data_ptr - page_address(page);
+       skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+       memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
+              payload + NET_IP_ALIGN);
+
+       /* The headers now live in the linear area; trim them from the
+        * page frag so they are not counted twice.
+        */
+       frag = &skb_shinfo(skb)->frags[0];
+       skb_frag_size_sub(frag, payload);
+       frag->page_offset += payload;
+       skb->data_len -= payload;
+       skb->tail += payload;
+
+       return skb;
+}
+#endif
+
+/* Build an skb around a build_skb()-style RX buffer.  A replacement
+ * buffer is allocated first so the ring slot is never left empty; on
+ * failure the current buffer is recycled and NULL is returned.  The low
+ * 16 bits of @offset_and_len carry the packet length.
+ */
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+                                  struct bnxt_rx_ring_info *rxr, u16 cons,
+                                  void *data, u8 *data_ptr,
+                                  dma_addr_t dma_addr,
+                                  unsigned int offset_and_len)
+{
+       u16 prod = rxr->rx_prod;
+       struct sk_buff *skb;
+       int err;
+
+       err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+       if (unlikely(err)) {
+               bnxt_reuse_rx_data(rxr, cons, data);
+               return NULL;
+       }
+
+       /* Unmap before the stack touches the data; the buffer is ours
+        * now whether or not build_skb() succeeded.
+        */
+       skb = build_skb(data, 0);
+       dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+                        bp->rx_dir);
+       if (!skb) {
+               kfree(data);
+               return NULL;
+       }
+
+       skb_reserve(skb, bp->rx_offset);
+       skb_put(skb, offset_and_len & 0xffff);
+       return skb;
+}
+#else
+/* Legacy (no build_skb) variant: the RX buffer already is an sk_buff.
+ * Allocate a replacement for the ring slot, unmap the buffer, reserve
+ * the RX offset, and set the packet length from the low 16 bits of
+ * @offset_and_len.  Recycles the buffer and returns NULL if the
+ * replacement cannot be allocated.
+ */
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+                                  struct bnxt_rx_ring_info *rxr, u16 cons,
+                                  void *data, u8 *data_ptr,
+                                  dma_addr_t dma_addr,
+                                  unsigned int offset_and_len)
+{
+       struct sk_buff *skb = data;
+       u16 prod = rxr->rx_prod;
+       int err;
+
+       err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+       if (unlikely(err)) {
+               bnxt_reuse_rx_data(rxr, cons, skb);
+               return NULL;
+       }
+
+       dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+                        bp->rx_dir);
+       skb_reserve(skb, bp->rx_offset);
+       skb_put(skb, offset_and_len & 0xffff);
+       return skb;
+}
+#endif
+
+/* Attach @agg_bufs aggregation pages, described by the completion ring
+ * entries starting at @cp_cons, as page frags of @skb.  Each consumed
+ * aggregation slot is refilled with a newly allocated page.  On
+ * allocation failure the skb is freed, the consumed buffer is restored,
+ * the remaining aggregation buffers are recycled, and NULL is returned.
+ */
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+                                    struct sk_buff *skb, u16 cp_cons,
+                                    u32 agg_bufs)
+{
+       struct pci_dev *pdev = bp->pdev;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       u16 prod = rxr->rx_agg_prod;
+       u32 i;
+
+       for (i = 0; i < agg_bufs; i++) {
+               u16 cons, frag_len;
+               struct rx_agg_cmp *agg;
+               struct bnxt_sw_rx_agg_bd *cons_rx_buf;
+               struct page *page;
+               dma_addr_t mapping;
+
+               agg = (struct rx_agg_cmp *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+               cons = agg->rx_agg_cmp_opaque;
+               frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+                           RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+               /* Attach the page now; the error path below detaches it
+                * again if the ring cannot be refilled.
+                */
+               cons_rx_buf = &rxr->rx_agg_ring[cons];
+               skb_fill_page_desc(skb, i, cons_rx_buf->page,
+                                  cons_rx_buf->offset, frag_len);
+               __clear_bit(cons, rxr->rx_agg_bmap);
+
+               /* It is possible for bnxt_alloc_rx_page() to allocate
+                * a sw_prod index that equals the cons index, so we
+                * need to clear the cons entry now.
+                */
+               mapping = cons_rx_buf->mapping;
+               page = cons_rx_buf->page;
+               cons_rx_buf->page = NULL;
+
+               if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+                       struct skb_shared_info *shinfo;
+                       unsigned int nr_frags;
+
+                       /* Detach the frag added above before freeing. */
+                       shinfo = skb_shinfo(skb);
+                       nr_frags = --shinfo->nr_frags;
+                       __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+
+                       dev_kfree_skb(skb);
+
+                       cons_rx_buf->page = page;
+
+                       /* Update prod since possibly some pages have been
+                        * allocated already.
+                        */
+                       rxr->rx_agg_prod = prod;
+                       bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+                       return NULL;
+               }
+
+               dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+                              PCI_DMA_FROMDEVICE);
+
+               skb->data_len += frag_len;
+               skb->len += frag_len;
+               skb->truesize += PAGE_SIZE;
+
+               prod = NEXT_RX_AGG(prod);
+               cp_cons = NEXT_CMP(cp_cons);
+       }
+       rxr->rx_agg_prod = prod;
+       return skb;
+}
+
+/* Advance *raw_cons past @agg_bufs aggregation completions and report
+ * whether the last one is valid, i.e. whether all aggregation entries
+ * for this packet have already arrived on the completion ring.
+ */
+static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                              u8 agg_bufs, u32 *raw_cons)
+{
+       struct rx_agg_cmp *last_agg;
+       u16 idx;
+
+       *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+       idx = RING_CMP(*raw_cons);
+       last_agg = (struct rx_agg_cmp *)
+               &cpr->cp_desc_ring[CP_RING(idx)][CP_IDX(idx)];
+       return RX_AGG_CMP_VALID(last_agg, *raw_cons);
+}
+
+/* Copy a small received packet out of its DMA buffer into a freshly
+ * allocated skb so the original RX buffer can be reused in place.  The
+ * copy includes NET_IP_ALIGN bytes before @data to preserve IP header
+ * alignment.  Returns NULL if skb allocation fails.
+ */
+static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+                                           unsigned int len,
+                                           dma_addr_t mapping)
+{
+       struct bnxt *bp = bnapi->bp;
+       struct pci_dev *pdev = bp->pdev;
+       struct sk_buff *skb;
+
+       skb = napi_alloc_skb(&bnapi->napi, len);
+       if (!skb)
+               return NULL;
+
+       /* Sync the whole copy-threshold region (callers guarantee
+        * len <= rx_copy_thresh) for CPU access before copying.
+        */
+       dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+                               bp->rx_dir);
+
+       memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
+              len + NET_IP_ALIGN);
+
+       /* Hand the buffer back to the device for reuse. */
+       dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+                                  bp->rx_dir);
+
+       skb_put(skb, len);
+       return skb;
+}
+
+/* Consume an RX or TPA_END completion without delivering the packet:
+ * advance *raw_cons past the descriptor and any aggregation entries it
+ * references.  Returns -EBUSY if the aggregation entries have not all
+ * arrived yet (caller must retry later), 0 otherwise.
+ */
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+                          u32 *raw_cons, void *cmp)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct rx_cmp *rxcmp = cmp;
+       u32 tmp_raw_cons = *raw_cons;
+       u8 cmp_type, agg_bufs = 0;
+
+       cmp_type = RX_CMP_TYPE(rxcmp);
+
+       /* The agg buffer count lives in different fields depending on
+        * the completion type.
+        */
+       if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+               agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+                           RX_CMP_AGG_BUFS) >>
+                          RX_CMP_AGG_BUFS_SHIFT;
+       } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+               struct rx_tpa_end_cmp *tpa_end = cmp;
+
+               agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+                           RX_TPA_END_CMP_AGG_BUFS) >>
+                          RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+       }
+
+       if (agg_bufs) {
+               if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+                       return -EBUSY;
+       }
+       *raw_cons = tmp_raw_cons;
+       return 0;
+}
+
+/* Request a device reset via the slow-path task (at most once per NAPI
+ * instance) and poison rx_next_cons so subsequent completions on this
+ * ring are treated as out of sync until the reset runs.
+ */
+static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+       struct bnxt_napi *bnapi = rxr->bnapi;
+
+       if (!bnapi->in_reset) {
+               bnapi->in_reset = true;
+               set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+       }
+       rxr->rx_next_cons = 0xffff;
+}
+
+/* Handle a TPA_START completion: the packet at @cons begins an
+ * aggregation.  Swap the consumed RX buffer into the per-agg-ID
+ * tpa_info slot (where it is held until TPA_END) and put the buffer
+ * previously parked there back on the ring, then record the length,
+ * RSS hash, and metadata from the two completion descriptors.
+ */
+static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+                          struct rx_tpa_start_cmp *tpa_start,
+                          struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+       u8 agg_id = TPA_START_AGG_ID(tpa_start);
+       u16 cons, prod;
+       struct bnxt_tpa_info *tpa_info;
+       struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+       struct rx_bd *prod_bd;
+       dma_addr_t mapping;
+
+       cons = tpa_start->rx_tpa_start_cmp_opaque;
+       prod = rxr->rx_prod;
+       cons_rx_buf = &rxr->rx_buf_ring[cons];
+       prod_rx_buf = &rxr->rx_buf_ring[prod];
+       tpa_info = &rxr->rx_tpa[agg_id];
+
+       /* Completion out of step with the ring state; schedule a reset. */
+       if (unlikely(cons != rxr->rx_next_cons)) {
+               bnxt_sched_reset(bp, rxr);
+               return;
+       }
+
+       /* Give the producer slot the buffer previously parked in
+        * tpa_info, then park the consumed buffer there instead.
+        */
+       prod_rx_buf->data = tpa_info->data;
+       prod_rx_buf->data_ptr = tpa_info->data_ptr;
+
+       mapping = tpa_info->mapping;
+       prod_rx_buf->mapping = mapping;
+
+       prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+       prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+       tpa_info->data = cons_rx_buf->data;
+       tpa_info->data_ptr = cons_rx_buf->data_ptr;
+       cons_rx_buf->data = NULL;
+       tpa_info->mapping = cons_rx_buf->mapping;
+
+       tpa_info->len =
+               le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+                               RX_TPA_START_CMP_LEN_SHIFT;
+       if (likely(TPA_START_HASH_VALID(tpa_start))) {
+               u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
+
+               tpa_info->hash_type = PKT_HASH_TYPE_L4;
+               tpa_info->gso_type = SKB_GSO_TCPV4;
+               /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+               if (hash_type == 3)
+                       tpa_info->gso_type = SKB_GSO_TCPV6;
+               tpa_info->rss_hash =
+                       le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+       } else {
+               tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+               tpa_info->gso_type = 0;
+               if (netif_msg_rx_err(bp))
+                       netdev_warn(bp->dev, "TPA packet without valid hash\n");
+       }
+       tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+       tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+       tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+
+       /* The TPA_START covers two RX BDs: advance producer and
+        * next-consumer past both, and recycle the second buffer.
+        */
+       rxr->rx_prod = NEXT_RX(prod);
+       cons = NEXT_RX(cons);
+       rxr->rx_next_cons = NEXT_RX(cons);
+       cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+       bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+       rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+       cons_rx_buf->data = NULL;
+}
+
+/* Abort an in-progress TPA completion: recycle any aggregation buffers
+ * that were already posted for it.
+ */
+static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
+                          u16 cp_cons, u32 agg_bufs)
+{
+       if (!agg_bufs)
+               return;
+
+       bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+}
+
+/* 5731x-specific GRO fixup: use the hardware-provided header offsets in
+ * tpa_info->hdr_info to locate the (inner) TCP header and recompute the
+ * TCP pseudo-header checksum so tcp_gro_complete() can finish the
+ * packet.  Also marks the UDP-tunnel GSO type for encapsulated packets.
+ * Without CONFIG_INET the skb is returned untouched.
+ */
+static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
+                                          int payload_off, int tcp_ts,
+                                          struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+       struct tcphdr *th;
+       int len, nw_off;
+       u16 outer_ip_off, inner_ip_off, inner_mac_off;
+       u32 hdr_info = tpa_info->hdr_info;
+       bool loopback = false;
+
+       inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+       inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+       outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+       /* If the packet is an internal loopback packet, the offsets will
+        * have an extra 4 bytes.
+        */
+       if (inner_mac_off == 4) {
+               loopback = true;
+       } else if (inner_mac_off > 4) {
+               __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
+                                           ETH_HLEN - 2));
+
+               /* We only support inner IPv4/IPv6.  If we don't see the
+                * correct protocol ID, it must be a loopback packet where
+                * the offsets are off by 4.
+                */
+               if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
+                       loopback = true;
+       }
+       if (loopback) {
+               /* internal loopback packet, subtract all offsets by 4 */
+               inner_ip_off -= 4;
+               inner_mac_off -= 4;
+               outer_ip_off -= 4;
+       }
+
+       /* Point the network/transport headers at the inner headers and
+        * redo the TCP pseudo-header checksum for GRO completion.
+        */
+       nw_off = inner_ip_off - ETH_HLEN;
+       skb_set_network_header(skb, nw_off);
+       if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
+               struct ipv6hdr *iph = ipv6_hdr(skb);
+
+               skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+               len = skb->len - skb_transport_offset(skb);
+               th = tcp_hdr(skb);
+               th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+       } else {
+               struct iphdr *iph = ip_hdr(skb);
+
+               skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+               len = skb->len - skb_transport_offset(skb);
+               th = tcp_hdr(skb);
+               th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+       }
+
+       if (inner_mac_off) { /* tunnel */
+               struct udphdr *uh = NULL;
+               __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+                                           ETH_HLEN - 2));
+
+               /* NOTE(review): (iph + 1) assumes no IPv4 options or IPv6
+                * extension headers between the outer IP and UDP headers —
+                * confirm the hardware only flags such tunnels.
+                */
+               if (proto == htons(ETH_P_IP)) {
+                       struct iphdr *iph = (struct iphdr *)skb->data;
+
+                       if (iph->protocol == IPPROTO_UDP)
+                               uh = (struct udphdr *)(iph + 1);
+               } else {
+                       struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+                       if (iph->nexthdr == IPPROTO_UDP)
+                               uh = (struct udphdr *)(iph + 1);
+               }
+               if (uh) {
+                       if (uh->check)
+                               skb_shinfo(skb)->gso_type |=
+                                       SKB_GSO_UDP_TUNNEL_CSUM;
+                       else
+                               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+               }
+       }
+#endif
+       return skb;
+}
+
+#define BNXT_IPV4_HDR_SIZE     (sizeof(struct iphdr) + sizeof(struct tcphdr))
+#define BNXT_IPV6_HDR_SIZE     (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
+
+/* 5730x-specific GRO fixup: derive the inner header offsets from the
+ * HW-reported @payload_off and the fixed IPv4/IPv6 + TCP header sizes,
+ * then recompute the TCP pseudo-header checksum for tcp_gro_complete().
+ * Frees the skb and returns NULL for an unexpected gso_type.  Without
+ * CONFIG_INET the skb is returned untouched.
+ */
+static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
+                                          int payload_off, int tcp_ts,
+                                          struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+       struct tcphdr *th;
+       int len, nw_off, tcp_opt_len = 0;
+
+       /* The TCP timestamp option adds 12 bytes of TCP option space. */
+       if (tcp_ts)
+               tcp_opt_len = 12;
+
+       if (tpa_info->gso_type == SKB_GSO_TCPV4) {
+               struct iphdr *iph;
+
+               nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
+                        ETH_HLEN;
+               skb_set_network_header(skb, nw_off);
+               iph = ip_hdr(skb);
+               skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+               len = skb->len - skb_transport_offset(skb);
+               th = tcp_hdr(skb);
+               th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+       } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
+               struct ipv6hdr *iph;
+
+               nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
+                        ETH_HLEN;
+               skb_set_network_header(skb, nw_off);
+               iph = ipv6_hdr(skb);
+               skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+               len = skb->len - skb_transport_offset(skb);
+               th = tcp_hdr(skb);
+               th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+       } else {
+               dev_kfree_skb_any(skb);
+               return NULL;
+       }
+
+       /* A nonzero inner-header offset means encapsulation; check for a
+        * UDP tunnel to set the matching GSO tunnel type.
+        */
+       if (nw_off) { /* tunnel */
+               struct udphdr *uh = NULL;
+
+               /* NOTE(review): (iph + 1) assumes no IPv4 options or IPv6
+                * extension headers in the outer header — confirm against
+                * the hardware's tunnel parsing.
+                */
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       struct iphdr *iph = (struct iphdr *)skb->data;
+
+                       if (iph->protocol == IPPROTO_UDP)
+                               uh = (struct udphdr *)(iph + 1);
+               } else {
+                       struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+                       if (iph->nexthdr == IPPROTO_UDP)
+                               uh = (struct udphdr *)(iph + 1);
+               }
+               if (uh) {
+                       if (uh->check)
+                               skb_shinfo(skb)->gso_type |=
+                                       SKB_GSO_UDP_TUNNEL_CSUM;
+                       else
+                               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+               }
+       }
+#endif
+       return skb;
+}
+
+/* Finish a GRO-coalesced TPA packet: fill in the segment count and
+ * gso_size/gso_type, let the chip-specific bp->gro_func fix up the
+ * headers and TCP pseudo-checksum, then complete the packet with
+ * tcp_gro_complete().  A single-segment packet needs no fixup.  No-op
+ * without CONFIG_INET.
+ */
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
+                                          struct bnxt_tpa_info *tpa_info,
+                                          struct rx_tpa_end_cmp *tpa_end,
+                                          struct rx_tpa_end_cmp_ext *tpa_end1,
+                                          struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+       int payload_off;
+       u16 segs;
+
+       segs = TPA_END_TPA_SEGS(tpa_end);
+       if (segs == 1)
+               return skb;
+
+       NAPI_GRO_CB(skb)->count = segs;
+       skb_shinfo(skb)->gso_size =
+               le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+       skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+       payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+                      RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+                     RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+       /* gro_func may free the skb and return NULL on bad headers. */
+       skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+       if (likely(skb))
+               tcp_gro_complete(skb);
+#endif
+       return skb;
+}
+
+/* Handle a TPA_END completion: turn the buffer parked in the per-agg-ID
+ * tpa_info into a complete skb (copying small packets, otherwise
+ * swapping in a freshly allocated replacement buffer), attach any
+ * aggregation pages, apply VLAN/RSS/checksum metadata, and run the GRO
+ * fixup when the packet was coalesced.  Returns ERR_PTR(-EBUSY) when
+ * the completion ring does not yet hold all descriptors, NULL on
+ * abort/OOM, otherwise the skb ready for delivery.
+ */
+static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+                                          struct bnxt_napi *bnapi,
+                                          u32 *raw_cons,
+                                          struct rx_tpa_end_cmp *tpa_end,
+                                          struct rx_tpa_end_cmp_ext *tpa_end1,
+#ifdef OLD_VLAN
+                                          u32 *vlan,
+#endif
+                                          u8 *event)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       u8 agg_id = TPA_END_AGG_ID(tpa_end);
+       u8 *data_ptr, agg_bufs;
+       u16 cp_cons = RING_CMP(*raw_cons);
+       unsigned int len;
+       struct bnxt_tpa_info *tpa_info;
+       dma_addr_t mapping;
+       struct sk_buff *skb;
+#ifdef HAVE_BUILD_SKB
+       void *data;
+#else
+       struct sk_buff *data;
+#endif
+
+       /* While a reset is pending, just consume and drop completions. */
+       if (unlikely(bnapi->in_reset)) {
+               int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+
+               if (rc < 0)
+                       return ERR_PTR(-EBUSY);
+               return NULL;
+       }
+
+       tpa_info = &rxr->rx_tpa[agg_id];
+       data = tpa_info->data;
+       data_ptr = tpa_info->data_ptr;
+       prefetch(data_ptr);
+       len = tpa_info->len;
+       mapping = tpa_info->mapping;
+
+       agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+                   RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+
+       /* All aggregation completions must be present before we touch
+        * the buffers.
+        */
+       if (agg_bufs) {
+               if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+                       return ERR_PTR(-EBUSY);
+
+               *event |= BNXT_AGG_EVENT;
+               cp_cons = NEXT_CMP(cp_cons);
+       }
+
+       if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
+               bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+               if (agg_bufs > MAX_SKB_FRAGS)
+                       netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+                                   agg_bufs, (int)MAX_SKB_FRAGS);
+               return NULL;
+       }
+
+       if (len <= bp->rx_copy_thresh) {
+               /* Small packet: copy it out and keep the parked buffer. */
+               skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
+               if (!skb) {
+                       bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+                       return NULL;
+               }
+       } else {
+#ifdef HAVE_BUILD_SKB
+               u8 *new_data;
+#else
+               struct sk_buff *new_data;
+#endif
+               dma_addr_t new_mapping;
+
+               /* Large packet: swap a replacement buffer into tpa_info
+                * and hand the parked buffer to the stack.
+                */
+               new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+               if (!new_data) {
+                       bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+                       return NULL;
+               }
+
+               tpa_info->data = new_data;
+#ifdef HAVE_BUILD_SKB
+               tpa_info->data_ptr = new_data + bp->rx_offset;
+#else
+               tpa_info->data_ptr = new_data->data + bp->rx_offset;
+#endif
+               tpa_info->mapping = new_mapping;
+
+#ifdef HAVE_BUILD_SKB
+               skb = build_skb(data, 0);
+#else
+               skb = data;
+#endif
+               dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
+                                bp->rx_dir);
+
+#ifdef HAVE_BUILD_SKB
+               if (!skb) {
+                       kfree(data);
+                       bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+                       return NULL;
+               }
+               skb_reserve(skb, bp->rx_offset);
+#endif
+               skb_put(skb, len);
+       }
+
+       if (agg_bufs) {
+               skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+               if (!skb) {
+                       /* Page reuse already handled by bnxt_rx_pages(). */
+                       return NULL;
+               }
+       }
+       skb->protocol = eth_type_trans(skb, bp->dev);
+
+       if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+               skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+       if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+               u16 vlan_proto = tpa_info->metadata >>
+                       RX_CMP_FLAGS2_METADATA_TPID_SFT;
+               u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
+
+#ifdef OLD_VLAN
+               if (vlan_proto == ETH_P_8021Q)
+                       *vlan = vtag | OLD_VLAN_VALID;
+#else
+               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+#endif
+       }
+
+       skb_checksum_none_assert(skb);
+       if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+#ifdef HAVE_CSUM_LEVEL
+               skb->csum_level =
+                       (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+#elif defined(HAVE_INNER_NETWORK_OFFSET)
+               skb->encapsulation =
+                       (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+#endif
+       }
+
+       if (TPA_END_GRO(tpa_end))
+               skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
+
+       return skb;
+}
+
+/* returns the following:
+ * 1       - 1 packet successfully received
+ * 0       - successful TPA_START, packet not completed yet
+ * -EBUSY  - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO    - packet aborted due to hw error indicated in BD
+ */
+/* Process one RX completion at *raw_cons: dispatch TPA start/end
+ * completions, otherwise build an skb (copy path for small packets,
+ * bp->rx_skb_func for large ones), attach aggregation pages, apply
+ * RSS/VLAN/checksum/timestamp metadata, and deliver it up the stack.
+ */
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+                      u8 *event)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       struct net_device *dev = bp->dev;
+       struct rx_cmp *rxcmp;
+       struct rx_cmp_ext *rxcmp1;
+       u32 tmp_raw_cons = *raw_cons;
+       u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+       struct bnxt_sw_rx_bd *rx_buf;
+       unsigned int len;
+#ifdef HAVE_BUILD_SKB
+       u8 *data_ptr, agg_bufs, cmp_type;
+       void *data;
+#else
+       struct sk_buff *data;
+       u8 *data_ptr, agg_bufs, cmp_type;
+#endif
+       dma_addr_t dma_addr;
+       struct sk_buff *skb;
+       int rc = 0;
+#ifdef OLD_VLAN
+       u32 vlan = 0;
+#endif
+       u32 misc, flags;
+#ifdef HAVE_IEEE1588_SUPPORT
+       u64 ts = 0;
+#endif
+
+       rxcmp = (struct rx_cmp *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+       /* The extended part of the completion occupies the next entry;
+        * it must be valid before either part may be read.
+        */
+       tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+       cp_cons = RING_CMP(tmp_raw_cons);
+       rxcmp1 = (struct rx_cmp_ext *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+       if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+               return -EBUSY;
+
+       cmp_type = RX_CMP_TYPE(rxcmp);
+
+       prod = rxr->rx_prod;
+
+       if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+               bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+                              (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+               *event |= BNXT_RX_EVENT;
+               goto next_rx_no_prod;
+
+       } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+               skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+                                  (struct rx_tpa_end_cmp *)rxcmp,
+                                  (struct rx_tpa_end_cmp_ext *)rxcmp1,
+#ifdef OLD_VLAN
+                                  &vlan,
+#endif
+                                  event);
+
+               if (unlikely(IS_ERR(skb)))
+                       return -EBUSY;
+
+               rc = -ENOMEM;
+               if (likely(skb)) {
+                       skb_record_rx_queue(skb, bnapi->index);
+#ifdef BNXT_PRIV_RX_BUSY_POLL
+                       skb_mark_napi_id(skb, &bnapi->napi);
+#endif
+#ifdef OLD_VLAN
+                       if (vlan && bp->vlgrp)
+                               vlan_gro_receive(&bnapi->napi, bp->vlgrp,
+                                                (u16)vlan, skb);
+#else
+                       if (bnxt_busy_polling(bnapi))
+                               netif_receive_skb(skb);
+#endif
+                       /* This else pairs with whichever if survived the
+                        * preprocessing above.
+                        */
+                       else
+                               napi_gro_receive(&bnapi->napi, skb);
+                       rc = 1;
+               }
+               *event |= BNXT_RX_EVENT;
+               goto next_rx_no_prod;
+       }
+
+       cons = rxcmp->rx_cmp_opaque;
+       rx_buf = &rxr->rx_buf_ring[cons];
+       data = rx_buf->data;
+       data_ptr = rx_buf->data_ptr;
+       /* Ring out of sync with the completion: drop and reset. */
+       if (unlikely(cons != rxr->rx_next_cons)) {
+               int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+
+               bnxt_sched_reset(bp, rxr);
+               return rc1;
+       }
+       prefetch(data_ptr);
+
+       misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+       agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
+
+       if (agg_bufs) {
+               if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+                       return -EBUSY;
+
+               cp_cons = NEXT_CMP(cp_cons);
+               *event |= BNXT_AGG_EVENT;
+       }
+       *event |= BNXT_RX_EVENT;
+
+       /* Ownership of the buffer leaves the ring from here on. */
+       rx_buf->data = NULL;
+       if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+               bnxt_reuse_rx_data(rxr, cons, data);
+               if (agg_bufs)
+                       bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+               rc = -EIO;
+               goto next_rx;
+       }
+
+       flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
+       len = flags >> RX_CMP_LEN_SHIFT;
+       dma_addr = rx_buf->mapping;
+
+#ifdef HAVE_IEEE1588_SUPPORT
+       if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
+                    RX_CMP_FLAGS_ITYPE_PTP_W_TS))
+               bnxt_get_rx_ts(bp, &ts);
+#endif
+
+       /* Give XDP a chance to consume or redirect the packet. */
+       if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
+               rc = 1;
+               goto next_rx;
+       }
+
+       if (len <= bp->rx_copy_thresh) {
+               /* Small packet: copy out and recycle the buffer. */
+               skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+               bnxt_reuse_rx_data(rxr, cons, data);
+               if (!skb) {
+                       rc = -ENOMEM;
+                       goto next_rx;
+               }
+       } else {
+               u32 payload;
+
+               /* Pass the HW payload offset along only if XDP did not
+                * move the data pointer.
+                */
+               if (rx_buf->data_ptr == data_ptr)
+                       payload = misc & RX_CMP_PAYLOAD_OFFSET;
+               else
+                       payload = 0;
+               skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+                                     payload | len);
+               if (!skb) {
+                       rc = -ENOMEM;
+                       goto next_rx;
+               }
+       }
+
+       if (agg_bufs) {
+               skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+               if (!skb) {
+                       rc = -ENOMEM;
+                       goto next_rx;
+               }
+       }
+
+       if (RX_CMP_HASH_VALID(rxcmp)) {
+               u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+               enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+
+               /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+               if (hash_type != 1 && hash_type != 3)
+                       type = PKT_HASH_TYPE_L3;
+               skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+       }
+
+       skb->protocol = eth_type_trans(skb, dev);
+
+       if ((rxcmp1->rx_cmp_flags2 &
+            cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+               u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+               u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
+               u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+#ifdef OLD_VLAN
+               if (vlan_proto == ETH_P_8021Q)
+                       vlan = vtag | OLD_VLAN_VALID;
+#else
+               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+#endif
+       }
+
+       skb_checksum_none_assert(skb);
+       if (RX_CMP_L4_CS_OK(rxcmp1)) {
+               if (dev->features & NETIF_F_RXCSUM) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+#ifdef HAVE_CSUM_LEVEL
+                       skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+#elif defined(HAVE_INNER_NETWORK_OFFSET)
+                       skb->encapsulation = RX_CMP_ENCAP(rxcmp1);
+#endif
+               }
+       } else {
+               if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
+                       if (dev->features & NETIF_F_RXCSUM)
+                               cpr->sw_stats.rx_l4_csum_errors++;
+               }
+       }
+
+#ifdef HAVE_IEEE1588_SUPPORT
+       if (unlikely(ts)) {
+               u64 ns;
+
+               /* Convert the raw PHC cycle count to a host timestamp. */
+               ns = timecounter_cyc2time(&bp->ptp_cfg->tc, ts);
+               memset(skb_hwtstamps(skb), 0, sizeof(*skb_hwtstamps(skb)));
+               skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+       }
+#endif
+
+       skb_record_rx_queue(skb, bnapi->index);
+#ifdef BNXT_PRIV_RX_BUSY_POLL
+       skb_mark_napi_id(skb, &bnapi->napi);
+#endif
+#ifdef OLD_VLAN
+       if (vlan && bp->vlgrp)
+               vlan_gro_receive(&bnapi->napi, bp->vlgrp, (u16)vlan, skb);
+#else
+       if (bnxt_busy_polling(bnapi))
+               netif_receive_skb(skb);
+#endif
+       /* This else pairs with whichever if survived preprocessing. */
+       else
+               napi_gro_receive(&bnapi->napi, skb);
+       rc = 1;
+
+next_rx:
+       rxr->rx_prod = NEXT_RX(prod);
+       rxr->rx_next_cons = NEXT_RX(cons);
+
+next_rx_no_prod:
+       *raw_cons = tmp_raw_cons;
+
+       return rc;
+}
+
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+                                u32 *raw_cons, u8 *event)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       u32 tmp_raw_cons = *raw_cons;
+       struct rx_cmp_ext *rxcmp1;
+       struct rx_cmp *rxcmp;
+       u16 cp_cons;
+       u8 cmp_type;
+
+       /* First 16-byte half of the RX completion entry. */
+       cp_cons = RING_CMP(tmp_raw_cons);
+       rxcmp = (struct rx_cmp *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+       /* Second half; its valid bit covers the whole entry, so return
+        * -EBUSY (partial completion) until both halves are posted.
+        */
+       tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+       cp_cons = RING_CMP(tmp_raw_cons);
+       rxcmp1 = (struct rx_cmp_ext *)
+                       &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+       if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+               return -EBUSY;
+
+       cmp_type = RX_CMP_TYPE(rxcmp);
+       if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+               /* Fake a CRC error so bnxt_rx_pkt() drops the packet and
+                * recycles its buffers instead of building an skb.
+                */
+               rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+                       cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+       } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+               struct rx_tpa_end_cmp_ext *tpa_end1;
+
+               /* Same trick for the end of a TPA (aggregated) burst. */
+               tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+               tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+                       cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+       }
+       return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+}
+
+/* Extract the port id field from an async event's event_data1 word.
+ * The argument is parenthesized so the macro expands correctly when
+ * invoked with a compound expression (e.g. a | b).
+ */
+#define BNXT_GET_EVENT_PORT(data)      \
+       ((data) & ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+
+/* Handle a firmware asynchronous event completion: translate the event
+ * id into sp_event bits and kick the slow path task.  ULP drivers are
+ * notified of every event, whether handled here or not.  Returns 0.
+ */
+static int bnxt_async_event_process(struct bnxt *bp,
+                                   struct hwrm_async_event_cmpl *cmpl)
+{
+       u16 event_id = le16_to_cpu(cmpl->event_id);
+
+       /* TODO CHIMP_FW: Define event id's for link change, error etc */
+       switch (event_id) {
+       case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+               u32 data1 = le32_to_cpu(cmpl->event_data1);
+               struct bnxt_link_info *link_info = &bp->link_info;
+
+               if (BNXT_VF(bp))
+                       goto async_event_process_exit;
+               /* NOTE(review): 0x20000 looks like the "forced speed no
+                * longer supported" flag in event_data1 -- confirm against
+                * the HSI definition.
+                */
+               if (data1 & 0x20000) {
+                       u16 fw_speed = link_info->force_link_speed;
+                       u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
+
+                       netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+                                   speed);
+               }
+               set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
+               /* fall thru */
+       }
+       case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+               set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
+               break;
+       case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+               set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
+               break;
+       case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+               u32 data1 = le32_to_cpu(cmpl->event_data1);
+               u16 port_id = BNXT_GET_EVENT_PORT(data1);
+
+               /* Only the PF that owns this port cares about the event. */
+               if (BNXT_VF(bp))
+                       break;
+
+               if (bp->pf.port_id != port_id)
+                       break;
+
+               set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
+               break;
+       }
+       case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+               if (BNXT_PF(bp))
+                       goto async_event_process_exit;
+               set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
+               break;
+       default:
+               goto async_event_process_exit;
+       }
+       /* Kick the slow path task to act on the sp_event bits set above. */
+       schedule_work(&bp->sp_task);
+async_event_process_exit:
+       bnxt_ulp_async_events(bp, cmpl);
+       return 0;
+}
+
+/* Dispatch a HWRM-related completion found on a completion ring:
+ * command DONE notifications, forwarded VF requests, and async events.
+ * Returns 0, or -EINVAL for a forwarded request from an unknown VF.
+ */
+static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+{
+       u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
+       struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+       struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
+                               (struct hwrm_fwd_req_cmpl *)txcmp;
+
+       switch (cmpl_type) {
+       case CMPL_BASE_TYPE_HWRM_DONE:
+               /* Completion of a command issued in interrupt mode; clear
+                * the pending sequence id if it matches.
+                */
+               seq_id = le16_to_cpu(h_cmpl->sequence_id);
+               if (seq_id == bp->hwrm_intr_seq_id)
+                       bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+               else
+                       netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+               break;
+
+       case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+               /* A VF request forwarded by firmware for the PF to act on;
+                * mark the VF in the event bitmap and defer to the slow
+                * path task.
+                */
+               vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
+
+               if ((vf_id < bp->pf.first_vf_id) ||
+                   (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
+                       netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
+                                  vf_id);
+                       return -EINVAL;
+               }
+
+               set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
+               set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+               break;
+
+       case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+               bnxt_async_event_process(bp,
+                                        (struct hwrm_async_event_cmpl *)txcmp);
+               /* fall through */
+
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* MSI-X interrupt handler: one vector per completion ring.  All real
+ * work is deferred to NAPI; here we only warm the cache line the poll
+ * loop will read first and schedule the poll.
+ *
+ * The original also declared "struct bnxt *bp = bnapi->bp;" which was
+ * never used in this function; the dead local has been removed.
+ */
+static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+{
+       struct bnxt_napi *bnapi = dev_instance;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       u32 cons = RING_CMP(cpr->cp_raw_cons);
+
+       prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+       napi_schedule(&bnapi->napi);
+       return IRQ_HANDLED;
+}
+
+/* Return nonzero when the next entry of @cpr is a valid completion,
+ * i.e. the ring has pending work for the driver.
+ */
+static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+       u32 raw = cpr->cp_raw_cons;
+       u16 idx = RING_CMP(raw);
+
+       return TX_CMP_VALID(&cpr->cp_desc_ring[CP_RING(idx)][CP_IDX(idx)],
+                           raw);
+}
+
+/* Legacy INTx interrupt handler (line may be shared with other devices). */
+static irqreturn_t bnxt_inta(int irq, void *dev_instance)
+{
+       struct bnxt_napi *bnapi = dev_instance;
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       u32 cons = RING_CMP(cpr->cp_raw_cons);
+       u32 int_status;
+
+       prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+
+       /* On a shared line the interrupt may not be ours; if no completion
+        * is pending, consult the legacy status register before claiming
+        * or rejecting it.
+        */
+       if (!bnxt_has_work(bp, cpr)) {
+               int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
+               /* return if erroneous interrupt */
+               if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
+                       return IRQ_NONE;
+       }
+
+       /* disable ring IRQ */
+       BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+
+       /* Return here if interrupt is shared and is disabled. */
+       if (unlikely(atomic_read(&bp->intr_sem) != 0))
+               return IRQ_HANDLED;
+
+       napi_schedule(&bnapi->napi);
+       return IRQ_HANDLED;
+}
+
+/* Main NAPI poll worker: consume up to @budget RX completions (plus any
+ * TX completions) from this bnapi's completion ring, then ring the TX,
+ * completion, and RX/aggregation doorbells as indicated by @event bits.
+ * Returns the number of RX packets processed.
+ */
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       u32 raw_cons = cpr->cp_raw_cons;
+       u32 cons;
+       int tx_pkts = 0;
+       int rx_pkts = 0;
+       u8 event = 0;
+       struct tx_cmp *txcmp;
+
+       while (1) {
+               int rc;
+
+               cons = RING_CMP(raw_cons);
+               txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+               if (!TX_CMP_VALID(txcmp, raw_cons))
+                       break;
+
+               /* The valid test of the entry must be done first before
+                * reading any further.
+                */
+               dma_rmb();
+               if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+                       tx_pkts++;
+                       /* return full budget so NAPI will complete. */
+                       if (unlikely(tx_pkts > bp->tx_wake_thresh))
+                               rx_pkts = budget;
+               } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+                       /* Type codes with (type & 0x30) == 0x10 are RX
+                        * completions.  With no budget (netpoll path) force
+                        * a discard so the buffers get recycled.
+                        */
+                       if (likely(budget))
+                               rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+                       else
+                               rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+                                                          &event);
+                       if (likely(rc >= 0))
+                               rx_pkts += rc;
+                       else if (rc == -EBUSY)  /* partial completion */
+                               break;
+               } else if (unlikely((TX_CMP_TYPE(txcmp) ==
+                                    CMPL_BASE_TYPE_HWRM_DONE) ||
+                                   (TX_CMP_TYPE(txcmp) ==
+                                    CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
+                                   (TX_CMP_TYPE(txcmp) ==
+                                    CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+                       bnxt_hwrm_handler(bp, txcmp);
+               }
+               raw_cons = NEXT_RAW_CMP(raw_cons);
+
+               if (rx_pkts == budget)
+                       break;
+       }
+
+       if (event & BNXT_TX_EVENT) {
+               struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+               void __iomem *db = txr->tx_doorbell;
+               u16 prod = txr->tx_prod;
+
+               /* Sync BD data before updating doorbell */
+               wmb();
+
+               /* NOTE(review): doorbell is written twice -- presumably a
+                * hardware workaround; confirm against chip errata before
+                * changing.
+                */
+               writel(DB_KEY_TX | prod, db);
+               writel(DB_KEY_TX | prod, db);
+       }
+
+       cpr->cp_raw_cons = raw_cons;
+       /* ACK completion ring before freeing tx ring and producing new
+        * buffers in rx/agg rings to prevent overflowing the completion
+        * ring.
+        */
+       BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+       if (tx_pkts)
+               bnapi->tx_int(bp, bnapi, tx_pkts);
+
+       if (event & BNXT_RX_EVENT) {
+               struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+               /* Same double-write pattern as the TX doorbell above. */
+               writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+               writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+               if (event & BNXT_AGG_EVENT) {
+                       writel(DB_KEY_RX | rxr->rx_agg_prod,
+                              rxr->rx_agg_doorbell);
+                       writel(DB_KEY_RX | rxr->rx_agg_prod,
+                              rxr->rx_agg_doorbell);
+               }
+       }
+       return rx_pkts;
+}
+
+/* NAPI poll for the special Nitro A0 ring: every RX completion has an
+ * error forced into it so bnxt_rx_pkt() drops the packet and recycles
+ * the buffer.  Returns the number of entries consumed as rx_pkts.
+ */
+static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+{
+       struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       struct tx_cmp *txcmp;
+       struct rx_cmp_ext *rxcmp1;
+       u32 cp_cons, tmp_raw_cons;
+       u32 raw_cons = cpr->cp_raw_cons;
+       u32 rx_pkts = 0;
+       u8 event = 0;
+
+       while (1) {
+               int rc;
+
+               cp_cons = RING_CMP(raw_cons);
+               txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+               if (!TX_CMP_VALID(txcmp, raw_cons))
+                       break;
+
+               if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+                       /* RX completion; also validate the second 16-byte
+                        * half before touching it.
+                        */
+                       tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
+                       cp_cons = RING_CMP(tmp_raw_cons);
+                       rxcmp1 = (struct rx_cmp_ext *)
+                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+                       if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+                               break;
+
+                       /* force an error to recycle the buffer */
+                       rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+                               cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+
+                       rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+                       /* -EIO is the expected outcome for a packet dropped
+                        * due to the forced error; count it against budget.
+                        */
+                       if (likely(rc == -EIO))
+                               rx_pkts++;
+                       else if (rc == -EBUSY)  /* partial completion */
+                               break;
+               } else if (unlikely((TX_CMP_TYPE(txcmp)) ==
+                                       CMPL_BASE_TYPE_HWRM_DONE)) {
+                       bnxt_hwrm_handler(bp, txcmp);
+               } else {
+                       netdev_err(bp->dev,
+                                  "Invalid completion received on special ring\n");
+               }
+               raw_cons = NEXT_RAW_CMP(raw_cons);
+
+               if (rx_pkts == budget)
+                       break;
+       }
+
+       /* ACK the completion ring and replenish the RX/agg producers. */
+       cpr->cp_raw_cons = raw_cons;
+       BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+       writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+       writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+
+       if (event & BNXT_AGG_EVENT) {
+               writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
+               writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
+       }
+
+       /* Out of work before the budget ran out: complete NAPI and re-arm
+        * the ring's interrupt.
+        */
+       if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
+#ifdef HAVE_NEW_NAPI_COMPLETE_DONE
+               napi_complete_done(napi, rx_pkts);
+#else
+               napi_complete(napi);
+#endif
+               BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+       }
+       return rx_pkts;
+}
+
+/* Standard NAPI poll callback.  Loops until the budget is exhausted or
+ * the completion ring is empty; interrupts are only re-armed after a
+ * successful NAPI completion.
+ */
+static int bnxt_poll(struct napi_struct *napi, int budget)
+{
+       struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       int work_done = 0;
+
+       /* Busy-poll may own the ring; report a full budget so NAPI
+        * reschedules us instead of completing.
+        */
+       if (!bnxt_lock_napi(bnapi))
+               return budget;
+
+       while (1) {
+               work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+               if (work_done >= budget)
+                       break;
+
+               if (!bnxt_has_work(bp, cpr)) {
+#ifdef HAVE_NEW_NAPI_COMPLETE_DONE
+                       if (napi_complete_done(napi, work_done))
+                               BNXT_CP_DB_REARM(cpr->cp_doorbell,
+                                                cpr->cp_raw_cons);
+#else
+                       napi_complete(napi);
+                       BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+#endif
+                       break;
+               }
+       }
+       /* Ensure the MMIO doorbell writes are ordered before we release
+        * the NAPI lock.
+        */
+       mmiowb();
+       bnxt_unlock_napi(bnapi);
+       return work_done;
+}
+
+#ifdef BNXT_PRIV_RX_BUSY_POLL
+/* Low-latency busy-poll entry point.  Polls a small fixed budget outside
+ * the normal NAPI context; fails fast when the device is resetting
+ * (intr_sem held) or the link is down, or when NAPI owns the ring.
+ */
+static int bnxt_busy_poll(struct napi_struct *napi)
+{
+       struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+       struct bnxt *bp = bnapi->bp;
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       int rx_work, budget = 4;
+
+       if (atomic_read(&bp->intr_sem) != 0)
+               return LL_FLUSH_FAILED;
+
+       if (!bp->link_info.link_up)
+               return LL_FLUSH_FAILED;
+
+       if (!bnxt_lock_poll(bnapi))
+               return LL_FLUSH_BUSY;
+
+       rx_work = bnxt_poll_work(bp, bnapi, budget);
+
+       /* Re-arm the ring's interrupt before handing it back to NAPI. */
+       BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+       bnxt_unlock_poll(bnapi);
+       return rx_work;
+}
+#endif
+
+/* Unmap and free every pending TX skb on all TX rings.  Called on ring
+ * teardown/reset while the rings are quiesced.
+ */
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+       int i, max_idx;
+       struct pci_dev *pdev = bp->pdev;
+
+       if (!bp->tx_ring)
+               return;
+
+       max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+               int j;
+
+               for (j = 0; j < max_idx;) {
+                       struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+                       struct sk_buff *skb = tx_buf->skb;
+                       int k, last;
+
+                       if (!skb) {
+                               j++;
+                               continue;
+                       }
+
+                       tx_buf->skb = NULL;
+
+                       /* Push-mode skbs were copied into the push buffer,
+                        * so there is no DMA mapping to undo.  NOTE(review):
+                        * the head appears to occupy two BD slots (hence
+                        * j += 2 here and below) -- confirm against the
+                        * xmit path.
+                        */
+                       if (tx_buf->is_push) {
+                               dev_kfree_skb(skb);
+                               j += 2;
+                               continue;
+                       }
+
+                       /* Unmap the linear (head) portion of the skb. */
+                       dma_unmap_single(&pdev->dev,
+                                        dma_unmap_addr(tx_buf, mapping),
+                                        skb_headlen(skb),
+                                        PCI_DMA_TODEVICE);
+
+                       /* Then one BD slot per fragment. */
+                       last = tx_buf->nr_frags;
+                       j += 2;
+                       for (k = 0; k < last; k++, j++) {
+                               int ring_idx = j & bp->tx_ring_mask;
+                               skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+                               tx_buf = &txr->tx_buf_ring[ring_idx];
+                               dma_unmap_page(
+                                       &pdev->dev,
+                                       dma_unmap_addr(tx_buf, mapping),
+                                       skb_frag_size(frag), PCI_DMA_TODEVICE);
+                       }
+                       dev_kfree_skb(skb);
+               }
+               /* Reset BQL accounting for this queue. */
+               netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+       }
+}
+
+/* Unmap and free all RX-side buffers: TPA buffers, RX data buffers,
+ * aggregation ring pages, and the partially used rx_page.  Called on
+ * ring teardown/reset while the rings are quiesced.
+ */
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+       int i, max_idx, max_agg_idx;
+       struct pci_dev *pdev = bp->pdev;
+
+       if (!bp->rx_ring)
+               return;
+
+       max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+       max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+               int j;
+
+               /* Buffers parked in the TPA (HW GRO/LRO) slots. */
+               if (rxr->rx_tpa) {
+                       for (j = 0; j < MAX_TPA; j++) {
+                               struct bnxt_tpa_info *tpa_info =
+                                                       &rxr->rx_tpa[j];
+#ifdef HAVE_BUILD_SKB
+                               u8 *data = tpa_info->data;
+#else
+                               struct sk_buff *data = tpa_info->data;
+#endif
+
+                               if (!data)
+                                       continue;
+
+                               dma_unmap_single(&pdev->dev, tpa_info->mapping,
+                                                bp->rx_buf_use_size,
+                                                bp->rx_dir);
+
+                               tpa_info->data = NULL;
+
+#ifdef HAVE_BUILD_SKB
+                               kfree(data);
+#else
+                               dev_kfree_skb_any(data);
+#endif
+                       }
+               }
+
+               /* Buffers posted on the main RX ring. */
+               for (j = 0; j < max_idx; j++) {
+                       struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+                       dma_addr_t mapping = rx_buf->mapping;
+#ifdef HAVE_BUILD_SKB
+                       void *data = rx_buf->data;
+#else
+                       struct sk_buff *data = rx_buf->data;
+#endif
+
+                       if (!data)
+                               continue;
+
+#ifdef HAVE_BUILD_SKB
+                       if (BNXT_RX_PAGE_MODE(bp)) {
+                               /* Page-mode mappings are offset by
+                                * rx_dma_offset; undo that before unmapping.
+                                */
+                               mapping -= bp->rx_dma_offset;
+                               dma_unmap_page(&pdev->dev, mapping,
+                                              PAGE_SIZE, bp->rx_dir);
+                               __free_page(data);
+                       } else {
+                               dma_unmap_single(&pdev->dev, mapping,
+                                                bp->rx_buf_use_size,
+                                                bp->rx_dir);
+                               kfree(data);
+                       }
+#else
+                       dma_unmap_single(&pdev->dev, mapping,
+                                        bp->rx_buf_use_size, bp->rx_dir);
+                       dev_kfree_skb_any(data);
+#endif
+                       rx_buf->data = NULL;
+               }
+
+               /* Aggregation ring pages; also clear their bitmap bits. */
+               for (j = 0; j < max_agg_idx; j++) {
+                       struct bnxt_sw_rx_agg_bd *rx_agg_buf =
+                               &rxr->rx_agg_ring[j];
+                       struct page *page = rx_agg_buf->page;
+
+                       if (!page)
+                               continue;
+
+                       dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
+                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+                       rx_agg_buf->page = NULL;
+                       __clear_bit(j, rxr->rx_agg_bmap);
+
+                       __free_page(page);
+               }
+               /* NOTE(review): rx_page is presumably a partially consumed
+                * page cached for agg allocations -- confirm against the
+                * alloc path.
+                */
+               if (rxr->rx_page) {
+                       __free_page(rxr->rx_page);
+                       rxr->rx_page = NULL;
+               }
+       }
+}
+
+/* Free all driver-owned TX and RX buffers (ring teardown/reset path). */
+static void bnxt_free_skbs(struct bnxt *bp)
+{
+       bnxt_free_tx_skbs(bp);
+       bnxt_free_rx_skbs(bp);
+}
+
+/* Release everything bnxt_alloc_ring() set up for @ring: each coherent
+ * descriptor page, the page indirection table (if any), and the
+ * software ring vmem.  Safe to call on a partially allocated ring.
+ */
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+       struct pci_dev *pdev = bp->pdev;
+       int pg;
+
+       for (pg = 0; pg < ring->nr_pages; pg++) {
+               if (ring->pg_arr[pg]) {
+                       dma_free_coherent(&pdev->dev, ring->page_size,
+                                         ring->pg_arr[pg], ring->dma_arr[pg]);
+                       ring->pg_arr[pg] = NULL;
+               }
+       }
+
+       if (ring->pg_tbl) {
+               /* Indirection table: one 64-bit DMA address per page. */
+               dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
+                                 ring->pg_tbl, ring->pg_tbl_map);
+               ring->pg_tbl = NULL;
+       }
+
+       if (ring->vmem_size && *ring->vmem) {
+               vfree(*ring->vmem);
+               *ring->vmem = NULL;
+       }
+}
+
+/* Allocate @ring's DMA descriptor pages (plus an indirection table when
+ * more than one page is needed) and its optional software ring vmem.
+ * On failure, pieces already allocated are left for bnxt_free_ring()
+ * to reclaim.
+ */
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+       struct pci_dev *pdev = bp->pdev;
+       int pg;
+
+       if (ring->nr_pages > 1) {
+               /* Indirection table: one 64-bit DMA address per page. */
+               ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
+                                                 ring->nr_pages * 8,
+                                                 &ring->pg_tbl_map,
+                                                 GFP_KERNEL);
+               if (!ring->pg_tbl)
+                       return -ENOMEM;
+       }
+
+       for (pg = 0; pg < ring->nr_pages; pg++) {
+               void *va = dma_alloc_coherent(&pdev->dev, ring->page_size,
+                                             &ring->dma_arr[pg], GFP_KERNEL);
+
+               if (!va)
+                       return -ENOMEM;
+               ring->pg_arr[pg] = va;
+               if (ring->nr_pages > 1)
+                       ring->pg_tbl[pg] = cpu_to_le64(ring->dma_arr[pg]);
+       }
+
+       if (ring->vmem_size) {
+               *ring->vmem = vzalloc(ring->vmem_size);
+               if (!*ring->vmem)
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
+static void bnxt_free_rx_rings(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->rx_ring)
+               return;
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+               struct bnxt_ring_struct *ring;
+
+#ifdef HAVE_NDO_XDP
+               if (rxr->xdp_prog)
+                       bpf_prog_put(rxr->xdp_prog);
+#endif
+
+               kfree(rxr->rx_tpa);
+               rxr->rx_tpa = NULL;
+
+               kfree(rxr->rx_agg_bmap);
+               rxr->rx_agg_bmap = NULL;
+
+               ring = &rxr->rx_ring_struct;
+               bnxt_free_ring(bp, ring);
+
+               ring = &rxr->rx_agg_ring_struct;
+               bnxt_free_ring(bp, ring);
+       }
+}
+
+/* Allocate each RX ring's descriptor pages and, when the corresponding
+ * flags are set, its aggregation ring, occupancy bitmap, and TPA info
+ * array.  Partial allocations are reclaimed by bnxt_free_rx_rings().
+ */
+static int bnxt_alloc_rx_rings(struct bnxt *bp)
+{
+       int want_agg = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
+       int want_tpa = !!(bp->flags & BNXT_FLAG_TPA);
+       int n, rc;
+
+       if (!bp->rx_ring)
+               return -ENOMEM;
+
+       for (n = 0; n < bp->rx_nr_rings; n++) {
+               struct bnxt_rx_ring_info *rxr = &bp->rx_ring[n];
+               u16 bmap_bytes;
+
+               rc = bnxt_alloc_ring(bp, &rxr->rx_ring_struct);
+               if (rc)
+                       return rc;
+
+               if (!want_agg)
+                       continue;
+
+               rc = bnxt_alloc_ring(bp, &rxr->rx_agg_ring_struct);
+               if (rc)
+                       return rc;
+
+               /* One bitmap bit per aggregation ring entry. */
+               rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+               bmap_bytes = rxr->rx_agg_bmap_size / 8;
+               rxr->rx_agg_bmap = kzalloc(bmap_bytes, GFP_KERNEL);
+               if (!rxr->rx_agg_bmap)
+                       return -ENOMEM;
+
+               if (want_tpa) {
+                       rxr->rx_tpa = kcalloc(MAX_TPA,
+                                             sizeof(struct bnxt_tpa_info),
+                                             GFP_KERNEL);
+                       if (!rxr->rx_tpa)
+                               return -ENOMEM;
+               }
+       }
+       return 0;
+}
+
+/* Release each TX ring's push-mode buffer (if allocated) and its
+ * descriptor ring.
+ */
+static void bnxt_free_tx_rings(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+       int n;
+
+       if (!bp->tx_ring)
+               return;
+
+       for (n = 0; n < bp->tx_nr_rings; n++) {
+               struct bnxt_tx_ring_info *txr = &bp->tx_ring[n];
+
+               if (txr->tx_push) {
+                       dma_free_coherent(&pdev->dev, bp->tx_push_size,
+                                         txr->tx_push, txr->tx_push_mapping);
+                       txr->tx_push = NULL;
+               }
+
+               bnxt_free_ring(bp, &txr->tx_ring_struct);
+       }
+}
+
+/* Allocate every TX ring's descriptor pages and, when TX push is
+ * enabled, a small coherent buffer backing push-mode transmits.  Also
+ * assigns each ring its hardware queue id.
+ */
+static int bnxt_alloc_tx_rings(struct bnxt *bp)
+{
+       int i, j, rc;
+       struct pci_dev *pdev = bp->pdev;
+
+       bp->tx_push_size = 0;
+       if (bp->tx_push_thresh) {
+               int push_size;
+
+               push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
+                                       bp->tx_push_thresh);
+
+               /* Push mode is only used for small frames; disable it
+                * entirely if the threshold needs more than 256 bytes.
+                */
+               if (push_size > 256) {
+                       push_size = 0;
+                       bp->tx_push_thresh = 0;
+               }
+
+               bp->tx_push_size = push_size;
+       }
+
+       for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+               struct bnxt_ring_struct *ring;
+
+               ring = &txr->tx_ring_struct;
+
+               rc = bnxt_alloc_ring(bp, ring);
+               if (rc)
+                       return rc;
+
+               if (bp->tx_push_size) {
+                       dma_addr_t mapping;
+
+                       /* One pre-allocated DMA buffer to backup
+                        * TX push operation
+                        */
+                       txr->tx_push = dma_alloc_coherent(&pdev->dev,
+                                               bp->tx_push_size,
+                                               &txr->tx_push_mapping,
+                                               GFP_KERNEL);
+
+                       if (!txr->tx_push)
+                               return -ENOMEM;
+
+                       /* Packet data lands immediately after the push BD. */
+                       mapping = txr->tx_push_mapping +
+                               sizeof(struct tx_push_bd);
+                       txr->data_mapping = cpu_to_le64(mapping);
+
+                       memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
+               }
+               ring->queue_id = bp->q_info[j].queue_id;
+               /* XDP rings do not advance the TC->queue mapping; the last
+                * ring of each TC bumps j to the next hardware queue.
+                */
+               if (i < bp->tx_nr_rings_xdp)
+                       continue;
+               if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+                       j++;
+       }
+       return 0;
+}
+
+/* Free the completion ring belonging to every NAPI instance. */
+static void bnxt_free_cp_rings(struct bnxt *bp)
+{
+       int n;
+
+       if (!bp->bnapi)
+               return;
+
+       for (n = 0; n < bp->cp_nr_rings; n++) {
+               struct bnxt_napi *bnapi = bp->bnapi[n];
+
+               if (bnapi)
+                       bnxt_free_ring(bp, &bnapi->cp_ring.cp_ring_struct);
+       }
+}
+
+/* Allocate a completion ring for every NAPI instance.  Stops on the
+ * first failure; cleanup is left to bnxt_free_cp_rings().
+ */
+static int bnxt_alloc_cp_rings(struct bnxt *bp)
+{
+       int n;
+
+       for (n = 0; n < bp->cp_nr_rings; n++) {
+               struct bnxt_napi *bnapi = bp->bnapi[n];
+               int rc;
+
+               if (!bnapi)
+                       continue;
+
+               rc = bnxt_alloc_ring(bp, &bnapi->cp_ring.cp_ring_struct);
+               if (rc)
+                       return rc;
+       }
+       return 0;
+}
+
+/* Point every ring_struct (completion, RX, RX-agg, TX) at its backing
+ * descriptor/DMA arrays and software ring storage; no allocation is
+ * done here.
+ */
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr;
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_tx_ring_info *txr;
+               struct bnxt_ring_struct *ring;
+
+               if (!bnapi)
+                       continue;
+
+               cpr = &bnapi->cp_ring;
+               ring = &cpr->cp_ring_struct;
+               ring->nr_pages = bp->cp_nr_pages;
+               ring->page_size = HW_CMPD_RING_SIZE;
+               ring->pg_arr = (void **)cpr->cp_desc_ring;
+               ring->dma_arr = cpr->cp_desc_mapping;
+               /* Completion rings have no software ring. */
+               ring->vmem_size = 0;
+
+               rxr = bnapi->rx_ring;
+               if (!rxr)
+                       goto skip_rx;
+
+               ring = &rxr->rx_ring_struct;
+               ring->nr_pages = bp->rx_nr_pages;
+               ring->page_size = HW_RXBD_RING_SIZE;
+               ring->pg_arr = (void **)rxr->rx_desc_ring;
+               ring->dma_arr = rxr->rx_desc_mapping;
+               ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+               ring->vmem = (void **)&rxr->rx_buf_ring;
+
+               ring = &rxr->rx_agg_ring_struct;
+               ring->nr_pages = bp->rx_agg_nr_pages;
+               ring->page_size = HW_RXBD_RING_SIZE;
+               ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+               ring->dma_arr = rxr->rx_agg_desc_mapping;
+               ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+               ring->vmem = (void **)&rxr->rx_agg_ring;
+
+skip_rx:
+               txr = bnapi->tx_ring;
+               if (!txr)
+                       continue;
+
+               ring = &txr->tx_ring_struct;
+               ring->nr_pages = bp->tx_nr_pages;
+               /* NOTE(review): TX reuses HW_RXBD_RING_SIZE -- presumably
+                * because TX and RX BDs are the same size; confirm.
+                */
+               ring->page_size = HW_RXBD_RING_SIZE;
+               ring->pg_arr = (void **)txr->tx_desc_ring;
+               ring->dma_arr = txr->tx_desc_mapping;
+               ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+               ring->vmem = (void **)&txr->tx_buf_ring;
+       }
+}
+
+/* Stamp every RX BD in the ring with the given type/flags word and a
+ * running opaque index so completions can be matched back to producer
+ * slots.  Pages that were never allocated are skipped.
+ */
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+       struct rx_bd **pages = (struct rx_bd **)ring->pg_arr;
+       u32 prod = 0;
+       int pg;
+
+       for (pg = 0; pg < ring->nr_pages; pg++) {
+               struct rx_bd *bd = pages[pg];
+               int k;
+
+               if (!bd)
+                       continue;
+
+               for (k = 0; k < RX_DESC_CNT; k++) {
+                       bd->rx_bd_len_flags_type = cpu_to_le32(type);
+                       bd->rx_bd_opaque = prod;
+                       bd++;
+                       prod++;
+               }
+       }
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+       struct net_device *dev = bp->dev;
+       struct bnxt_rx_ring_info *rxr;
+       struct bnxt_ring_struct *ring;
+       u32 prod, type;
+       int i;
+
+       type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+               RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+       if (NET_IP_ALIGN == 2)
+               type |= RX_BD_FLAGS_SOP;
+
+       rxr = &bp->rx_ring[ring_nr];
+       ring = &rxr->rx_ring_struct;
+       bnxt_init_rxbd_pages(ring, type);
+
+#ifdef HAVE_NDO_XDP
+       if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+               rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
+               if (IS_ERR(rxr->xdp_prog)) {
+                       int rc = PTR_ERR(rxr->xdp_prog);
+
+                       rxr->xdp_prog = NULL;
+                       return rc;
+               }
+       }
+#endif
+
+       prod = rxr->rx_prod;
+       for (i = 0; i < bp->rx_ring_size; i++) {
+               if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+                       netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+                                   ring_nr, i, bp->rx_ring_size);
+                       break;
+               }
+               prod = NEXT_RX(prod);
+       }
+       rxr->rx_prod = prod;
+       ring->fw_ring_id = INVALID_HW_RING_ID;
+
+       ring = &rxr->rx_agg_ring_struct;
+       ring->fw_ring_id = INVALID_HW_RING_ID;
+
+       if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+               return 0;
+
+       type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+               RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+       bnxt_init_rxbd_pages(ring, type);
+
+       prod = rxr->rx_agg_prod;
+       for (i = 0; i < bp->rx_agg_ring_size; i++) {
+               if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+                       netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+                                   ring_nr, i, bp->rx_ring_size);
+                       break;
+               }
+               prod = NEXT_RX_AGG(prod);
+       }
+       rxr->rx_agg_prod = prod;
+
+       if (bp->flags & BNXT_FLAG_TPA) {
+               if (rxr->rx_tpa) {
+#ifdef HAVE_BUILD_SKB
+                       u8 *data;
+#else
+                       struct sk_buff *data;
+#endif
+                       dma_addr_t mapping;
+
+                       for (i = 0; i < MAX_TPA; i++) {
+                               data = __bnxt_alloc_rx_data(bp, &mapping,
+                                                           GFP_KERNEL);
+                               if (!data)
+                                       return -ENOMEM;
+
+                               rxr->rx_tpa[i].data = data;
+#ifdef HAVE_BUILD_SKB
+                               rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+#else
+                               rxr->rx_tpa[i].data_ptr = data->data +
+                                                         bp->rx_offset;
+#endif
+                               rxr->rx_tpa[i].mapping = mapping;
+                       }
+               } else {
+                       netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+/* Mark every completion ring as not yet registered with firmware. */
+static void bnxt_init_cp_rings(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+
+               bnapi->cp_ring.cp_ring_struct.fw_ring_id = INVALID_HW_RING_ID;
+       }
+}
+
+/* Set the RX buffer/DMA offsets for the active RX mode (page mode for
+ * XDP vs. normal skb mode), then initialize every RX ring.  Stops at
+ * the first failure and returns its error code.
+ */
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+       int i;
+
+       if (BNXT_RX_PAGE_MODE(bp)) {
+               bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+               bp->rx_dma_offset = XDP_PACKET_HEADROOM;
+       } else {
+               bp->rx_offset = BNXT_RX_OFFSET;
+               bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
+       }
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               int rc = bnxt_init_one_rx_ring(bp, i);
+
+               if (rc)
+                       return rc;
+       }
+       return 0;
+}
+
+/* Set the TX wake threshold (at least room for a maximally fragmented
+ * skb) and mark every TX ring as not yet registered with firmware.
+ * Always returns 0.
+ */
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+       u16 idx;
+
+       bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+                                  MAX_SKB_FRAGS + 1);
+
+       for (idx = 0; idx < bp->tx_nr_rings; idx++)
+               bp->tx_ring[idx].tx_ring_struct.fw_ring_id =
+                       INVALID_HW_RING_ID;
+
+       return 0;
+}
+
+/* Free the per-completion-ring group info array. */
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+       kfree(bp->grp_info);
+       bp->grp_info = NULL;
+}
+
+/* Initialize (and on a full IRQ re-init, first allocate) the ring group
+ * table, marking every firmware ID as invalid.  fw_stats_ctx is only
+ * reset on re-init, preserving it across a soft restart.
+ */
+static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
+{
+       int i;
+
+       if (irq_re_init) {
+               bp->grp_info = kcalloc(bp->cp_nr_rings,
+                                      sizeof(struct bnxt_ring_grp_info),
+                                      GFP_KERNEL);
+               if (!bp->grp_info)
+                       return -ENOMEM;
+       }
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_ring_grp_info *grp = &bp->grp_info[i];
+
+               if (irq_re_init)
+                       grp->fw_stats_ctx = INVALID_HW_RING_ID;
+               grp->fw_grp_id = INVALID_HW_RING_ID;
+               grp->rx_fw_ring_id = INVALID_HW_RING_ID;
+               grp->agg_fw_ring_id = INVALID_HW_RING_ID;
+               grp->cp_fw_ring_id = INVALID_HW_RING_ID;
+       }
+       return 0;
+}
+
+/* Free the vnic_info array and reset the VNIC count. */
+static void bnxt_free_vnics(struct bnxt *bp)
+{
+       kfree(bp->vnic_info);
+       bp->vnic_info = NULL;
+       bp->nr_vnics = 0;
+}
+
+/* Allocate the vnic_info array: one default VNIC, plus one per RX ring
+ * when RFS is enabled, plus one extra on Nitro A0 chips.
+ */
+static int bnxt_alloc_vnics(struct bnxt *bp)
+{
+       int nr = 1;
+
+#ifdef CONFIG_RFS_ACCEL
+       if (bp->flags & BNXT_FLAG_RFS)
+               nr += bp->rx_nr_rings;
+#endif
+
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+               nr++;
+
+       bp->vnic_info = kcalloc(nr, sizeof(struct bnxt_vnic_info),
+                               GFP_KERNEL);
+       if (!bp->vnic_info)
+               return -ENOMEM;
+
+       bp->nr_vnics = nr;
+       return 0;
+}
+
+/* Invalidate every VNIC's firmware IDs and populate RSS hash keys:
+ * VNIC 0 gets a random key, the others copy VNIC 0's key.
+ */
+static void bnxt_init_vnics(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+               vnic->fw_vnic_id = INVALID_HW_RING_ID;
+               vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
+               vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
+               vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
+
+               if (!vnic->rss_hash_key)
+                       continue;
+
+               if (i)
+                       memcpy(vnic->rss_hash_key,
+                              bp->vnic_info[0].rss_hash_key,
+                              HW_HASH_KEY_SIZE);
+               else
+                       prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+       }
+}
+
+/* Return the number of ring pages needed to hold ring_size descriptors
+ * at desc_per_pg descriptors per page, rounded up to a power of two
+ * after adding one spare page.  Never returns less than 1.
+ */
+static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
+{
+       int pages = ring_size / desc_per_pg;
+
+       if (!pages)
+               return 1;
+
+       /* One extra page, then walk up to the next power of two. */
+       for (pages++; pages & (pages - 1); pages++)
+               ;
+
+       return pages;
+}
+
+/* Recompute the aggregation (TPA) flags from the netdev feature bits.
+ * NOTE(review): clearing BNXT_FLAG_TPA first presumably also clears the
+ * LRO/GRO bits it covers -- confirm the mask definition in bnxt.h.
+ */
+void bnxt_set_tpa_flags(struct bnxt *bp)
+{
+       bp->flags &= ~BNXT_FLAG_TPA;
+       /* TPA needs aggregation rings; without them, leave it disabled. */
+       if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+               return;
+       if (bp->dev->features & NETIF_F_LRO)
+               bp->flags |= BNXT_FLAG_LRO;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39)
+       if (bp->dev->features & NETIF_F_GRO)
+               bp->flags |= BNXT_FLAG_GRO;
+#endif
+}
+
+/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
+ * be set on entry.
+ *
+ * Derives RX buffer sizes, aggregation ring size, page counts and ring
+ * masks for the RX, TX and completion rings.
+ */
+void bnxt_set_ring_params(struct bnxt *bp)
+{
+       u32 ring_size, rx_size, rx_space;
+       u32 agg_factor = 0, agg_ring_size = 0;
+
+       /* 8 for CRC and VLAN */
+       rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+       /* Full per-buffer footprint: data + skb head pad + shared info. */
+       rx_space = rx_size + NET_SKB_PAD +
+               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
+       ring_size = bp->rx_ring_size;
+       bp->rx_agg_ring_size = 0;
+       bp->rx_agg_nr_pages = 0;
+
+       if (bp->flags & BNXT_FLAG_TPA)
+               agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
+
+       bp->flags &= ~BNXT_FLAG_JUMBO;
+       /* Buffer too large for one page: switch to jumbo (agg ring) mode. */
+       if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
+               u32 jumbo_factor;
+
+               bp->flags |= BNXT_FLAG_JUMBO;
+               jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+               if (jumbo_factor > agg_factor)
+                       agg_factor = jumbo_factor;
+       }
+       agg_ring_size = ring_size * agg_factor;
+
+       if (agg_ring_size) {
+               bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
+                                                       RX_DESC_CNT);
+               if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+                       u32 tmp = agg_ring_size;
+
+                       bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+                       agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+                       netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
+                                   tmp, agg_ring_size);
+               }
+               bp->rx_agg_ring_size = agg_ring_size;
+               bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+               /* With agg rings, the head buffer only needs to hold the
+                * copy threshold; the rest lands in aggregation buffers.
+                */
+               rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+               rx_space = rx_size + NET_SKB_PAD +
+                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       }
+
+       bp->rx_buf_use_size = rx_size;
+       bp->rx_buf_size = rx_space;
+
+       bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
+       bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
+
+       ring_size = bp->tx_ring_size;
+       bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
+       bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
+
+       /* Size the completion ring for the combined RX (incl. agg) and TX
+        * completion load, capped at MAX_CP_PAGES.
+        */
+       ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+       bp->cp_ring_size = ring_size;
+
+       bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
+       if (bp->cp_nr_pages > MAX_CP_PAGES) {
+               bp->cp_nr_pages = MAX_CP_PAGES;
+               bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+               netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
+                           ring_size, bp->cp_ring_size);
+       }
+       bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
+       bp->cp_ring_mask = bp->cp_bit - 1;
+}
+
+/* Switch RX between page mode and normal skb mode.  Returns -EOPNOTSUPP
+ * when page mode is not compiled in or the MTU exceeds what a single
+ * page can hold.
+ */
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+       if (page_mode) {
+#ifdef BNXT_RX_PAGE_MODE_SUPPORT
+               if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
+                       return -EOPNOTSUPP;
+#ifdef HAVE_MIN_MTU
+               bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+#endif
+               /* Page mode runs without aggregation rings and without LRO. */
+               bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+               bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+               bp->dev->hw_features &= ~NETIF_F_LRO;
+               bp->dev->features &= ~NETIF_F_LRO;
+               /* NOTE(review): bidirectional mapping presumably so XDP can
+                * modify/retransmit buffers -- confirm against the RX path.
+                */
+               bp->rx_dir = DMA_BIDIRECTIONAL;
+               bp->rx_skb_func = bnxt_rx_page_skb;
+#else
+               return -EOPNOTSUPP;
+#endif
+       } else {
+#ifdef HAVE_MIN_MTU
+               bp->dev->max_mtu = BNXT_MAX_MTU;
+#endif
+               bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+               bp->rx_dir = DMA_FROM_DEVICE;
+               bp->rx_skb_func = bnxt_rx_skb;
+       }
+       return 0;
+}
+
+/* Release every VNIC's attribute memory: unicast/multicast address
+ * lists, firmware group ID array and the RSS table page (the hash key
+ * lives inside the RSS page, so only its pointer is cleared).
+ */
+static void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+       int i;
+
+       if (!bp->vnic_info)
+               return;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+               kfree(vnic->fw_grp_ids);
+               vnic->fw_grp_ids = NULL;
+
+               kfree(vnic->uc_list);
+               vnic->uc_list = NULL;
+
+               if (vnic->mc_list) {
+                       dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+                                         vnic->mc_list, vnic->mc_list_mapping);
+                       vnic->mc_list = NULL;
+               }
+
+               if (vnic->rss_table) {
+                       dma_free_coherent(&pdev->dev, PAGE_SIZE,
+                                         vnic->rss_table,
+                                         vnic->rss_table_dma_addr);
+                       vnic->rss_table = NULL;
+               }
+
+               vnic->rss_hash_key = NULL;
+               vnic->flags = 0;
+       }
+}
+
+/* Allocate per-VNIC resources: unicast and multicast address lists,
+ * the firmware group ID array, and -- unless the new-RSS capability
+ * makes it unnecessary -- one DMA page holding the RSS indirection
+ * table followed by the hash key.  Returns -ENOMEM on any failure;
+ * the caller is responsible for freeing partial allocations.
+ */
+static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+       int i;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+               int max_rings, size;
+
+               if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
+                       int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+                       if (mem_size > 0) {
+                               vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+                               if (!vnic->uc_list)
+                                       return -ENOMEM;
+                       }
+               }
+
+               if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
+                       vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
+                       vnic->mc_list =
+                               dma_alloc_coherent(&pdev->dev,
+                                                  vnic->mc_list_size,
+                                                  &vnic->mc_list_mapping,
+                                                  GFP_KERNEL);
+                       if (!vnic->mc_list)
+                               return -ENOMEM;
+               }
+
+               max_rings = (vnic->flags & BNXT_VNIC_RSS_FLAG) ?
+                           bp->rx_nr_rings : 1;
+
+               vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
+               if (!vnic->fw_grp_ids)
+                       return -ENOMEM;
+
+               if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+                   !(vnic->flags & BNXT_VNIC_RSS_FLAG))
+                       continue;
+
+               /* One page holds the RSS table; the hash key follows it at
+                * a cache-aligned offset.
+                */
+               vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+                                                    &vnic->rss_table_dma_addr,
+                                                    GFP_KERNEL);
+               if (!vnic->rss_table)
+                       return -ENOMEM;
+
+               size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+
+               vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+               vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+       }
+       return 0;
+}
+
+/* Free the HWRM command response DMA buffer and, when it was allocated,
+ * the optional debug register response buffer.
+ */
+static void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+
+       dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+                         bp->hwrm_cmd_resp_dma_addr);
+
+       bp->hwrm_cmd_resp_addr = NULL;
+       if (bp->hwrm_dbg_resp_addr) {
+               dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+                                 bp->hwrm_dbg_resp_addr,
+                                 bp->hwrm_dbg_resp_dma_addr);
+
+               bp->hwrm_dbg_resp_addr = NULL;
+       }
+}
+
+/* Allocate the page-sized DMA buffer that firmware writes HWRM command
+ * responses into.  The debug register buffer is best-effort: failure
+ * there only warns and the function still returns 0.
+ */
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+
+       bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+                                                  &bp->hwrm_cmd_resp_dma_addr,
+                                                  GFP_KERNEL);
+       if (!bp->hwrm_cmd_resp_addr)
+               return -ENOMEM;
+       bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
+                                                   HWRM_DBG_REG_BUF_SIZE,
+                                                   &bp->hwrm_dbg_resp_dma_addr,
+                                                   GFP_KERNEL);
+       if (!bp->hwrm_dbg_resp_addr)
+               netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
+
+       return 0;
+}
+
+/* Free the port statistics DMA buffer (clearing BNXT_FLAG_PORT_STATS)
+ * and every per-completion-ring hardware stats context buffer.
+ */
+static void bnxt_free_stats(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+       u32 i;
+
+       if (bp->hw_rx_port_stats) {
+               dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
+                                 bp->hw_rx_port_stats,
+                                 bp->hw_rx_port_stats_map);
+               bp->hw_rx_port_stats = NULL;
+               bp->flags &= ~BNXT_FLAG_PORT_STATS;
+       }
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+               if (!cpr->hw_stats)
+                       continue;
+
+               dma_free_coherent(&pdev->dev, sizeof(struct ctx_hw_stats),
+                                 cpr->hw_stats, cpr->hw_stats_map);
+               cpr->hw_stats = NULL;
+       }
+}
+
+/* Allocate DMA-coherent statistics memory: one ctx_hw_stats block per
+ * completion ring and, on the PF (except CHIP_NUM_58700), a single
+ * buffer holding RX port stats followed -- after a 512-byte gap -- by
+ * TX port stats.
+ */
+static int bnxt_alloc_stats(struct bnxt *bp)
+{
+       u32 size, i;
+       struct pci_dev *pdev = bp->pdev;
+
+       size = sizeof(struct ctx_hw_stats);
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
+                                                  &cpr->hw_stats_map,
+                                                  GFP_KERNEL);
+               if (!cpr->hw_stats)
+                       return -ENOMEM;
+
+               cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+       }
+
+       if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
+               /* Extra 1024 bytes of slack beyond the two stats structs. */
+               bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
+                                        sizeof(struct tx_port_stats) + 1024;
+
+               bp->hw_rx_port_stats =
+                       dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
+                                          &bp->hw_rx_port_stats_map,
+                                          GFP_KERNEL);
+               if (!bp->hw_rx_port_stats)
+                       return -ENOMEM;
+
+               /* TX stats start 512 bytes after the end of the RX stats. */
+               bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
+                                      512;
+               bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
+                                          sizeof(struct rx_port_stats) + 512;
+               bp->flags |= BNXT_FLAG_PORT_STATS;
+       }
+       return 0;
+}
+
+/* Zero the software producer/consumer indices of every completion, TX
+ * and RX ring.  Used on the re-init path where ring memory is kept.
+ */
+static void bnxt_clear_ring_indices(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_rx_ring_info *rxr;
+               struct bnxt_tx_ring_info *txr;
+
+               if (!bnapi)
+                       continue;
+
+               bnapi->cp_ring.cp_raw_cons = 0;
+
+               txr = bnapi->tx_ring;
+               if (txr) {
+                       txr->tx_prod = 0;
+                       txr->tx_cons = 0;
+               }
+
+               rxr = bnapi->rx_ring;
+               if (rxr) {
+                       rxr->rx_prod = 0;
+                       rxr->rx_agg_prod = 0;
+                       rxr->rx_sw_agg_prod = 0;
+                       rxr->rx_next_cons = 0;
+               }
+       }
+}
+
+/* Drop every ntuple filter from the hash table and, on a full IRQ
+ * re-init, also free the filter-ID bitmap.  Compiles to a no-op without
+ * CONFIG_RFS_ACCEL.
+ */
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int i;
+
+       /* Under rtnl_lock and all our NAPIs have been disabled.  It's
+        * safe to delete the hash table.
+        */
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+               struct hlist_head *head;
+               struct hlist_node *tmp, __maybe_unused *nxt;
+               struct bnxt_ntuple_filter *fltr;
+
+               head = &bp->ntp_fltr_hash_tbl[i];
+               __hlist_for_each_entry_safe(fltr, nxt, tmp, head, hash) {
+                       hlist_del(&fltr->hash);
+                       kfree(fltr);
+               }
+       }
+       if (irq_reinit) {
+               kfree(bp->ntp_fltr_bmap);
+               bp->ntp_fltr_bmap = NULL;
+       }
+       bp->ntp_fltr_count = 0;
+#endif
+}
+
+/* Set up ntuple filtering state: empty hash buckets, zero count, and a
+ * bitmap for filter IDs.  Does nothing unless RFS is enabled; always
+ * returns 0 without CONFIG_RFS_ACCEL.
+ */
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int i;
+
+       if (!(bp->flags & BNXT_FLAG_RFS))
+               return 0;
+
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+               INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+       bp->ntp_fltr_count = 0;
+       bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+                                   sizeof(long), GFP_KERNEL);
+       if (!bp->ntp_fltr_bmap)
+               return -ENOMEM;
+
+       return 0;
+#else
+       return 0;
+#endif
+}
+
+/* Tear down data-path memory.  VNIC attributes, rings and ntuple
+ * filters are always freed; with irq_re_init the backing arrays
+ * (stats, ring groups, vnics, tx/rx ring and napi structs) go too,
+ * otherwise only the software ring indices are reset for reuse.
+ */
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+       bnxt_free_vnic_attributes(bp);
+       bnxt_free_tx_rings(bp);
+       bnxt_free_rx_rings(bp);
+       bnxt_free_cp_rings(bp);
+       bnxt_free_ntp_fltrs(bp, irq_re_init);
+       if (irq_re_init) {
+               bnxt_free_stats(bp);
+               bnxt_free_ring_grps(bp);
+               bnxt_free_vnics(bp);
+               kfree(bp->tx_ring_map);
+               bp->tx_ring_map = NULL;
+               kfree(bp->tx_ring);
+               bp->tx_ring = NULL;
+               kfree(bp->rx_ring);
+               bp->rx_ring = NULL;
+               kfree(bp->bnapi);
+               bp->bnapi = NULL;
+       } else {
+               bnxt_clear_ring_indices(bp);
+       }
+}
+
+/* Allocate all data-path memory.  With irq_re_init, first build the
+ * napi/ring bookkeeping: one kzalloc holds the bnapi pointer array
+ * followed by the bnxt_napi structs themselves, then the RX/TX ring
+ * arrays are wired to their bnapi owners, and stats, ntuple filter and
+ * vnic memory is allocated.  Ring page memory and VNIC attributes are
+ * allocated in all cases.  On any failure, everything is freed.
+ */
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+       int i, j, rc, size, arr_size;
+       void *bnapi;
+
+       if (irq_re_init) {
+               /* Allocate bnapi mem pointer array and mem block for
+                * all queues
+                */
+               arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+                               bp->cp_nr_rings);
+               size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+               bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+               if (!bnapi)
+                       return -ENOMEM;
+
+               bp->bnapi = bnapi;
+               /* The struct area starts right after the pointer array. */
+               bnapi += arr_size;
+               for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+                       bp->bnapi[i] = bnapi;
+                       bp->bnapi[i]->index = i;
+                       bp->bnapi[i]->bp = bp;
+               }
+
+               bp->rx_ring = kcalloc(bp->rx_nr_rings,
+                                     sizeof(struct bnxt_rx_ring_info),
+                                     GFP_KERNEL);
+               if (!bp->rx_ring)
+                       return -ENOMEM;
+
+               for (i = 0; i < bp->rx_nr_rings; i++) {
+                       bp->rx_ring[i].bnapi = bp->bnapi[i];
+                       bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
+               }
+
+               bp->tx_ring = kcalloc(bp->tx_nr_rings,
+                                     sizeof(struct bnxt_tx_ring_info),
+                                     GFP_KERNEL);
+               if (!bp->tx_ring)
+                       return -ENOMEM;
+
+               bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
+                                         GFP_KERNEL);
+
+               if (!bp->tx_ring_map)
+                       return -ENOMEM;
+
+               /* Shared rings: TX rings share bnapi[0..]; otherwise TX
+                * bnapis start after the RX ones.
+                */
+               if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+                       j = 0;
+               else
+                       j = bp->rx_nr_rings;
+
+               for (i = 0; i < bp->tx_nr_rings; i++, j++) {
+                       bp->tx_ring[i].bnapi = bp->bnapi[j];
+                       bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+                       bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
+                       /* The first tx_nr_rings_xdp rings are XDP TX rings
+                        * with no netdev TX queue; the rest map to queues.
+                        */
+                       if (i >= bp->tx_nr_rings_xdp) {
+                               bp->tx_ring[i].txq_index = i -
+                                       bp->tx_nr_rings_xdp;
+                               bp->bnapi[j]->tx_int = bnxt_tx_int;
+                       } else {
+                               bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
+                               bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+                       }
+               }
+
+               rc = bnxt_alloc_stats(bp);
+               if (rc)
+                       goto alloc_mem_err;
+
+               rc = bnxt_alloc_ntp_fltrs(bp);
+               if (rc)
+                       goto alloc_mem_err;
+
+               rc = bnxt_alloc_vnics(bp);
+               if (rc)
+                       goto alloc_mem_err;
+
+       }
+
+       bnxt_init_ring_struct(bp);
+
+       rc = bnxt_alloc_rx_rings(bp);
+       if (rc)
+               goto alloc_mem_err;
+
+       rc = bnxt_alloc_tx_rings(bp);
+       if (rc)
+               goto alloc_mem_err;
+
+       rc = bnxt_alloc_cp_rings(bp);
+       if (rc)
+               goto alloc_mem_err;
+
+       /* VNIC 0 is the default VNIC and carries RSS and filtering. */
+       bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+                                 BNXT_VNIC_UCAST_FLAG;
+       rc = bnxt_alloc_vnic_attributes(bp);
+       if (rc)
+               goto alloc_mem_err;
+       return 0;
+
+alloc_mem_err:
+       bnxt_free_mem(bp, true);
+       return rc;
+}
+
+/* Acknowledge each active completion ring at its current raw consumer
+ * index via the plain doorbell (BNXT_CP_DB, as opposed to the re-arming
+ * BNXT_CP_DB_REARM used in bnxt_enable_int).
+ */
+static void bnxt_disable_int(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+               if (cpr->cp_ring_struct.fw_ring_id == INVALID_HW_RING_ID)
+                       continue;
+
+               BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+       }
+}
+
+/* Disable interrupt processing and wait for in-flight handlers to
+ * finish.  intr_sem is raised first (NOTE(review): presumably checked
+ * by the ISR/poll path to back off -- confirm there), then doorbells
+ * are written un-armed, then every vector is synchronized.
+ */
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+       int i;
+
+       atomic_inc(&bp->intr_sem);
+
+       bnxt_disable_int(bp);
+       for (i = 0; i < bp->cp_nr_rings; i++)
+               synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+/* Re-enable interrupts: clear intr_sem and re-arm every completion
+ * ring doorbell at its current raw consumer index.
+ */
+static void bnxt_enable_int(struct bnxt *bp)
+{
+       int i;
+
+       atomic_set(&bp->intr_sem, 0);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+               BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+       }
+}
+
+/* Fill the common HWRM request header: request type, completion ring,
+ * target function and the DMA address firmware writes the response to.
+ */
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+                           u16 cmpl_ring, u16 target_id)
+{
+       struct input *hdr = request;
+
+       hdr->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+       hdr->target_id = cpu_to_le16(target_id);
+       hdr->cmpl_ring = cpu_to_le16(cmpl_ring);
+       hdr->req_type = cpu_to_le16(req_type);
+}
+
+/* Synchronously send one HWRM command and wait for its response.  The
+ * request is copied into BAR0, zero-padded to the maximum request
+ * length, and the channel doorbell at BAR0 offset 0x100 is rung.
+ * Completion is detected either via the response-completion interrupt
+ * (when a completion ring was specified in the header) or by polling
+ * the response length and final valid word in the response DMA buffer.
+ * Returns the firmware error code (0 on success) or -1 on timeout.
+ * NOTE(review): callers appear to serialize via hwrm_cmd_lock (see
+ * hwrm_send_message); only one command can be outstanding.
+ */
+static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
+                                int timeout, bool silent)
+{
+       int i, intr_process, rc, tmo_count;
+       struct input *req = msg;
+       u32 *data = msg;
+       __le32 *resp_len, *valid;
+       u16 cp_ring_id, len = 0;
+       struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+       req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
+#ifndef HSI_DBG_DISABLE
+       decode_hwrm_req(NULL, msg);
+#endif
+       memset(resp, 0, PAGE_SIZE);
+       cp_ring_id = le16_to_cpu(req->cmpl_ring);
+       intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+       /* Write request msg to hwrm channel */
+       __iowrite32_copy(bp->bar0, data, msg_len / 4);
+
+       /* Zero-pad the rest of the channel to the max request length. */
+       for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
+               writel(0, bp->bar0 + i);
+
+       /* currently supports only one outstanding message */
+       if (intr_process)
+               bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
+
+       /* Ring channel doorbell */
+       writel(1, bp->bar0 + 0x100);
+
+       if (!timeout)
+               timeout = DFLT_HWRM_CMD_TIMEOUT;
+
+       i = 0;
+       /* Poll every ~25us, so 40 iterations per timeout unit. */
+       tmo_count = timeout * 40;
+       if (intr_process) {
+               /* Wait until hwrm response cmpl interrupt is processed */
+               while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+                      i++ < tmo_count) {
+                       usleep_range(25, 40);
+               }
+
+               if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+                       netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+                                  le16_to_cpu(req->req_type));
+                       return -1;
+               }
+       } else {
+               /* Check if response len is updated */
+               resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+               for (i = 0; i < tmo_count; i++) {
+                       len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+                             HWRM_RESP_LEN_SFT;
+                       if (len)
+                               break;
+                       usleep_range(25, 40);
+               }
+
+               if (i >= tmo_count) {
+                       netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+                                  timeout, le16_to_cpu(req->req_type),
+                                  le16_to_cpu(req->seq_id), len);
+                       return -1;
+               }
+
+               /* Last word of resp contains valid bit */
+               valid = bp->hwrm_cmd_resp_addr + len - 4;
+               for (i = 0; i < 5; i++) {
+                       if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+                               break;
+                       udelay(1);
+               }
+
+               if (i >= 5) {
+                       netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+                                  timeout, le16_to_cpu(req->req_type),
+                                  le16_to_cpu(req->seq_id), len, *valid);
+                       return -1;
+               }
+       }
+
+       rc = le16_to_cpu(resp->error_code);
+       if (rc && !silent)
+               netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+                          le16_to_cpu(resp->req_type),
+                          le16_to_cpu(resp->seq_id), rc);
+#ifndef HSI_DBG_DISABLE
+       decode_hwrm_resp(NULL, resp);
+#endif
+       return rc;
+}
+
+/* Send an HWRM message without taking the command mutex; the caller
+ * must already hold bp->hwrm_cmd_lock.  Firmware errors are logged.
+ */
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+       int rc;
+
+       rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
+
+       return rc;
+}
+
+/* Send an HWRM message, taking the command mutex on behalf of the
+ * caller.  Firmware errors are logged (non-silent variant).
+ */
+int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+       int rc;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       return rc;
+}
+
+int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+                            int timeout)
+{
+       int rc;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Register with the firmware the async event ids the driver wants
+ * forwarded to it.  The base set comes from bnxt_async_events_arr[];
+ * callers may pass an extra bitmap (bmap, bmap_size bits) to request
+ * additional events.
+ */
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+                                    int bmap_size)
+{
+       struct hwrm_func_drv_rgtr_input req = {0};
+       DECLARE_BITMAP(async_events_bmap, 256);
+       /* View the 256-bit bitmap as eight 32-bit words for the request.
+        * NOTE(review): assumes the bitmap word layout matches what the
+        * firmware expects -- confirm behavior on big-endian hosts.
+        */
+       u32 *events = (u32 *)async_events_bmap;
+       int i;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+       req.enables =
+               cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+       /* Always register the driver's base event set. */
+       memset(async_events_bmap, 0, sizeof(async_events_bmap));
+       for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
+               __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+
+       /* Merge in any caller-requested extra events. */
+       if (bmap && bmap_size) {
+               for (i = 0; i < bmap_size; i++) {
+                       if (test_bit(i, bmap))
+                               __set_bit(i, async_events_bmap);
+               }
+       }
+
+       for (i = 0; i < 8; i++)
+               req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Register the driver with the firmware: OS type, driver version and,
+ * on the PF, the set of VF HWRM commands that should be forwarded to
+ * the PF driver for handling.
+ */
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+       struct hwrm_func_drv_rgtr_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+       req.enables =
+               cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+                           FUNC_DRV_RGTR_REQ_ENABLES_VER);
+
+       req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+       req.ver_maj = DRV_VER_MAJ;
+       req.ver_min = DRV_VER_MIN;
+       req.ver_upd = DRV_VER_UPD;
+
+       if (BNXT_PF(bp)) {
+               u32 data[8];
+               int i;
+
+               /* Build a 256-bit bitmap of VF request types to forward,
+                * one bit per HWRM command number.
+                */
+               memset(data, 0, sizeof(data));
+               for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
+                       u16 cmd = bnxt_vf_req_snif[i];
+                       unsigned int bit, idx;
+
+                       idx = cmd / 32;
+                       bit = cmd % 32;
+                       data[idx] |= 1 << bit;
+               }
+
+               for (i = 0; i < 8; i++)
+                       req.vf_req_fwd[i] = cpu_to_le32(data[i]);
+
+               req.enables |=
+                       cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
+       }
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Tell the firmware that the driver is unregistering from this
+ * function.
+ */
+static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
+{
+       struct hwrm_func_drv_unrgtr_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Free the firmware-managed tunnel destination UDP port of the given
+ * type (VXLAN or GENEVE).  Returns 0 or a negative error code.
+ */
+static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
+{
+       struct hwrm_tunnel_dst_port_free_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
+       req.tunnel_type = tunnel_type;
+
+       switch (tunnel_type) {
+       case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
+               req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+               break;
+       case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
+               req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+               break;
+       default:
+               break;
+       }
+
+       /* rc was declared u32, which stored the negative error code in an
+        * unsigned variable and printed it with %d; use int instead.
+        */
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
+                          rc);
+       return rc;
+}
+
+/* Ask the firmware to open a tunnel (VXLAN/GENEVE) destination UDP
+ * port and remember the returned firmware port id so it can be freed
+ * later.  Returns 0 or a negative error code.
+ */
+static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
+                                          u8 tunnel_type)
+{
+       struct hwrm_tunnel_dst_port_alloc_input req = {0};
+       struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       /* rc was declared u32, which stored the negative error code in an
+        * unsigned variable; use int to match the return type.
+        */
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+
+       req.tunnel_type = tunnel_type;
+       req.tunnel_dst_port_val = port;
+
+       /* Hold the lock across the response read since resp aliases the
+        * shared response buffer.
+        */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
+                          rc);
+               goto err_out;
+       }
+
+       switch (tunnel_type) {
+       case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
+               bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+               break;
+       case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
+               bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+               break;
+       default:
+               break;
+       }
+
+err_out:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Program the RX mask (promiscuous/multicast/broadcast filtering) and
+ * multicast table for one VNIC.
+ */
+static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
+{
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+
+       req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+       req.mask = cpu_to_le32(vnic->rx_mask);
+       req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+       req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+/* Free a previously allocated ntuple (RFS) filter in the firmware. */
+static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+                                           struct bnxt_ntuple_filter *fltr)
+{
+       struct hwrm_cfa_ntuple_filter_free_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
+       req.ntuple_filter_id = fltr->filter_id;
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Request fields always supplied when allocating an ntuple (RFS)
+ * filter; exact-match values and masks are programmed for all of them.
+ */
+#define BNXT_NTP_FLTR_FLAGS                                    \
+       (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
+        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
+
+/* Extra enable bit used when the flow is tunneled/encapsulated. */
+#define BNXT_NTP_TUNNEL_FLTR_FLAG                              \
+               CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
+
+/* Allocate an ntuple (aRFS) filter in firmware for the given flow and
+ * steer matching packets to the VNIC paired with fltr->rxq.  On
+ * success the firmware filter id is saved in fltr->filter_id.
+ */
+static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+                                            struct bnxt_ntuple_filter *fltr)
+{
+       int rc = 0;
+       struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+       struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+               bp->hwrm_cmd_resp_addr;
+       struct flow_keys *keys = &fltr->fkeys;
+       /* rxq + 1: vnic 0 is the default; per-queue RFS vnics start at 1 */
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+       req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
+
+       req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+
+       /* Defaults assume IPv4; overridden below for IPv6 flows. */
+       req.ethertype = htons(ETH_P_IP);
+       memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+       req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
+#ifdef NEW_FLOW_KEYS
+       /* Newer flow dissector: addresses/ports in sub-structs, IPv6
+        * supported.
+        */
+       req.ip_protocol = keys->basic.ip_proto;
+
+       if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
+               int i;
+
+               req.ethertype = htons(ETH_P_IPV6);
+               req.ip_addr_type =
+                       CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
+               *(struct in6_addr *)&req.src_ipaddr[0] =
+                       keys->addrs.v6addrs.src;
+               *(struct in6_addr *)&req.dst_ipaddr[0] =
+                       keys->addrs.v6addrs.dst;
+               /* Exact match: all-ones masks on both addresses. */
+               for (i = 0; i < 4; i++) {
+                       req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+                       req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+               }
+       } else {
+               req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+               req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+               req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+               req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+       }
+#ifdef HAVE_NEW_FLOW_DISSECTOR
+       /* Encapsulated flows also match on (any) tunnel type. */
+       if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
+               req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+               req.tunnel_type =
+                       CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
+       }
+#endif
+
+       req.src_port = keys->ports.src;
+       req.src_port_mask = cpu_to_be16(0xffff);
+       req.dst_port = keys->ports.dst;
+       req.dst_port_mask = cpu_to_be16(0xffff);
+#else
+       /* Legacy flow_keys layout: flat fields, IPv4 only. */
+       req.ip_protocol = keys->ip_proto;
+
+       req.src_ipaddr[0] = keys->src;
+       req.src_ipaddr_mask[0] = 0xffffffff;
+       req.dst_ipaddr[0] = keys->dst;
+       req.dst_ipaddr_mask[0] = 0xffffffff;
+
+       req.src_port = keys->port16[0];
+       req.src_port_mask = 0xffff;
+       req.dst_port = keys->port16[1];
+       req.dst_port_mask = 0xffff;
+#endif
+
+       req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
+       /* Hold the lock across the response read (shared resp buffer). */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               fltr->filter_id = resp->ntuple_filter_id;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+#endif
+
+/* Allocate an L2 (MAC) RX filter for a VNIC and store the firmware
+ * filter id at bp->vnic_info[vnic_id].fw_l2_filter_id[idx].
+ */
+static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
+                                    u8 *mac_addr)
+{
+       struct hwrm_cfa_l2_filter_alloc_input req = {0};
+       struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       /* rc was declared u32, which stored the negative error code in an
+        * unsigned variable; use int to match the return type.
+        */
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+       req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+       if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+               req.flags |=
+                       cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+       req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+       req.enables =
+               cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+                           CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+                           CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+       memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+       /* Exact match on all six MAC address bytes. */
+       memset(req.l2_addr_mask, 0xff, ETH_ALEN);
+
+       /* Hold the lock across the response read (shared resp buffer). */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
+                                                       resp->l2_filter_id;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Free every L2 unicast filter programmed on the device.  The firmware
+ * also drops any ntuple filters tied to those L2 filters.
+ */
+static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+{
+       u16 vnic_idx, fltr_idx, num_of_vnics = 1; /* only vnic 0 supported */
+       int rc = 0;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (vnic_idx = 0; vnic_idx < num_of_vnics; vnic_idx++) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_idx];
+
+               for (fltr_idx = 0; fltr_idx < vnic->uc_filter_count;
+                    fltr_idx++) {
+                       struct hwrm_cfa_l2_filter_free_input req = {0};
+
+                       bnxt_hwrm_cmd_hdr_init(bp, &req,
+                                              HWRM_CFA_L2_FILTER_FREE, -1, -1);
+                       req.l2_filter_id = vnic->fw_l2_filter_id[fltr_idx];
+
+                       rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                               HWRM_CMD_TIMEOUT);
+               }
+               vnic->uc_filter_count = 0;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       return rc;
+}
+
+/* Configure hardware receive aggregation (TPA/LRO/GRO) for a VNIC, or
+ * disable it when tpa_flags is 0.
+ */
+static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+{
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_vnic_tpa_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+
+       if (tpa_flags) {
+               /* 40 bytes: presumably IPv4 + TCP header overhead, giving
+                * a rough MSS estimate from the MTU -- confirm.
+                */
+               u16 mss = bp->dev->mtu - 40;
+               u32 nsegs, n, segs = 0, flags;
+
+               flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+                       VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+                       VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+                       VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+                       VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+               if (tpa_flags & BNXT_FLAG_GRO)
+                       flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+               req.flags = cpu_to_le32(flags);
+
+               req.enables =
+                       cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+                                   VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
+                                   VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
+
+               /* Number of segs are log2 units, and first packet is not
+                * included as part of this units.
+                */
+               if (mss <= BNXT_RX_PAGE_SIZE) {
+                       /* Multiple MSS-sized segments fit in each page. */
+                       n = BNXT_RX_PAGE_SIZE / mss;
+                       nsegs = (MAX_SKB_FRAGS - 1) * n;
+               } else {
+                       /* Each MSS spans n pages (rounded up). */
+                       n = mss / BNXT_RX_PAGE_SIZE;
+                       if (mss & (BNXT_RX_PAGE_SIZE - 1))
+                               n++;
+                       nsegs = (MAX_SKB_FRAGS - n) / n;
+               }
+
+               segs = ilog2(nsegs);
+               req.max_agg_segs = cpu_to_le16(segs);
+               req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+
+               req.min_agg_len = cpu_to_le32(512);
+       }
+       req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Configure (or clear) the RSS hash type, indirection table and hash
+ * key for a VNIC's RSS context.  A no-op when no context is allocated.
+ */
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_vnic_rss_cfg_input req = {0};
+       u32 i, max_rings;
+
+       if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+       if (set_rss) {
+               req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+               if (!(vnic->flags & BNXT_VNIC_RSS_FLAG))
+                       max_rings = 1;
+               else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+                       max_rings = bp->rx_nr_rings - 1;
+               else
+                       max_rings = bp->rx_nr_rings;
+
+               /* Fill the RSS indirection table with ring group ids,
+                * cycling through the available rings.
+                */
+               for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+                       vnic->rss_table[i] =
+                               cpu_to_le16(vnic->fw_grp_ids[i % max_rings]);
+
+               req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+               req.hash_key_tbl_addr =
+                       cpu_to_le64(vnic->rss_hash_key_dma_addr);
+       }
+       req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Enable jumbo placement and IPv4/IPv6 header-data split for a VNIC. */
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+{
+       struct hwrm_vnic_plcmodes_cfg_input req = {0};
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
+
+       req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+       req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+                               VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+                               VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+       req.enables =
+               cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+                           VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+       /* Thresholds not implemented in firmware yet. */
+       req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+       req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Free one RSS/COS/LB context of a VNIC and mark the slot invalid. */
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+                                       u16 ctx_idx)
+{
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
+       req.rss_cos_lb_ctx_id = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
+
+       hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+}
+
+/* Free every allocated RSS/COS/LB context across all VNICs. */
+static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
+{
+       int i, j;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+               for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
+                       if (vnic->fw_rss_cos_lb_ctx[j] == INVALID_HW_RING_ID)
+                               continue;
+                       bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+               }
+       }
+       bp->rsscos_nr_ctxs = 0;
+}
+
+/* Allocate an RSS/COS/LB context and record its firmware id in the
+ * VNIC's ctx_idx slot.
+ */
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
+{
+       struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+                                               bp->hwrm_cmd_resp_addr;
+       struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
+                              -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
+                       le16_to_cpu(resp->rss_cos_lb_ctx_id);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       return rc;
+}
+
+/* Program a VNIC's configuration: default ring group, RSS/COS rules,
+ * MRU and VLAN-strip mode.
+ */
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+{
+       unsigned int ring = 0, grp_idx;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       struct hwrm_vnic_cfg_input req = {0};
+       u16 def_vlan = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+
+       req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
+       /* Only RSS support for now TBD: COS & LB */
+       if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
+               req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+               req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+                                          VNIC_CFG_REQ_ENABLES_MRU);
+       } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
+               /* New-RSS-capable firmware: borrow vnic 0's RSS context. */
+               req.rss_rule =
+                       cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+               req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+                                          VNIC_CFG_REQ_ENABLES_MRU);
+               req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
+       } else {
+               /* 0xffff: no RSS rule. */
+               req.rss_rule = cpu_to_le16(0xffff);
+       }
+
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
+           (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
+               req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
+               req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
+       } else {
+               req.cos_rule = cpu_to_le16(0xffff);
+       }
+
+       /* Pick the rx ring whose group becomes this VNIC's default. */
+       if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+               ring = 0;
+       else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
+               ring = vnic_id - 1;
+       else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+               ring = bp->rx_nr_rings - 1;
+
+       grp_idx = bp->rx_ring[ring].bnapi->index;
+       req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+       req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+
+       req.lb_rule = cpu_to_le16(0xffff);
+       /* MRU: MTU plus L2 header, FCS and one VLAN tag. */
+       req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
+                             VLAN_HLEN);
+
+#ifdef CONFIG_BNXT_SRIOV
+       if (BNXT_VF(bp))
+               def_vlan = bp->vf.vlan;
+#endif
+       if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
+               req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+       /* Default VNIC shared with RoCE needs dual-VNIC mode. */
+       if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+               req.flags |=
+                       cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Free one VNIC in firmware (if allocated) and invalidate the cached
+ * firmware id.  Returns 0 or a negative error code.
+ */
+static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+{
+       struct hwrm_vnic_free_input req = {0};
+       /* rc was declared u32, which stored the negative error code in an
+        * unsigned variable; use int to match the return type.
+        */
+       int rc;
+
+       if (bp->vnic_info[vnic_id].fw_vnic_id == INVALID_HW_RING_ID)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
+       req.vnic_id = cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               return rc;
+
+       bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+       return 0;
+}
+
+/* Free all allocated VNICs in firmware. */
+static void bnxt_hwrm_vnic_free(struct bnxt *bp)
+{
+       u16 vnic_id;
+
+       for (vnic_id = 0; vnic_id < bp->nr_vnics; vnic_id++)
+               bnxt_hwrm_vnic_free_one(bp, vnic_id);
+}
+
+/* Allocate a VNIC in firmware and map nr_rings ring groups, starting
+ * at start_rx_ring_idx, into it.  On success the firmware vnic id is
+ * saved in bp->vnic_info[vnic_id].fw_vnic_id.
+ */
+static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
+                               unsigned int start_rx_ring_idx,
+                               unsigned int nr_rings)
+{
+       int rc = 0;
+       unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
+       struct hwrm_vnic_alloc_input req = {0};
+       struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+       /* map ring groups to this vnic */
+       for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
+               grp_idx = bp->rx_ring[i].bnapi->index;
+               if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
+                       netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
+                                  j, nr_rings);
+                       break;
+               }
+               bp->vnic_info[vnic_id].fw_grp_ids[j] =
+                                       bp->grp_info[grp_idx].fw_grp_id;
+       }
+
+       bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
+       bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
+       /* NOTE(review): req.flags is assigned before
+        * bnxt_hwrm_cmd_hdr_init(); this is only safe if hdr init does
+        * not clear the request -- confirm.
+        */
+       if (vnic_id == 0)
+               req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Query VNIC capabilities and latch the new-RSS capability flag if the
+ * firmware reports it.
+ */
+static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+       struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_vnic_qcaps_input req = {0};
+       int rc;
+
+       /* Command not available before HWRM spec 1.6.0. */
+       if (bp->hwrm_spec_code < 0x10600)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc &&
+           (resp->flags &
+            cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)))
+               bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       return rc;
+}
+
+/* Allocate a firmware ring group (cmpl/rx/agg/stats) for every rx
+ * ring.  The resulting firmware group ids are saved in bp->grp_info[].
+ */
+static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
+{
+       u16 i;
+       /* rc was declared u32, which stored the negative error code in an
+        * unsigned variable; use int to match the return type.
+        */
+       int rc = 0;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct hwrm_ring_grp_alloc_input req = {0};
+               struct hwrm_ring_grp_alloc_output *resp =
+                                       bp->hwrm_cmd_resp_addr;
+               unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+
+               req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+               req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
+               req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
+               req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
+
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+
+               bp->grp_info[grp_idx].fw_grp_id =
+                       le32_to_cpu(resp->ring_group_id);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Free every allocated firmware ring group and invalidate the cached
+ * group ids.  Returns the first error encountered, if any.
+ */
+static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+{
+       u16 i;
+       /* rc was declared u32, which stored the negative error code in an
+        * unsigned variable; use int to match the return type.
+        */
+       int rc = 0;
+       struct hwrm_ring_grp_free_input req = {0};
+
+       if (!bp->grp_info)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
+                       continue;
+               req.ring_group_id =
+                       cpu_to_le32(bp->grp_info[i].fw_grp_id);
+
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+               bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Allocate one firmware ring (tx, rx, agg or completion) described by
+ * @ring, associating it with doorbell/MSIX slot @map_index.  On
+ * success the firmware ring id is stored in ring->fw_ring_id.
+ */
+static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+                                   struct bnxt_ring_struct *ring,
+                                   u32 ring_type, u32 map_index,
+                                   u32 stats_ctx_id)
+{
+       int rc = 0, err = 0;
+       struct hwrm_ring_alloc_input req = {0};
+       struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 ring_id;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+
+       req.enables = 0;
+       if (ring->nr_pages > 1) {
+               req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+               /* Page size is in log2 units */
+               req.page_size = BNXT_PAGE_SHIFT;
+               req.page_tbl_depth = 1;
+       } else {
+               req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
+       }
+       req.fbo = 0;
+       /* Association of ring index with doorbell index and MSIX number */
+       req.logical_id = cpu_to_le16(map_index);
+
+       switch (ring_type) {
+       case HWRM_RING_ALLOC_TX:
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+               /* Association of transmit ring with completion ring */
+               req.cmpl_ring_id =
+                       cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+               req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+               req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+               req.queue_id = cpu_to_le16(ring->queue_id);
+               break;
+       case HWRM_RING_ALLOC_RX:
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+               req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+               break;
+       case HWRM_RING_ALLOC_AGG:
+               /* Aggregation rings use the RX ring type in firmware. */
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+               req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+               break;
+       case HWRM_RING_ALLOC_CMPL:
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+               req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+               if (bp->flags & BNXT_FLAG_USING_MSIX)
+                       req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+               break;
+       default:
+               netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
+                          ring_type);
+               return -1;
+       }
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       err = le16_to_cpu(resp->error_code);
+       ring_id = le16_to_cpu(resp->ring_id);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       if (rc || err) {
+               /* NOTE(review): ring_type holds HWRM_RING_ALLOC_* values
+                * but these cases test RING_FREE_REQ_RING_TYPE_*
+                * constants -- verify the numeric values line up.
+                */
+               switch (ring_type) {
+               case RING_FREE_REQ_RING_TYPE_L2_CMPL:
+                       netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+                                  rc, err);
+                       return -1;
+
+               case RING_FREE_REQ_RING_TYPE_RX:
+                       netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+                                  rc, err);
+                       return -1;
+
+               case RING_FREE_REQ_RING_TYPE_TX:
+                       netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+                                  rc, err);
+                       return -1;
+
+               default:
+                       netdev_err(bp->dev, "Invalid ring\n");
+                       return -1;
+               }
+       }
+       ring->fw_ring_id = ring_id;
+       return rc;
+}
+
+/* Point the firmware's async event notifications at completion ring
+ * @idx, using FUNC_CFG on the PF and FUNC_VF_CFG on a VF.
+ */
+static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
+{
+       if (BNXT_PF(bp)) {
+               struct hwrm_func_cfg_input req = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+               req.fid = cpu_to_le16(0xffff);
+               req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+               req.async_event_cr = cpu_to_le16(idx);
+               return hwrm_send_message(bp, &req, sizeof(req),
+                                        HWRM_CMD_TIMEOUT);
+       } else {
+               struct hwrm_func_vf_cfg_input req = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+               req.enables =
+                       cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+               req.async_event_cr = cpu_to_le16(idx);
+               return hwrm_send_message(bp, &req, sizeof(req),
+                                        HWRM_CMD_TIMEOUT);
+       }
+}
+
+/* Allocate every HW ring with firmware: completion rings first (one per
+ * NAPI instance -- the other ring types reference their fw ring ids),
+ * then TX, RX and, when aggregation is enabled, RX aggregation rings.
+ * On the first failure, rings allocated so far are left in place for
+ * the caller to free.
+ */
+static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+{
+       int i, rc = 0;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+               struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+               /* Each ring gets its own 0x80-byte doorbell slot in BAR1. */
+               cpr->cp_doorbell = bp->bar1 + i * 0x80;
+               rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
+                                             INVALID_STATS_CTX_ID);
+               if (rc)
+                       goto err_out;
+               BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+               bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+
+               if (!i) {
+                       /* The first completion ring also carries firmware
+                        * async events; failure is logged but not fatal.
+                        */
+                       rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
+                       if (rc)
+                               netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
+               }
+       }
+
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+               struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+               u32 map_idx = txr->bnapi->index;
+               u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
+
+               rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
+                                             map_idx, fw_stats_ctx);
+               if (rc)
+                       goto err_out;
+               txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
+       }
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+               struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+               u32 map_idx = rxr->bnapi->index;
+
+               rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
+                                             map_idx, INVALID_STATS_CTX_ID);
+               if (rc)
+                       goto err_out;
+               rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
+               /* Publish the current RX producer index to the hardware. */
+               writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+               bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+       }
+
+       if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+               for (i = 0; i < bp->rx_nr_rings; i++) {
+                       struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+                       struct bnxt_ring_struct *ring =
+                                               &rxr->rx_agg_ring_struct;
+                       u32 grp_idx = rxr->bnapi->index;
+                       /* Agg ring doorbells live after all RX doorbells. */
+                       u32 map_idx = grp_idx + bp->rx_nr_rings;
+
+                       rc = hwrm_ring_alloc_send_msg(bp, ring,
+                                                     HWRM_RING_ALLOC_AGG,
+                                                     map_idx,
+                                                     INVALID_STATS_CTX_ID);
+                       if (rc)
+                               goto err_out;
+
+                       rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
+                       writel(DB_KEY_RX | rxr->rx_agg_prod,
+                              rxr->rx_agg_doorbell);
+                       bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+               }
+       }
+err_out:
+       return rc;
+}
+
+/* Send HWRM_RING_FREE for @ring.  @cmpl_ring_id directs firmware to
+ * confirm the free on that completion ring, or INVALID_HW_RING_ID when
+ * the completion rings are not usable (abort path).  Returns the HWRM
+ * status, or -1 for an unrecognized ring type.
+ */
+static int hwrm_ring_free_send_msg(struct bnxt *bp,
+                                  struct bnxt_ring_struct *ring,
+                                  u32 ring_type, int cmpl_ring_id)
+{
+       int rc;
+       struct hwrm_ring_free_input req = {0};
+       struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 error_code;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
+       req.ring_type = ring_type;
+       req.ring_id = cpu_to_le16(ring->fw_ring_id);
+
+       /* resp is the shared response buffer; read error_code while still
+        * holding hwrm_cmd_lock so another command cannot overwrite it.
+        */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       error_code = le16_to_cpu(resp->error_code);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       if (rc || error_code) {
+               switch (ring_type) {
+               case RING_FREE_REQ_RING_TYPE_L2_CMPL:
+                       netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
+                                  rc);
+                       return rc;
+               case RING_FREE_REQ_RING_TYPE_RX:
+                       netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
+                                  rc);
+                       return rc;
+               case RING_FREE_REQ_RING_TYPE_TX:
+                       netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
+                                  rc);
+                       return rc;
+               default:
+                       netdev_err(bp->dev, "Invalid ring\n");
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+/* Free all HW rings with firmware, in roughly the reverse order of
+ * allocation: TX, RX, RX aggregation, then the completion rings last.
+ * When @close_path is true, each free is confirmed on the ring's
+ * completion ring so in-flight work is drained first.
+ */
+static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+               struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+               u32 grp_idx = txr->bnapi->index;
+               u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+
+               if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       hwrm_ring_free_send_msg(bp, ring,
+                                               RING_FREE_REQ_RING_TYPE_TX,
+                                               close_path ? cmpl_ring_id :
+                                               INVALID_HW_RING_ID);
+                       ring->fw_ring_id = INVALID_HW_RING_ID;
+               }
+       }
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+               struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+               u32 grp_idx = rxr->bnapi->index;
+               u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+
+               if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       hwrm_ring_free_send_msg(bp, ring,
+                                               RING_FREE_REQ_RING_TYPE_RX,
+                                               close_path ? cmpl_ring_id :
+                                               INVALID_HW_RING_ID);
+                       ring->fw_ring_id = INVALID_HW_RING_ID;
+                       bp->grp_info[grp_idx].rx_fw_ring_id =
+                               INVALID_HW_RING_ID;
+               }
+       }
+
+       /* Aggregation rings use the RX ring type in the free request. */
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+               struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+               u32 grp_idx = rxr->bnapi->index;
+               u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+
+               if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       hwrm_ring_free_send_msg(bp, ring,
+                                               RING_FREE_REQ_RING_TYPE_RX,
+                                               close_path ? cmpl_ring_id :
+                                               INVALID_HW_RING_ID);
+                       ring->fw_ring_id = INVALID_HW_RING_ID;
+                       bp->grp_info[grp_idx].agg_fw_ring_id =
+                               INVALID_HW_RING_ID;
+               }
+       }
+
+       /* The completion rings are about to be freed.  After that the
+        * IRQ doorbell will not work anymore.  So we need to disable
+        * IRQ here.
+        */
+       bnxt_disable_int_sync(bp);
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+               struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+               if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       hwrm_ring_free_send_msg(bp, ring,
+                                               RING_FREE_REQ_RING_TYPE_L2_CMPL,
+                                               INVALID_HW_RING_ID);
+                       ring->fw_ring_id = INVALID_HW_RING_ID;
+                       bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+               }
+       }
+}
+
+/* Query firmware (HWRM_FUNC_QCFG) for the number of TX rings currently
+ * allocated to function @fid and store it in *tx_rings.  A silent no-op
+ * on firmware older than spec 1.6.1, which lacks the field.
+ * Caller must hold bp->hwrm_cmd_lock (shared response buffer).
+ */
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
+{
+       struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_qcfg_input req = {0};
+       int rc = 0;
+
+       if (bp->hwrm_spec_code >= 0x10601) {
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+               req.fid = cpu_to_le16(fid);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (!rc)
+                       *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+       }
+       return rc;
+}
+
+/* Reserve *tx_rings TX rings with firmware and read back the count
+ * actually granted.  Only meaningful on a PF with firmware spec 1.6.1
+ * or newer; otherwise succeeds without touching *tx_rings.
+ */
+static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+{
+       struct hwrm_func_cfg_input req = {0};
+       int rc;
+
+       if (bp->hwrm_spec_code < 0x10601 || BNXT_VF(bp))
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       /* fid 0xffff selects the calling function itself. */
+       req.fid = cpu_to_le16(0xffff);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+       req.num_tx_rings = cpu_to_le16(*tx_rings);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               /* Firmware may grant fewer rings; read back the result. */
+               mutex_lock(&bp->hwrm_cmd_lock);
+               rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
+               mutex_unlock(&bp->hwrm_cmd_lock);
+       }
+       return rc;
+}
+
+/* Fill a coalescing-parameters request from packed arguments: the low
+ * 16 bits of @max_bufs/@buf_tmrs apply outside interrupt context, the
+ * high 16 bits during interrupt processing.
+ */
+static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
+       u32 buf_tmrs, u16 flags,
+       struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+       u16 aggr = (u16)max_bufs;
+       u16 aggr_irq = (u16)(max_bufs >> 16);
+       u16 tmr = (u16)buf_tmrs;
+       u16 tmr_irq = (u16)(buf_tmrs >> 16);
+
+       req->flags = cpu_to_le16(flags);
+       req->num_cmpl_dma_aggr = cpu_to_le16(aggr);
+       req->num_cmpl_dma_aggr_during_int = cpu_to_le16(aggr_irq);
+       req->cmpl_aggr_dma_tmr = cpu_to_le16(tmr);
+       req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr_irq);
+       /* Minimum time between 2 interrupts set to buf_tmr x 2 */
+       req->int_lat_tmr_min = cpu_to_le16(tmr * 2);
+       req->int_lat_tmr_max = cpu_to_le16(tmr * 4);
+       req->num_cmpl_aggr_int = cpu_to_le16(aggr * 4);
+}
+
+/* Program the interrupt coalescing parameters (from the ethtool-derived
+ * values cached in bp) into firmware, using separate parameter sets for
+ * RX completion rings and TX-only completion rings.
+ */
+int bnxt_hwrm_set_coal(struct bnxt *bp)
+{
+       int i, rc = 0;
+       struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
+                                                          req_tx = {0}, *req;
+       u16 max_buf, max_buf_irq;
+       u16 buf_tmr, buf_tmr_irq;
+       u32 flags;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+                              HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+       bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
+                              HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+
+       /* Each rx completion (2 records) should be DMAed immediately.
+        * DMA 1/4 of the completion buffers at a time.
+        */
+       max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
+       /* max_buf must not be zero */
+       max_buf = clamp_t(u16, max_buf, 1, 63);
+       max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
+       buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
+       /* buf timer set to 1/4 of interrupt timer */
+       buf_tmr = max_t(u16, buf_tmr / 4, 1);
+       buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
+       buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
+
+       flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+
+       /* RING_IDLE generates more IRQs for lower latency.  Enable it only
+        * if coal_ticks is less than 25 us.
+        */
+       if (bp->rx_coal_ticks < 25)
+               flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+       /* IRQ values go in the high 16 bits of each packed argument. */
+       bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
+                                 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
+
+       /* max_buf must not be zero */
+       max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
+       max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
+       buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
+       /* buf timer set to 1/4 of interrupt timer */
+       buf_tmr = max_t(u16, buf_tmr / 4, 1);
+       buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
+       buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
+
+       flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+       bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
+                                 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+
+               /* Completion rings without an RX ring are TX-only. */
+               req = &req_rx;
+               if (!bnapi->rx_ring)
+                       req = &req_tx;
+               req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+               rc = _hwrm_send_message(bp, req, sizeof(*req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Release every statistics context previously allocated from firmware.
+ * Safe to call before rings exist; Nitro A0 chips have no stats
+ * contexts, so nothing is done for them.
+ */
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+       struct hwrm_stat_ctx_free_input req = {0};
+       int i, rc = 0;
+
+       if (!bp->bnapi || BNXT_CHIP_TYPE_NITRO_A0(bp))
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+               if (cpr->hw_stats_ctx_id == INVALID_STATS_CTX_ID)
+                       continue;
+
+               req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+
+               cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Allocate one firmware statistics context per completion ring and
+ * record the returned ids in the ring and group info.  Nitro A0 chips
+ * do not support stats contexts and are skipped.
+ */
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+       struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_stat_ctx_alloc_input req = {0};
+       int i, rc = 0;
+
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+       /* Firmware expects the stats DMA update period in milliseconds. */
+       req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+               req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+
+               cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+               bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Query this function's current configuration (HWRM_FUNC_QCFG) and
+ * cache the results in bp: the VF default VLAN, whether a firmware
+ * LLDP/DCBX agent is running, the NPAR partition type, and the current
+ * bridge (EVB) mode.
+ */
+static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
+{
+       struct hwrm_func_qcfg_input req = {0};
+       struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+       /* fid 0xffff selects the calling function itself. */
+       req.fid = cpu_to_le16(0xffff);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto func_qcfg_exit;
+
+#ifdef CONFIG_BNXT_SRIOV
+       if (BNXT_VF(bp)) {
+               struct bnxt_vf_info *vf = &bp->vf;
+
+               vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+       }
+#endif
+       if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
+                           (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
+                            FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)))
+               bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+
+       /* Only NPAR partition types are recorded; other values leave
+        * bp->port_partition_type untouched.
+        */
+       switch (resp->port_partition_type) {
+       case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+       case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
+       case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
+               bp->port_partition_type = resp->port_partition_type;
+               break;
+       }
+       /* Firmware older than spec 1.7.7 cannot report EVB mode; treat
+        * it as the VEB default.
+        */
+       if (bp->hwrm_spec_code < 0x10707 ||
+           resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
+               bp->br_mode = BRIDGE_MODE_VEB;
+       else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
+               bp->br_mode = BRIDGE_MODE_VEPA;
+       else
+               bp->br_mode = BRIDGE_MODE_UNDEF;
+
+func_qcfg_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Query the PTP (IEEE 1588) timestamp register offsets from firmware
+ * and, when direct register access is supported, allocate and populate
+ * bp->ptp_cfg.  No-op when PTP is already configured or the firmware
+ * spec is older than 1.8.0.  bp->hwrm_cmd_lock already held.
+ */
+static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+       struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_port_mac_ptp_qcfg_input req = {0};
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       int rc;
+
+       if (bp->hwrm_spec_code < 0x10800 || ptp)
+               return 0;
+
+       /* Initialize the request header before filling the body, matching
+        * the convention used by every other HWRM request in this file.
+        */
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               return -EIO;
+
+       /* Without direct register access there is nothing to set up. */
+       if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS))
+               return 0;
+
+       ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+       if (!ptp)
+               return -ENOMEM;
+
+       ptp->rx_regs[BNXT_PTP_RX_TS_L] = le32_to_cpu(resp->rx_ts_reg_off_lower);
+       ptp->rx_regs[BNXT_PTP_RX_TS_H] = le32_to_cpu(resp->rx_ts_reg_off_upper);
+       ptp->rx_regs[BNXT_PTP_RX_SEQ] = le32_to_cpu(resp->rx_ts_reg_off_seq_id);
+       ptp->rx_regs[BNXT_PTP_RX_FIFO] = le32_to_cpu(resp->rx_ts_reg_off_fifo);
+       ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+               le32_to_cpu(resp->rx_ts_reg_off_fifo_adv);
+       ptp->tx_regs[BNXT_PTP_TX_TS_L] = le32_to_cpu(resp->tx_ts_reg_off_lower);
+       ptp->tx_regs[BNXT_PTP_TX_TS_H] = le32_to_cpu(resp->tx_ts_reg_off_upper);
+       ptp->tx_regs[BNXT_PTP_TX_SEQ] = le32_to_cpu(resp->tx_ts_reg_off_seq_id);
+       ptp->tx_regs[BNXT_PTP_TX_FIFO] = le32_to_cpu(resp->tx_ts_reg_off_fifo);
+
+       ptp->bp = bp;
+       bp->ptp_cfg = ptp;
+
+       return 0;
+}
+
+/* Query function capabilities (HWRM_FUNC_QCAPS) and record them: RoCE
+ * and TX-push capability flags in bp->flags, and the full per-function
+ * resource limits (rings, vnics, stats contexts, flows, VFs) into
+ * bp->pf or bp->vf as appropriate.  For a VF, also settles the netdev
+ * MAC address (admin-assigned or random).
+ */
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+       int rc = 0;
+       struct hwrm_func_qcaps_input req = {0};
+       struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       u32 flags;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+       /* fid 0xffff selects the calling function itself. */
+       req.fid = cpu_to_le16(0xffff);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto hwrm_func_qcaps_exit;
+
+       flags = le32_to_cpu(resp->flags);
+       if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
+               bp->flags |= BNXT_FLAG_ROCEV1_CAP;
+       if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
+               bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+
+       bp->tx_push_thresh = 0;
+       if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+               bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
+       if (BNXT_PF(bp)) {
+               struct bnxt_pf_info *pf = &bp->pf;
+
+               pf->fw_fid = le16_to_cpu(resp->fid);
+               pf->port_id = le16_to_cpu(resp->port_id);
+#if defined(HAVE_DEV_PORT) && !defined(RH_KABI_FILL_HOLE) && !defined(NET_DEVICE_EXTENDED_SIZE)
+               bp->dev->dev_port = pf->port_id;
+#endif
+               memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
+               memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
+               pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+               pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+               pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+               pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+               pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+               /* Firmware reporting 0 ring groups means one group per
+                * TX ring.
+                */
+               if (!pf->max_hw_ring_grps)
+                       pf->max_hw_ring_grps = pf->max_tx_rings;
+               pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+               pf->max_vnics = le16_to_cpu(resp->max_vnics);
+               pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+               pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+               pf->max_vfs = le16_to_cpu(resp->max_vfs);
+               pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+               pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
+               pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+               pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+               pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+               pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+               if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
+                       bp->flags |= BNXT_FLAG_WOL_CAP;
+               if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED)
+                       __bnxt_hwrm_ptp_qcfg(bp);
+       } else {
+#ifdef CONFIG_BNXT_SRIOV
+               struct bnxt_vf_info *vf = &bp->vf;
+
+               vf->fw_fid = le16_to_cpu(resp->fid);
+
+               vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+               vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+               vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+               vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+               vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+               if (!vf->max_hw_ring_grps)
+                       vf->max_hw_ring_grps = vf->max_tx_rings;
+               vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+               vf->max_vnics = le16_to_cpu(resp->max_vnics);
+               vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+               memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
+               /* Done with the shared resp buffer: unlock here and return
+                * early instead of falling through to the exit label (which
+                * would unlock a second time).
+                */
+               mutex_unlock(&bp->hwrm_cmd_lock);
+
+               if (is_valid_ether_addr(vf->mac_addr)) {
+                       /* overwrite netdev dev_adr with admin VF MAC */
+                       memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+               } else {
+                       eth_hw_addr_random(bp->dev);
+                       rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+               }
+               return rc;
+#endif
+       }
+
+hwrm_func_qcaps_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Ask firmware to reset this function (HWRM_FUNC_RESET).  Uses the
+ * longer reset timeout rather than the normal command timeout.
+ */
+static int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+       struct hwrm_func_reset_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
+       req.enables = 0;        /* no optional fields used */
+
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+       return rc;
+}
+
+/* Query the port's CoS queue configuration (HWRM_QUEUE_QPORTCFG) and
+ * populate bp->max_tc, bp->max_lltc and the per-TC queue id/profile
+ * table in bp->q_info.  Fails with -EINVAL if firmware reports no
+ * configurable queues.
+ */
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+       int rc = 0;
+       struct hwrm_queue_qportcfg_input req = {0};
+       struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       u8 i, *qptr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto qportcfg_exit;
+
+       if (!resp->max_configurable_queues) {
+               rc = -EINVAL;
+               goto qportcfg_exit;
+       }
+       bp->max_tc = resp->max_configurable_queues;
+       bp->max_lltc = resp->max_configurable_lossless_queues;
+       if (bp->max_tc > BNXT_MAX_QUEUE)
+               bp->max_tc = BNXT_MAX_QUEUE;
+
+       /* Asymmetric TX/RX CoS queue config is not supported; fall back
+        * to a single traffic class.
+        */
+       if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+               bp->max_tc = 1;
+
+       if (bp->max_lltc > bp->max_tc)
+               bp->max_lltc = bp->max_tc;
+
+       /* The response lays out (queue_id, queue_profile) byte pairs
+        * starting at queue_id0; walk them pairwise.
+        */
+       qptr = &resp->queue_id0;
+       for (i = 0; i < bp->max_tc; i++) {
+               bp->q_info[i].queue_id = *qptr++;
+               bp->q_info[i].queue_profile = *qptr++;
+       }
+
+qportcfg_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Exchange version information with firmware (HWRM_VER_GET).  Caches
+ * the full response in bp->ver_resp and derives bp->hwrm_spec_code,
+ * the firmware version string, the default command timeout, the max
+ * request length, and the chip number (detecting Nitro A0).
+ */
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+       int rc;
+       struct hwrm_ver_get_input req = {0};
+       struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+
+       /* Start from the compile-time maximum until firmware tells us
+        * its supported request window.
+        */
+       bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+       req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+       req.hwrm_intf_min = HWRM_VERSION_MINOR;
+       req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto hwrm_ver_get_exit;
+
+       memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+       /* Pack major/minor/update into one comparable integer. */
+       bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
+                            resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
+       if (resp->hwrm_intf_maj < 1) {
+               netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
+                           resp->hwrm_intf_maj, resp->hwrm_intf_min,
+                           resp->hwrm_intf_upd);
+               netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
+       }
+
+       snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
+                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+                resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+
+       bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+       if (!bp->hwrm_cmd_timeout)
+               bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+
+       if (resp->hwrm_intf_maj >= 1)
+               bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+
+       bp->chip_num = le16_to_cpu(resp->chip_num);
+       /* 58700 with zero rev and metal is the Nitro A0 stepping, which
+        * needs special handling elsewhere in the driver.
+        */
+       if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
+           !resp->chip_metal)
+               bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
+
+hwrm_ver_get_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Set the firmware's real-time clock from the host wall clock
+ * (HWRM_FW_SET_TIME, firmware spec >= 1.4.0).  Returns -EOPNOTSUPP
+ * when the firmware is too old or the kernel lacks the RTC library.
+ * NOTE(review): uses the legacy do_gettimeofday()/struct timeval API;
+ * presumably kept for older-kernel compatibility in this out-of-tree
+ * build -- confirm against the target kernel versions.
+ */
+int bnxt_hwrm_fw_set_time(struct bnxt *bp)
+{
+#if defined(CONFIG_RTC_LIB) || defined(CONFIG_RTC_LIB_MODULE)
+       struct hwrm_fw_set_time_input req = {0};
+       struct rtc_time tm;
+       struct timeval tv;
+
+       if (bp->hwrm_spec_code < 0x10400)
+               return -EOPNOTSUPP;
+
+       do_gettimeofday(&tv);
+       rtc_time_to_tm(tv.tv_sec, &tm);
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
+       /* rtc_time counts years from 1900 and months from 0. */
+       req.year = cpu_to_le16(1900 + tm.tm_year);
+       req.month = 1 + tm.tm_mon;
+       req.day = tm.tm_mday;
+       req.hour = tm.tm_hour;
+       req.minute = tm.tm_min;
+       req.second = tm.tm_sec;
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+#else
+       return -EOPNOTSUPP;
+#endif
+}
+
+/* Ask firmware to DMA the port TX and RX statistics into the host
+ * buffers previously mapped in bp.  A no-op unless port statistics
+ * were enabled (BNXT_FLAG_PORT_STATS).
+ */
+static int bnxt_hwrm_port_qstats(struct bnxt *bp)
+{
+       struct hwrm_port_qstats_input req = {0};
+
+       if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
+       req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Free any VXLAN and GENEVE destination UDP ports registered with
+ * firmware and reset the driver-side port reference counts.
+ */
+static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+       if (bp->vxlan_port_cnt)
+               bnxt_hwrm_tunnel_dst_port_free(
+                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+       bp->vxlan_port_cnt = 0;
+
+       if (bp->nge_port_cnt)
+               bnxt_hwrm_tunnel_dst_port_free(
+                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+       bp->nge_port_cnt = 0;
+}
+
+/* TODO: remove this once min aggregate packet size workaround is removed */
+static int bnxt_dbg_hwrm_wr_reg(struct bnxt *, u32, u32);
+static int bnxt_dbg_hwrm_rd_reg(struct bnxt *, u32, u16, u32 *);
+
+/* Apply (or, when @set_tpa is false, clear) the driver's TPA flags on
+ * every vnic.  Stops at and returns the error from the first vnic that
+ * fails.
+ */
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+       u32 tpa_flags = set_tpa ? (bp->flags & BNXT_FLAG_TPA) : 0;
+       int i;
+
+       for (i = 0; i < bp->nr_vnics; i++) {
+               int rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
+                                  i, rc);
+                       return rc;
+               }
+       }
+       return 0;
+}
+
+/* Disable RSS on every vnic (done before freeing vnic contexts). */
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+       int vnic_id;
+
+       for (vnic_id = 0; vnic_id < bp->nr_vnics; vnic_id++)
+               bnxt_hwrm_vnic_set_rss(bp, vnic_id, false);
+}
+
+/* Tear down firmware-side resources in dependency order: vnic filters,
+ * RSS settings, vnic contexts, TPA, then the vnics themselves, followed
+ * by rings and ring groups, and -- only on a full re-init -- stats
+ * contexts and tunnel ports.
+ */
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+                                   bool irq_re_init)
+{
+       if (bp->vnic_info) {
+               bnxt_hwrm_clear_vnic_filter(bp);
+               /* clear all RSS setting before free vnic ctx */
+               bnxt_hwrm_clear_vnic_rss(bp);
+               bnxt_hwrm_vnic_ctx_free(bp);
+               /* before free the vnic, undo the vnic tpa settings */
+               if (bp->flags & BNXT_FLAG_TPA)
+                       bnxt_set_tpa(bp, false);
+               bnxt_hwrm_vnic_free(bp);
+       }
+       bnxt_hwrm_ring_free(bp, close_path);
+       bnxt_hwrm_ring_grp_free(bp);
+       if (irq_re_init) {
+               bnxt_hwrm_stat_ctx_free(bp);
+               bnxt_hwrm_free_tunnel_ports(bp);
+       }
+}
+
+#ifdef HAVE_NDO_BRIDGE_GETLINK
+/* Program the bridge mode (VEB or VEPA) into firmware via
+ * HWRM_FUNC_CFG.  Any other mode is rejected with -EINVAL before a
+ * command is sent; firmware failures are reported as -EIO.
+ */
+static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
+{
+       struct hwrm_func_cfg_input req = {0};
+       u8 evb_mode;
+
+       switch (br_mode) {
+       case BRIDGE_MODE_VEB:
+               evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
+               break;
+       case BRIDGE_MODE_VEPA:
+               evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.fid = cpu_to_le16(0xffff);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
+       req.evb_mode = evb_mode;
+       if (hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+               return -EIO;
+       return 0;
+}
+#endif
+
+/* Bring up one vnic: allocate its RSS context(s), configure the vnic
+ * and its ring group, enable RSS hashing, and -- with aggregation
+ * rings -- header-data split.  Vnics flagged RFS_NEW_RSS reuse an
+ * existing context and skip allocation.  Returns 0 or the first
+ * firmware error.
+ */
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+       int rc;
+
+       if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
+               goto skip_rss_ctx;
+
+       /* allocate context for vnic */
+       rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+                          vnic_id, rc);
+               goto vnic_setup_err;
+       }
+       bp->rsscos_nr_ctxs++;
+
+       /* Nitro A0 needs a second context for CoS. */
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+               rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
+                                  vnic_id, rc);
+                       goto vnic_setup_err;
+               }
+               bp->rsscos_nr_ctxs++;
+       }
+
+skip_rss_ctx:
+       /* configure default vnic, ring grp */
+       rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+                          vnic_id, rc);
+               goto vnic_setup_err;
+       }
+
+       /* Enable RSS hashing on vnic */
+       rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+                          vnic_id, rc);
+               goto vnic_setup_err;
+       }
+
+       /* HDS failure is logged but does not fail vnic setup. */
+       if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+               rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+                                  vnic_id, rc);
+               }
+       }
+
+vnic_setup_err:
+       return rc;
+}
+
+/* Allocate one extra VNIC per RX ring for accelerated RFS (aRFS)
+ * flow steering.  VNIC 0 remains the default; RFS VNICs start at id 1
+ * and each maps a single ring group.  Compiled out (returns 0) when
+ * CONFIG_RFS_ACCEL is disabled.
+ *
+ * Returns 0 on success or the error from the first failing
+ * allocation/setup; earlier successfully set up VNICs are left in
+ * place for the caller's unwind path.
+ */
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int i, rc = 0;
+
+       for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_vnic_info *vnic;
+               u16 vnic_id = i + 1;
+               u16 ring_id = i;
+
+               /* stop once the device's VNIC budget is exhausted */
+               if (vnic_id >= bp->nr_vnics)
+                       break;
+
+               vnic = &bp->vnic_info[vnic_id];
+               vnic->flags |= BNXT_VNIC_RFS_FLAG;
+               if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+                       vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
+               rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+                                  vnic_id, rc);
+                       break;
+               }
+               rc = bnxt_setup_vnic(bp, vnic_id);
+               if (rc)
+                       break;
+       }
+       return rc;
+#else
+       return 0;
+#endif
+}
+
+/* Allow PF and VF with default VLAN to be in promiscuous mode */
+/* Returns true when promiscuous RX mode may be honoured: always for a
+ * PF, and for a VF only when a default VLAN is assigned (vf.vlan != 0).
+ */
+static bool bnxt_promisc_ok(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+       if (BNXT_VF(bp) && !bp->vf.vlan)
+               return false;
+#endif
+       return true;
+}
+
+/* Allocate and configure the special VNIC required by Nitro A0
+ * (NS2 A0) hardware.  The extra VNIC takes id 1 and is backed by the
+ * last RX ring (bp->rx_nr_rings - 1).
+ *
+ * Returns 0 on success or the negative errno from the failing HWRM
+ * call.
+ */
+static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
+{
+       /* rc must be signed: the HWRM helpers return negative errnos */
+       int rc;
+
+       rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+       if (rc) {
+               netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
+                          rc);
+               return rc;
+       }
+
+       rc = bnxt_hwrm_vnic_cfg(bp, 1);
+       if (rc) {
+               netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
+                          rc);
+               return rc;
+       }
+       return rc;
+}
+
+static int bnxt_cfg_rx_mode(struct bnxt *);
+static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
+
+/* One-shot firmware bring-up of the datapath: stat contexts (when IRQs
+ * are being re-initialized), rings, ring groups, the default VNIC and
+ * any RFS VNICs, TPA, the unicast MAC filter, and the RX mode mask.
+ * The HWRM call order below matters; on any failure everything
+ * allocated so far is torn down via bnxt_hwrm_resource_free().
+ *
+ * @bp:          driver context
+ * @irq_re_init: true when interrupt vectors were (re)allocated and the
+ *               per-ring stat contexts must be allocated too
+ *
+ * Returns 0 on success or a negative/firmware error code.
+ */
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       int rc = 0;
+       unsigned int rx_nr_rings = bp->rx_nr_rings;
+
+       if (irq_re_init) {
+               rc = bnxt_hwrm_stat_ctx_alloc(bp);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+                                  rc);
+                       goto err_out;
+               }
+       }
+
+       rc = bnxt_hwrm_ring_alloc(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+               goto err_out;
+       }
+
+       rc = bnxt_hwrm_ring_grp_alloc(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+               goto err_out;
+       }
+
+       /* on Nitro A0 the last RX ring is reserved for the special VNIC
+        * set up at the bottom of this function
+        */
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+               rx_nr_rings--;
+
+       /* default vnic 0 */
+       rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
+               goto err_out;
+       }
+
+       rc = bnxt_setup_vnic(bp, 0);
+       if (rc)
+               goto err_out;
+
+       if (bp->flags & BNXT_FLAG_RFS) {
+               rc = bnxt_alloc_rfs_vnics(bp);
+               if (rc)
+                       goto err_out;
+       }
+
+       if (bp->flags & BNXT_FLAG_TPA) {
+               rc = bnxt_set_tpa(bp, true);
+               if (rc)
+                       goto err_out;
+       }
+
+       /* a VF may have been handed a new MAC by the PF */
+       if (BNXT_VF(bp))
+               bnxt_update_vf_mac(bp);
+
+       /* Filter for default vnic 0 */
+       rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
+       if (rc) {
+               netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+               goto err_out;
+       }
+       vnic->uc_filter_count = 1;
+
+       vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+       if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+       if (bp->dev->flags & IFF_ALLMULTI) {
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+               vnic->mc_list_count = 0;
+       } else {
+               u32 mask = 0;
+
+               /* rebuild the multicast list and fold its mask bits in */
+               bnxt_mc_list_updated(bp, &mask);
+               vnic->rx_mask |= mask;
+       }
+
+       rc = bnxt_cfg_rx_mode(bp);
+       if (rc)
+               goto err_out;
+
+       /* coalescing failure is non-fatal: warn and continue */
+       rc = bnxt_hwrm_set_coal(bp);
+       if (rc)
+               netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
+                               rc);
+
+       /* Nitro A0 special-VNIC failure is also non-fatal here */
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+               rc = bnxt_setup_nitroa0_vnic(bp);
+               if (rc)
+                       netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
+                                  rc);
+       }
+
+       if (BNXT_VF(bp)) {
+               bnxt_hwrm_func_qcfg(bp);
+               netdev_update_features(bp->dev);
+       }
+
+       return 0;
+
+err_out:
+       bnxt_hwrm_resource_free(bp, 0, true);
+
+       return rc;
+}
+
+/* Tear down all firmware-side datapath resources (inverse of
+ * bnxt_init_chip()).  @irq_re_init mirrors the flag used at init time.
+ * Always returns 0.
+ */
+static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+{
+       bnxt_hwrm_resource_free(bp, 1, irq_re_init);
+       return 0;
+}
+
+/* Initialize the host-side ring state (completion, RX, TX, ring
+ * groups, VNIC structures) and then push the configuration to firmware
+ * via bnxt_init_chip().  Returns bnxt_init_chip()'s result.
+ */
+static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+{
+       bnxt_init_cp_rings(bp);
+       bnxt_init_rx_rings(bp);
+       bnxt_init_tx_rings(bp);
+       bnxt_init_ring_grps(bp, irq_re_init);
+       bnxt_init_vnics(bp);
+
+       return bnxt_init_chip(bp, irq_re_init);
+}
+
+/* Publish the real TX/RX queue counts to the networking core and,
+ * with RFS, allocate the CPU reverse-map used for aRFS IRQ steering.
+ * XDP TX rings are excluded from the count visible to the stack.
+ *
+ * Returns 0 on success or the error from the netif_set_real_num_*
+ * calls.  (VOID_NETIF_SET_NUM_TX covers old kernels where the TX
+ * variant returned void.)
+ */
+static int bnxt_set_real_num_queues(struct bnxt *bp)
+{
+       int rc;
+       struct net_device *dev = bp->dev;
+
+#ifdef VOID_NETIF_SET_NUM_TX
+       netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
+                                    bp->tx_nr_rings_xdp);
+#else
+       rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
+                                         bp->tx_nr_rings_xdp);
+       if (rc)
+               return rc;
+#endif
+       rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
+       if (rc)
+               return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+       /* allocation failure leaves rx_cpu_rmap NULL; aRFS is then
+        * simply unavailable, so the result is deliberately unchecked
+        */
+       if (bp->flags & BNXT_FLAG_RFS)
+               dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
+#endif
+
+       return rc;
+}
+
+/* Trim the requested RX/TX ring counts to fit within @max completion
+ * rings.  When @shared is true one completion ring serves an RX/TX
+ * pair, so each count is independently capped at @max.  Otherwise RX
+ * and TX need separate completion rings and the larger count is
+ * decremented first until the sum fits, never going below one ring of
+ * either kind.
+ *
+ * Returns 0 on success, or -ENOMEM when @max < 2 in the non-shared
+ * case (one RX and one TX ring cannot both fit).
+ */
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+                          bool shared)
+{
+       int rx_want = *rx, tx_want = *tx;
+
+       if (shared) {
+               *rx = min_t(int, rx_want, max);
+               *tx = min_t(int, tx_want, max);
+               return 0;
+       }
+
+       if (max < 2)
+               return -ENOMEM;
+
+       while (rx_want + tx_want > max) {
+               if (rx_want > tx_want && rx_want > 1)
+                       rx_want--;
+               else if (tx_want > 1)
+                       tx_want--;
+       }
+
+       *rx = rx_want;
+       *tx = tx_want;
+       return 0;
+}
+
+/* Label the MSI-X IRQ table and program the per-TC TX queue layout.
+ * With multiple traffic classes, each TC gets a contiguous span of
+ * bp->tx_nr_rings_per_tc queues.  Each vector is named
+ * "<dev>-<TxRx|rx|tx>-<index>" and bound to the bnxt_msix handler.
+ */
+static void bnxt_setup_msix(struct bnxt *bp)
+{
+       const int len = sizeof(bp->irq_tbl[0].name);
+       struct net_device *dev = bp->dev;
+       int tcs, i;
+
+       tcs = netdev_get_num_tc(dev);
+       if (tcs > 1) {
+               /* reuse the outer loop index; the original declared a
+                * shadowing inner "i" here
+                */
+               int off, count;
+
+               for (i = 0; i < tcs; i++) {
+                       count = bp->tx_nr_rings_per_tc;
+                       off = i * count;
+                       netdev_set_tc_queue(dev, i, count, off);
+               }
+       }
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               char *attr;
+
+               if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+                       attr = "TxRx";
+               else if (i < bp->rx_nr_rings)
+                       attr = "rx";
+               else
+                       attr = "tx";
+
+               snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
+                        i);
+               bp->irq_tbl[i].handler = bnxt_msix;
+       }
+}
+
+/* Label the single legacy-interrupt (INTA) IRQ table entry.  INTA
+ * implies one shared TxRx ring, so any multi-TC queue mapping is
+ * cleared first.  The vector is named "<dev>-TxRx-0" and bound to the
+ * bnxt_inta handler.
+ */
+static void bnxt_setup_inta(struct bnxt *bp)
+{
+       const int len = sizeof(bp->irq_tbl[0].name);
+
+       if (netdev_get_num_tc(bp->dev))
+               netdev_reset_tc(bp->dev);
+
+       snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
+                0);
+       bp->irq_tbl[0].handler = bnxt_inta;
+}
+
+/* Finish interrupt setup: label the IRQ table for the active scheme
+ * (MSI-X or legacy INTA) and publish the real queue counts to the
+ * stack.  Returns the result of bnxt_set_real_num_queues().
+ */
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+       if (bp->flags & BNXT_FLAG_USING_MSIX)
+               bnxt_setup_msix(bp);
+       else
+               bnxt_setup_inta(bp);
+
+       return bnxt_set_real_num_queues(bp);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+/* Max RSS/COS contexts available to this function (VF limit when
+ * running as a VF, otherwise the PF limit).
+ */
+static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               return bp->vf.max_rsscos_ctxs;
+#endif
+       return bp->pf.max_rsscos_ctxs;
+}
+
+/* Max VNICs available to this function (VF limit when running as a
+ * VF, otherwise the PF limit).
+ */
+static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               return bp->vf.max_vnics;
+#endif
+       return bp->pf.max_vnics;
+}
+#endif
+
+/* Max statistics contexts available to this function (VF limit when
+ * running as a VF, otherwise the PF limit).
+ */
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               return bp->vf.max_stat_ctxs;
+#endif
+       return bp->pf.max_stat_ctxs;
+}
+
+/* Record the statistics-context budget for this function, in the VF
+ * or PF resource struct as appropriate (e.g. after ULPs reserve some).
+ */
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               bp->vf.max_stat_ctxs = max;
+       else
+#endif
+               bp->pf.max_stat_ctxs = max;
+}
+
+/* Max completion rings available to this function (VF limit when
+ * running as a VF, otherwise the PF limit).
+ */
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               return bp->vf.max_cp_rings;
+#endif
+       return bp->pf.max_cp_rings;
+}
+
+/* Record the completion-ring budget for this function, in the VF or
+ * PF resource struct as appropriate.
+ */
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               bp->vf.max_cp_rings = max;
+       else
+#endif
+               bp->pf.max_cp_rings = max;
+}
+
+/* Max usable interrupt vectors for this function: capped by both the
+ * IRQ budget and the completion-ring budget, since each vector needs
+ * a completion ring.
+ */
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               return min_t(unsigned int, bp->vf.max_irqs,
+                            bp->vf.max_cp_rings);
+#endif
+       return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+}
+
+/* Record the interrupt-vector budget for this function, in the VF or
+ * PF resource struct as appropriate.
+ */
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               bp->vf.max_irqs = max_irqs;
+       else
+#endif
+               bp->pf.max_irqs = max_irqs;
+}
+
+/* Enable MSI-X and size the ring layout to the vectors obtained.
+ * Requests up to bnxt_get_max_func_irqs() vectors, accepting fewer
+ * down to a minimum of 1 (shared rings) or 2 (separate RX/TX rings).
+ * On success bp->irq_tbl holds the vector numbers, the RX/TX ring
+ * counts are trimmed to fit, and BNXT_FLAG_USING_MSIX is set.
+ *
+ * Returns 0 on success; -ENOMEM on allocation failure, -ENODEV when
+ * the minimum vector count cannot be enabled.  On failure MSI-X is
+ * disabled again and bp->irq_tbl is freed.
+ */
+static int bnxt_init_msix(struct bnxt *bp)
+{
+       int i, total_vecs, rc = 0, min = 1;
+       struct msix_entry *msix_ent;
+
+       total_vecs = bnxt_get_max_func_irqs(bp);
+       msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+       if (!msix_ent)
+               return -ENOMEM;
+
+       for (i = 0; i < total_vecs; i++) {
+               msix_ent[i].entry = i;
+               msix_ent[i].vector = 0;
+       }
+
+       /* separate RX and TX rings need at least one vector each */
+       if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+               min = 2;
+
+#ifdef HAVE_MSIX_RANGE
+       total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
+#else
+       /* old-kernel fallback: pci_enable_msix() returns a positive
+        * count when fewer vectors are available; retry with that count
+        */
+       while (total_vecs >= min) {
+               rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
+               if (!rc)
+                       break;
+               total_vecs = rc;
+       }
+#endif
+       if (total_vecs < 0) {
+               rc = -ENODEV;
+               goto msix_setup_exit;
+       }
+
+       bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+       if (bp->irq_tbl) {
+               for (i = 0; i < total_vecs; i++)
+                       bp->irq_tbl[i].vector = msix_ent[i].vector;
+
+               bp->total_irqs = total_vecs;
+               /* Trim rings based upon num of vectors allocated */
+               rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
+                                    total_vecs, min == 1);
+               if (rc)
+                       goto msix_setup_exit;
+
+               bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+               /* shared rings: one CP ring covers an RX/TX pair */
+               bp->cp_nr_rings = (min == 1) ?
+                                 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+                                 bp->tx_nr_rings + bp->rx_nr_rings;
+
+       } else {
+               rc = -ENOMEM;
+               goto msix_setup_exit;
+       }
+       bp->flags |= BNXT_FLAG_USING_MSIX;
+       kfree(msix_ent);
+       return 0;
+
+msix_setup_exit:
+       netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+       kfree(bp->irq_tbl);
+       bp->irq_tbl = NULL;
+       pci_disable_msix(bp->pdev);
+       kfree(msix_ent);
+       return rc;
+}
+
+/* Fall back to legacy INTA: a single shared vector, one RX/TX/CP ring
+ * each, with BNXT_FLAG_SHARED_RINGS forced on.  Returns 0 or -ENOMEM.
+ */
+static int bnxt_init_inta(struct bnxt *bp)
+{
+       bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+       if (!bp->irq_tbl)
+               return -ENOMEM;
+
+       bp->total_irqs = 1;
+       bp->rx_nr_rings = 1;
+       bp->tx_nr_rings = 1;
+       bp->cp_nr_rings = 1;
+       bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+       bp->flags |= BNXT_FLAG_SHARED_RINGS;
+       bp->irq_tbl[0].vector = bp->pdev->irq;
+       return 0;
+}
+
+/* Choose and initialize the interrupt scheme: MSI-X when the device
+ * supports it, otherwise legacy INTA — but only on the PF (VFs have no
+ * INTA fallback).  Returns 0 or the error from the chosen init path.
+ */
+static int bnxt_init_int_mode(struct bnxt *bp)
+{
+       int rc = 0;
+
+       if (bp->flags & BNXT_FLAG_MSIX_CAP)
+               rc = bnxt_init_msix(bp);
+
+       /* BNXT_FLAG_USING_MSIX unset here means MSI-X init failed */
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
+               /* fallback to INTA */
+               rc = bnxt_init_inta(bp);
+       }
+       return rc;
+}
+
+/* Undo bnxt_init_int_mode(): disable MSI-X if it was in use and free
+ * the IRQ table.  Safe to call when the table was never allocated.
+ */
+static void bnxt_clear_int_mode(struct bnxt *bp)
+{
+       if (bp->flags & BNXT_FLAG_USING_MSIX)
+               pci_disable_msix(bp->pdev);
+
+       kfree(bp->irq_tbl);
+       bp->irq_tbl = NULL;
+       bp->flags &= ~BNXT_FLAG_USING_MSIX;
+}
+
+/* Release every IRQ handler requested by bnxt_request_irq() and tear
+ * down the aRFS CPU reverse-map.  Only entries whose "requested" flag
+ * is set are freed, so this is safe after a partial request failure.
+ */
+static void bnxt_free_irq(struct bnxt *bp)
+{
+       struct bnxt_irq *irq;
+       int i;
+
+#ifdef CONFIG_RFS_ACCEL
+       /* free_irq_cpu_rmap() tolerates a NULL rmap */
+       free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+       bp->dev->rx_cpu_rmap = NULL;
+#endif
+       if (!bp->irq_tbl)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               irq = &bp->irq_tbl[i];
+               if (irq->requested)
+                       free_irq(irq->vector, bp->bnapi[i]);
+               irq->requested = 0;
+       }
+}
+
+/* Request one IRQ per completion ring, passing the matching bnxt_napi
+ * as the handler cookie.  Under INTA (no MSI-X) the line is requested
+ * IRQF_SHARED.  RX-capable vectors are also added to the aRFS CPU
+ * reverse-map; rmap failures only warn.
+ *
+ * Returns 0, or the first request_irq() error — already-requested
+ * vectors are left for bnxt_free_irq() to release.
+ */
+static int bnxt_request_irq(struct bnxt *bp)
+{
+       int i, j, rc = 0;
+       unsigned long flags = 0;
+#ifdef CONFIG_RFS_ACCEL
+       struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+#endif
+
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+               flags = IRQF_SHARED;
+
+       for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_irq *irq = &bp->irq_tbl[i];
+#ifdef CONFIG_RFS_ACCEL
+               /* j counts RX rings only, for the warning message */
+               if (rmap && bp->bnapi[i]->rx_ring) {
+                       rc = irq_cpu_rmap_add(rmap, irq->vector);
+                       if (rc)
+                               netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+                                           j);
+                       j++;
+               }
+#endif
+               rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+                                bp->bnapi[i]);
+               if (rc)
+                       break;
+
+               irq->requested = 1;
+       }
+       return rc;
+}
+
+/* Unregister every NAPI instance (inverse of bnxt_init_napi()).
+ * Removal from the busy-poll hash precedes netif_napi_del(), so an
+ * RCU grace period must elapse before the napi memory is reused.
+ */
+static void bnxt_del_napi(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+
+               napi_hash_del(&bnapi->napi);
+               netif_napi_del(&bnapi->napi);
+       }
+       /* We called napi_hash_del() before netif_napi_del(), we need
+        * to respect an RCU grace period before freeing napi structures.
+        */
+       synchronize_net();
+}
+
+/* Register a NAPI instance per completion ring.  Under MSI-X on
+ * Nitro A0 the last ring gets the dedicated bnxt_poll_nitroa0 poller;
+ * all other rings (and the single INTA ring) use bnxt_poll.  The
+ * weight 64 is the conventional NAPI budget.
+ */
+static void bnxt_init_napi(struct bnxt *bp)
+{
+       int i;
+       unsigned int cp_nr_rings = bp->cp_nr_rings;
+       struct bnxt_napi *bnapi;
+
+       if (bp->flags & BNXT_FLAG_USING_MSIX) {
+               if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+                       cp_nr_rings--;
+               for (i = 0; i < cp_nr_rings; i++) {
+                       bnapi = bp->bnapi[i];
+                       netif_napi_add(bp->dev, &bnapi->napi,
+                                      bnxt_poll, 64);
+                       napi_hash_add(&bnapi->napi);
+               }
+               if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+                       bnapi = bp->bnapi[cp_nr_rings];
+                       netif_napi_add(bp->dev, &bnapi->napi,
+                                      bnxt_poll_nitroa0, 64);
+                       napi_hash_add(&bnapi->napi);
+               }
+       } else {
+               bnapi = bp->bnapi[0];
+               netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+               napi_hash_add(&bnapi->napi);
+       }
+}
+
+/* Disable NAPI polling (and driver busy-poll state) on every
+ * completion ring.  No-op before the bnapi array exists.
+ */
+static void bnxt_disable_napi(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               napi_disable(&bp->bnapi[i]->napi);
+               bnxt_disable_poll(bp->bnapi[i]);
+       }
+}
+
+/* Re-enable NAPI polling on every completion ring.  A ring coming out
+ * of a reset (bnapi->in_reset) has its rx_resets counter bumped before
+ * the flag is cleared.
+ */
+static void bnxt_enable_napi(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               if (bnapi->in_reset)
+                       cpr->sw_stats.rx_resets++;
+               bnapi->in_reset = false;
+               bnxt_enable_poll(bnapi);
+               napi_enable(&bnapi->napi);
+       }
+}
+
+/* Quiesce transmit: mark every TX ring as closing so the xmit path
+ * stops queueing, then stop all stack TX queues and drop the carrier.
+ * Safe to call before the TX rings are allocated.
+ */
+void bnxt_tx_disable(struct bnxt *bp)
+{
+       int i;
+       struct bnxt_tx_ring_info *txr;
+
+       /* removed an unused netdev_get_tx_queue() lookup per ring */
+       if (bp->tx_ring) {
+               for (i = 0; i < bp->tx_nr_rings; i++) {
+                       txr = &bp->tx_ring[i];
+                       txr->dev_state = BNXT_DEV_STATE_CLOSING;
+               }
+       }
+       /* Stop all TX queues */
+       netif_tx_disable(bp->dev);
+       netif_carrier_off(bp->dev);
+}
+
+/* Re-enable transmit: clear the closing state on every TX ring, wake
+ * all stack TX queues, and restore the carrier if the link is up.
+ */
+void bnxt_tx_enable(struct bnxt *bp)
+{
+       int i;
+       struct bnxt_tx_ring_info *txr;
+
+       /* removed an unused netdev_get_tx_queue() lookup per ring */
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               txr = &bp->tx_ring[i];
+               txr->dev_state = 0;
+       }
+       netif_tx_wake_all_queues(bp->dev);
+       if (bp->link_info.link_up)
+               netif_carrier_on(bp->dev);
+}
+
+/* Log the current link state and sync the carrier.  On link-up this
+ * prints speed, duplex, flow control, EEE state (when supported) and
+ * FEC configuration; on link-down it drops the carrier and logs that.
+ */
+static void bnxt_report_link(struct bnxt *bp)
+{
+       if (bp->link_info.link_up) {
+               const char *duplex;
+               const char *flow_ctrl;
+               u16 speed, fec;
+
+               netif_carrier_on(bp->dev);
+               if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+                       duplex = "full";
+               else
+                       duplex = "half";
+               if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
+                       flow_ctrl = "ON - receive & transmit";
+               else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
+                       flow_ctrl = "ON - transmit";
+               else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
+                       flow_ctrl = "ON - receive";
+               else
+                       flow_ctrl = "none";
+               speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+               netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+                           speed, duplex, flow_ctrl);
+               if (bp->flags & BNXT_FLAG_EEE_CAP)
+                       netdev_info(bp->dev, "EEE is %s\n",
+                                   bp->eee.eee_active ? "active" :
+                                                        "not active");
+               /* FEC line is suppressed when firmware reports FEC as
+                * entirely unsupported on this port
+                */
+               fec = bp->link_info.fec_cfg;
+               if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
+                       netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+                                   (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
+                                   (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
+                                    (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
+       } else {
+               netif_carrier_off(bp->dev);
+               netdev_err(bp->dev, "NIC Link is Down\n");
+       }
+}
+
+/* Query PHY capabilities (HWRM_PORT_PHY_QCAPS) and cache the results:
+ * EEE support (flag, advertisable speeds, LPI timer bounds) and the
+ * set of speeds usable with autoneg.  No-op returning 0 on firmware
+ * older than HWRM spec 0x10201, which lacks this command's fields.
+ *
+ * Returns 0 on success or the HWRM error.
+ */
+static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
+{
+       int rc = 0;
+       struct hwrm_port_phy_qcaps_input req = {0};
+       struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       if (bp->hwrm_spec_code < 0x10201)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+
+       /* hold the lock while reading the shared response buffer */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto hwrm_phy_qcaps_exit;
+
+       if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
+               struct ethtool_eee *eee = &bp->eee;
+               u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
+
+               bp->flags |= BNXT_FLAG_EEE_CAP;
+               eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+               bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
+                                PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
+               bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
+                                PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
+       }
+       if (resp->supported_speeds_auto_mode)
+               link_info->support_auto_speeds =
+                       le16_to_cpu(resp->supported_speeds_auto_mode);
+
+hwrm_phy_qcaps_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Refresh the cached link state from firmware (HWRM_PORT_PHY_QCFG).
+ * Copies the full response into link_info, decodes speed/duplex/pause/
+ * PHY identity/EEE fields, and — when @chng_link_state is true —
+ * updates link_info->link_up and reports transitions via
+ * bnxt_report_link().  Also prunes advertised speeds that the PHY no
+ * longer supports, re-issuing the link settings if autoneg is on
+ * (caller must hold RTNL).
+ *
+ * Returns 0 on success or the HWRM error.
+ */
+static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+{
+       int rc = 0;
+       struct bnxt_link_info *link_info = &bp->link_info;
+       struct hwrm_port_phy_qcfg_input req = {0};
+       struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       u8 link_up = link_info->link_up;
+       u16 diff;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               return rc;
+       }
+
+       memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
+       link_info->phy_link_status = resp->link;
+       /* spec >= 0x10800 reports actual duplex separately from the
+        * configured duplex; prefer the actual state when available
+        */
+       link_info->duplex = resp->duplex_cfg;
+       if (bp->hwrm_spec_code >= 0x10800)
+               link_info->duplex = resp->duplex_state;
+       link_info->pause = resp->pause;
+       link_info->auto_mode = resp->auto_mode;
+       link_info->auto_pause_setting = resp->auto_pause;
+       link_info->lp_pause = resp->link_partner_adv_pause;
+       link_info->force_pause_setting = resp->force_pause;
+       link_info->duplex_setting = resp->duplex_cfg;
+       if (link_info->phy_link_status == BNXT_LINK_LINK)
+               link_info->link_speed = le16_to_cpu(resp->link_speed);
+       else
+               link_info->link_speed = 0;
+       link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+       link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+       link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+       link_info->lp_auto_link_speeds =
+               le16_to_cpu(resp->link_partner_adv_speeds);
+       link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+       link_info->phy_ver[0] = resp->phy_maj;
+       link_info->phy_ver[1] = resp->phy_min;
+       link_info->phy_ver[2] = resp->phy_bld;
+       link_info->media_type = resp->media_type;
+       link_info->phy_type = resp->phy_type;
+       link_info->transceiver = resp->xcvr_pkg_type;
+       link_info->phy_addr = resp->eee_config_phy_addr &
+                             PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
+       link_info->module_status = resp->module_status;
+
+       if (bp->flags & BNXT_FLAG_EEE_CAP) {
+               struct ethtool_eee *eee = &bp->eee;
+               u16 fw_speeds;
+
+               eee->eee_active = 0;
+               if (resp->eee_config_phy_addr &
+                   PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
+                       eee->eee_active = 1;
+                       fw_speeds = le16_to_cpu(
+                               resp->link_partner_adv_eee_link_speed_mask);
+                       eee->lp_advertised =
+                               _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+               }
+
+               /* Pull initial EEE config */
+               if (!chng_link_state) {
+                       if (resp->eee_config_phy_addr &
+                           PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
+                               eee->eee_enabled = 1;
+
+                       fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
+                       eee->advertised =
+                               _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+
+                       if (resp->eee_config_phy_addr &
+                           PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
+                               __le32 tmr;
+
+                               eee->tx_lpi_enabled = 1;
+                               tmr = resp->xcvr_identifier_type_tx_lpi_timer;
+                               eee->tx_lpi_timer = le32_to_cpu(tmr) &
+                                       PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
+                       }
+               }
+       }
+
+       /* default to "FEC unsupported" on firmware lacking fec_cfg */
+       link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
+       if (bp->hwrm_spec_code >= 0x10504)
+               link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
+
+       /* TODO: need to add more logic to report VF link */
+       if (chng_link_state) {
+               if (link_info->phy_link_status == BNXT_LINK_LINK)
+                       link_info->link_up = 1;
+               else
+                       link_info->link_up = 0;
+               if (link_up != link_info->link_up)
+                       bnxt_report_link(bp);
+       } else {
+               /* always report link down if not asked to update state */
+               link_info->link_up = 0;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       diff = link_info->support_auto_speeds ^ link_info->advertising;
+       if ((link_info->support_auto_speeds | diff) !=
+           link_info->support_auto_speeds) {
+               /* An advertised speed is no longer supported, so we need to
+                * update the advertisement settings.  Caller holds RTNL
+                * so we can modify link settings.
+                */
+               link_info->advertising = link_info->support_auto_speeds;
+               if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+                       bnxt_hwrm_set_link_setting(bp, true, false);
+       }
+       return 0;
+}
+
+/* Refresh link state and warn about problematic SFP+ modules: any of
+ * the DISABLETX / PWRDOWN / WARNINGMSG statuses logs an "unqualified
+ * module" warning (with part number on spec >= 0x10201) plus the
+ * specific consequence.  Silently returns if the link query fails.
+ */
+static void bnxt_get_port_module_status(struct bnxt *bp)
+{
+       struct bnxt_link_info *link_info = &bp->link_info;
+       struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
+       u8 module_status;
+
+       if (bnxt_update_link(bp, true))
+               return;
+
+       module_status = link_info->module_status;
+       switch (module_status) {
+       /* three statuses share the "unqualified module" warning */
+       case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
+       case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+       case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
+               netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
+                           bp->pf.port_id);
+               if (bp->hwrm_spec_code >= 0x10201) {
+                       netdev_warn(bp->dev, "Module part number %s\n",
+                                   resp->phy_vendor_partnumber);
+               }
+               if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
+                       netdev_warn(bp->dev, "TX is disabled\n");
+               if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
+                       netdev_warn(bp->dev, "SFP+ module is shutdown\n");
+       }
+}
+
+/* Fill the pause/flow-control fields of a PORT_PHY_CFG request from
+ * the requested settings in bp->link_info.  With flow-control autoneg
+ * the auto_pause fields are used; otherwise force_pause is set (and on
+ * spec >= 0x10201 mirrored into auto_pause, which that firmware
+ * expects even in forced mode).
+ */
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+       if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+               if (bp->hwrm_spec_code >= 0x10201)
+                       req->auto_pause =
+                               PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+               if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+                       req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+               if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+                       req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+               req->enables |=
+                       cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+       } else {
+               if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+                       req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+               if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+                       req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+               req->enables |=
+                       cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+               if (bp->hwrm_spec_code >= 0x10201) {
+                       req->auto_pause = req->force_pause;
+                       req->enables |= cpu_to_le32(
+                               PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+               }
+       }
+}
+
+/* Fill the speed/autoneg fields of a PORT_PHY_CFG request from
+ * bp->link_info: with BNXT_AUTONEG_SPEED, advertise the configured
+ * speed mask and restart autoneg; otherwise force the requested link
+ * speed.  Always asks the PHY to apply the change immediately.
+ */
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+                                     struct hwrm_port_phy_cfg_input *req)
+{
+       u8 autoneg = bp->link_info.autoneg;
+       u16 fw_link_speed = bp->link_info.req_link_speed;
+       u16 advertising = bp->link_info.advertising;
+
+       if (autoneg & BNXT_AUTONEG_SPEED) {
+               req->auto_mode |=
+                       PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
+
+               req->enables |= cpu_to_le32(
+                       PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+               req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+               req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+               req->flags |=
+                       cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+       } else {
+               req->force_link_speed = cpu_to_le16(fw_link_speed);
+               req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+       }
+
+       /* tell chimp that the setting takes effect immediately */
+       req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+/* Push the configured pause settings to the PHY via PORT_PHY_CFG.
+ * Link-speed fields are included too when flow-control autoneg is on
+ * or a forced-link change is pending.  With forced (non-autoneg) pause
+ * the cached pause state is updated locally, since no link-change
+ * event will arrive to refresh it.
+ *
+ * Returns the HWRM result.
+ */
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+       struct hwrm_port_phy_cfg_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+       bnxt_hwrm_set_pause_common(bp, &req);
+
+       if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+           bp->link_info.force_link_chng)
+               bnxt_hwrm_set_link_common(bp, &req);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+               /* since changing the pause setting doesn't trigger any link
+                * change event, the driver needs to update the current pause
+                * result after the phy_cfg command returns successfully
+                */
+               bp->link_info.pause =
+               bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+               bp->link_info.auto_pause_setting = 0;
+               if (!bp->link_info.force_link_chng)
+                       bnxt_report_link(bp);
+       }
+       bp->link_info.force_link_chng = false;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Fill in the Energy Efficient Ethernet fields of an HWRM_PORT_PHY_CFG
+ * request from bp->eee.  When EEE is disabled, only the disable flag is
+ * set; the request is not sent here.
+ */
+static void bnxt_hwrm_set_eee(struct bnxt *bp,
+                             struct hwrm_port_phy_cfg_input *req)
+{
+       struct ethtool_eee *eee = &bp->eee;
+
+       if (eee->eee_enabled) {
+               u16 eee_speeds;
+               u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
+
+               if (eee->tx_lpi_enabled)
+                       flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
+               else
+                       flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
+
+               req->flags |= cpu_to_le32(flags);
+               /* Translate the ethtool advertisement mask to firmware
+                * link-speed bits.
+                */
+               eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
+               req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
+               req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
+       } else {
+               req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
+       }
+}
+
+/* Build and send a combined HWRM_PORT_PHY_CFG request covering link speed
+ * settings and, optionally, pause and EEE configuration.
+ * Returns 0 or a negative error from the firmware call.
+ */
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
+{
+       struct hwrm_port_phy_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+       if (set_pause)
+               bnxt_hwrm_set_pause_common(bp, &req);
+
+       bnxt_hwrm_set_link_common(bp, &req);
+
+       if (set_eee)
+               bnxt_hwrm_set_eee(bp, &req);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Ask the firmware to force the link down (used on device close).  Skipped
+ * unless this is a single PF with no VFs enabled, since other functions
+ * may still be sharing the port.
+ */
+static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
+{
+       struct hwrm_port_phy_cfg_input req = {0};
+
+       if (!BNXT_SINGLE_PF(bp))
+               return 0;
+
+       if (pci_num_vf(bp->pdev))
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+       req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Query the port LED capabilities from the firmware and cache them in
+ * bp->leds/bp->num_leds.  PF only, and only for firmware spec >= 1.6.1.
+ * The response buffer is read while hwrm_cmd_lock is held.
+ */
+static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+       struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_port_led_qcaps_input req = {0};
+       struct bnxt_pf_info *pf = &bp->pf;
+       int rc;
+
+       if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
+       req.port_id = cpu_to_le16(pf->port_id);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               return rc;
+       }
+       if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+               int i;
+
+               bp->num_leds = resp->num_leds;
+               memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
+                                                bp->num_leds);
+               for (i = 0; i < bp->num_leds; i++) {
+                       struct bnxt_led_info *led = &bp->leds[i];
+                       __le16 caps = led->led_state_caps;
+
+                       /* If any LED lacks a group id or alternate-blink
+                        * capability, disable LED support entirely.
+                        */
+                       if (!led->led_group_id ||
+                           !BNXT_LED_ALT_BLINK_CAP(caps)) {
+                               bp->num_leds = 0;
+                               break;
+                       }
+               }
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return 0;
+}
+
+/* Allocate a magic-packet Wake-on-LAN filter for this port's MAC address
+ * and cache the firmware-assigned filter id in bp->wol_filter_id.
+ * Returns 0 or a negative error from the firmware call.
+ */
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
+{
+       struct hwrm_wol_filter_alloc_input req = {0};
+       struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+       req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+       memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
+       /* Hold the lock across the call so resp remains valid while read. */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               bp->wol_filter_id = resp->wol_filter_id;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Release the Wake-on-LAN filter previously cached in bp->wol_filter_id.
+ * Returns 0 or a negative error from the firmware call.
+ */
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
+{
+       struct hwrm_wol_filter_free_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
+       req.wol_filter_id = bp->wol_filter_id;
+       req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Query one entry of the firmware's Wake-on-LAN filter list, starting at
+ * @handle.  If a magic-packet filter is found, record it in bp->wol and
+ * bp->wol_filter_id.  Returns the next handle to query (0 on failure or
+ * end of list).
+ */
+static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
+{
+       struct hwrm_wol_filter_qcfg_input req = {0};
+       struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 next_handle = 0;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       req.handle = cpu_to_le16(handle);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               next_handle = le16_to_cpu(resp->next_handle);
+               if (next_handle != 0) {
+                       if (resp->wol_type ==
+                           WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
+                               bp->wol = 1;
+                               bp->wol_filter_id = resp->wol_filter_id;
+                       }
+               }
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return next_handle;
+}
+
+/* Walk the firmware's Wake-on-LAN filter list so any existing magic-packet
+ * filter gets cached by bnxt_hwrm_get_wol_fltrs().  PF only, and only when
+ * the device reports WoL capability.
+ */
+static void bnxt_get_wol_settings(struct bnxt *bp)
+{
+       u16 handle = 0;
+
+       if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
+               return;
+
+       for (;;) {
+               handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
+               if (!handle || handle == 0xffff)
+                       break;
+       }
+}
+
+/* Validate the cached EEE configuration against the current link settings.
+ * Returns true if consistent (or EEE unsupported); returns false after
+ * correcting bp->eee when EEE requires autoneg or the advertisement mask
+ * exceeds what is currently advertised.
+ */
+static bool bnxt_eee_config_ok(struct bnxt *bp)
+{
+       struct ethtool_eee *eee = &bp->eee;
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+               return true;
+
+       if (eee->eee_enabled) {
+               u32 advertising =
+                       _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+
+               /* EEE only works with autonegotiated speeds. */
+               if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+                       eee->eee_enabled = 0;
+                       return false;
+               }
+               if (eee->advertised & ~advertising) {
+                       eee->advertised = advertising & eee->supported;
+                       return false;
+               }
+       }
+       return true;
+}
+
+/* Re-read the link state from firmware and, if the cached requested
+ * settings (speed, duplex, pause, EEE) differ from the current ones,
+ * push updated PHY configuration to the firmware.  Only a single PF
+ * reconfigures the PHY.  Returns 0 or a negative error.
+ */
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+       int rc;
+       bool update_link = false;
+       bool update_pause = false;
+       bool update_eee = false;
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       rc = bnxt_update_link(bp, true);
+       if (rc) {
+               netdev_err(bp->dev, "failed to update link (rc: %x)\n",
+                          rc);
+               return rc;
+       }
+       if (!BNXT_SINGLE_PF(bp))
+               return 0;
+
+       /* Pause needs updating whether flow control is autonegotiated or
+        * forced, if the requested setting differs from the current one.
+        */
+       if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+           (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
+           link_info->req_flow_ctrl)
+               update_pause = true;
+       if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+           link_info->force_pause_setting != link_info->req_flow_ctrl)
+               update_pause = true;
+       if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+               /* Forced mode requested: update if the chip is still in an
+                * auto mode or the forced speed/duplex differ.
+                */
+               if (BNXT_AUTO_MODE(link_info->auto_mode))
+                       update_link = true;
+               if (link_info->req_link_speed != link_info->force_link_speed)
+                       update_link = true;
+               if (link_info->req_duplex != link_info->duplex_setting)
+                       update_link = true;
+       } else {
+               if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
+                       update_link = true;
+               if (link_info->advertising != link_info->auto_link_speeds)
+                       update_link = true;
+       }
+
+       /* The last close may have shutdown the link, so need to call
+        * PHY_CFG to bring it back up.
+        */
+       if (!netif_carrier_ok(bp->dev))
+               update_link = true;
+
+       if (!bnxt_eee_config_ok(bp))
+               update_eee = true;
+
+       if (update_link)
+               rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
+       else if (update_pause)
+               rc = bnxt_hwrm_set_pause(bp);
+       if (rc) {
+               netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
+                          rc);
+               return rc;
+       }
+
+       return rc;
+}
+
+/* Common routine to pre-map certain register block to different GRC window.
+ * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
+ * in PF and 3 windows in VF that can be customized to map in different
+ * register blocks.
+ */
+static void bnxt_preset_reg_win(struct bnxt *bp)
+{
+       if (BNXT_PF(bp)) {
+               /* CAG registers map to GRC window #4 */
+               /* NOTE(review): the +12 offset presumably selects the 4th
+                * 4-byte window-base register — confirm against the GRC spec.
+                */
+               writel(BNXT_CAG_REG_BASE,
+                      bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
+       }
+}
+
+/* Core open path: set up interrupts, allocate rings/memory, start NAPI,
+ * initialize the NIC, bring up the PHY and tunnel offloads, then enable
+ * interrupts and the TX queues.  On failure, everything acquired so far
+ * is torn down via the error labels.  Returns 0 or a negative error.
+ */
+static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+       int rc = 0;
+
+       bnxt_preset_reg_win(bp);
+       netif_carrier_off(bp->dev);
+       if (irq_re_init) {
+               rc = bnxt_setup_int_mode(bp);
+               if (rc) {
+                       netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+                                  rc);
+                       return rc;
+               }
+       }
+       if ((bp->flags & BNXT_FLAG_RFS) &&
+           !(bp->flags & BNXT_FLAG_USING_MSIX)) {
+               /* disable RFS if falling back to INTA */
+               bp->dev->hw_features &= ~NETIF_F_NTUPLE;
+               bp->flags &= ~BNXT_FLAG_RFS;
+       }
+
+       rc = bnxt_alloc_mem(bp, irq_re_init);
+       if (rc) {
+               netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+               goto open_err_free_mem;
+       }
+
+       if (irq_re_init) {
+               bnxt_init_napi(bp);
+               rc = bnxt_request_irq(bp);
+               if (rc) {
+                       netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+                       goto open_err;
+               }
+       }
+
+       bnxt_enable_napi(bp);
+
+       rc = bnxt_init_nic(bp, irq_re_init);
+       if (rc) {
+               netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+               goto open_err;
+       }
+
+       if (link_re_init) {
+               /* PHY setting failure is non-fatal; open continues. */
+               rc = bnxt_update_phy_setting(bp);
+               if (rc)
+                       netdev_warn(bp->dev, "failed to update phy settings\n");
+       }
+
+       if (irq_re_init) {
+               /* Re-learn UDP tunnel ports via whichever compat API the
+                * kernel provides (udp_tunnel vs. legacy vxlan callbacks).
+                */
+#if defined(HAVE_NDO_UDP_TUNNEL)
+               udp_tunnel_get_rx_info(bp->dev);
+#elif defined(HAVE_NDO_ADD_VXLAN)
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+               vxlan_get_rx_port(bp->dev);
+#endif
+               if (!bnxt_hwrm_tunnel_dst_port_alloc(
+                               bp, htons(0x17c1),
+                               TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
+                       bp->nge_port_cnt = 1;
+#endif
+       }
+
+       set_bit(BNXT_STATE_OPEN, &bp->state);
+       bnxt_enable_int(bp);
+       /* Enable TX queues */
+       bnxt_tx_enable(bp);
+       mod_timer(&bp->timer, jiffies + bp->current_interval);
+       /* Poll link status and check for SFP+ module status */
+       bnxt_get_port_module_status(bp);
+
+       return 0;
+
+open_err:
+       bnxt_disable_napi(bp);
+       bnxt_del_napi(bp);
+
+open_err_free_mem:
+       bnxt_free_skbs(bp);
+       bnxt_free_irq(bp);
+       bnxt_free_mem(bp, true);
+       return rc;
+}
+
+/* rtnl_lock held */
+/* Public open wrapper: on failure the device is closed so it is never
+ * left half-open.  Returns 0 or the error from __bnxt_open_nic().
+ */
+int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+       int rc = 0;
+
+       rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+       if (rc) {
+               netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+               dev_close(bp->dev);
+       }
+       return rc;
+}
+
+/* rtnl_lock held, open the NIC half way by allocating all resources, but
+ * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
+ * self tests.
+ */
+int bnxt_half_open_nic(struct bnxt *bp)
+{
+       int rc = 0;
+
+       rc = bnxt_alloc_mem(bp, false);
+       if (rc) {
+               netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+               goto half_open_err;
+       }
+       rc = bnxt_init_nic(bp, false);
+       if (rc) {
+               netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+               goto half_open_err;
+       }
+       return 0;
+
+half_open_err:
+       /* On failure, release everything and close the netdev. */
+       bnxt_free_skbs(bp);
+       bnxt_free_mem(bp, false);
+       dev_close(bp->dev);
+       return rc;
+}
+
+/* rtnl_lock held, this call can only be made after a previous successful
+ * call to bnxt_half_open_nic().
+ */
+void bnxt_half_close_nic(struct bnxt *bp)
+{
+       /* Mirror of bnxt_half_open_nic(): free firmware resources first,
+        * then driver-side buffers and memory.
+        */
+       bnxt_hwrm_resource_free(bp, false, false);
+       bnxt_free_skbs(bp);
+       bnxt_free_mem(bp, false);
+}
+
+/* ndo_open handler.  On kernels without the PCI sriov_configure hook,
+ * SR-IOV is started from open using the num_vfs module parameter.
+ */
+static int bnxt_open(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+#ifndef PCIE_SRIOV_CONFIGURE
+       int rc;
+
+       rc = __bnxt_open_nic(bp, true, true);
+       if (!rc)
+               bnxt_start_sriov(bp, num_vfs);
+       return rc;
+#else
+
+       return __bnxt_open_nic(bp, true, true);
+#endif
+}
+
+/* True while another context (the slow-path task or a stats read) is
+ * still using the device; close paths spin on this before tearing down.
+ */
+static bool bnxt_drv_busy(struct bnxt *bp)
+{
+       if (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
+               return true;
+       return test_bit(BNXT_STATE_READ_STATS, &bp->state);
+}
+
+/* Tear down the NIC: quiesce TX, wait for concurrent users to drain,
+ * flush the rings, and free IRQs/memory as requested.  If an SR-IOV
+ * configuration operation is in flight, wait (bounded) for it first.
+ * Returns 0; the close always proceeds.
+ */
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+       int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+       if (bp->sriov_cfg) {
+               rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+                                                     !bp->sriov_cfg,
+                                                     BNXT_SRIOV_CFG_WAIT_TMO);
+               /* wait_event_interruptible_timeout() returns 0 on timeout,
+                * a positive value on success, and a negative value if
+                * interrupted.  The old "if (rc)" check warned about a
+                * timeout exactly when the wait had succeeded.
+                */
+               if (!rc)
+                       netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+               else if (rc < 0)
+                       netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
+               /* Don't leak the wait's return value (remaining jiffies or
+                * -ERESTARTSYS) to the caller as a close error.
+                */
+               rc = 0;
+       }
+#endif
+       /* Change device state to avoid TX queue wake up's */
+       bnxt_tx_disable(bp);
+
+       clear_bit(BNXT_STATE_OPEN, &bp->state);
+       smp_mb__after_atomic();
+       while (bnxt_drv_busy(bp))
+               msleep(20);
+
+       /* Flush rings and disable interrupts */
+       bnxt_shutdown_nic(bp, irq_re_init);
+
+       /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+
+       bnxt_disable_napi(bp);
+       del_timer_sync(&bp->timer);
+       bnxt_free_skbs(bp);
+
+       if (irq_re_init) {
+               bnxt_free_irq(bp);
+               bnxt_del_napi(bp);
+       }
+       bnxt_free_mem(bp, irq_re_init);
+       return rc;
+}
+
+/* ndo_stop handler: close the NIC, then force the link down via firmware
+ * (bnxt_hwrm_shutdown_link is itself a no-op unless single PF, no VFs).
+ */
+static int bnxt_close(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       bnxt_close_nic(bp, true, true);
+       bnxt_hwrm_shutdown_link(bp);
+       return 0;
+}
+
+/* rtnl_lock held */
+/* ndo_do_ioctl handler.  The MII ioctls are accepted but do nothing
+ * beyond checking that the interface is running; hardware timestamping
+ * ioctls are forwarded to the PTP code when compiled in.
+ */
+static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       switch (cmd) {
+       case SIOCGMIIPHY:
+               /* fallthru */
+       case SIOCGMIIREG: {
+               if (!netif_running(dev))
+                       return -EAGAIN;
+
+               /* No MII access is implemented; report success only. */
+               return 0;
+       }
+
+       case SIOCSMIIREG:
+               if (!netif_running(dev))
+                       return -EAGAIN;
+
+               return 0;
+
+#ifdef HAVE_IEEE1588_SUPPORT
+       case SIOCSHWTSTAMP:
+               return bnxt_hwtstamp_set(dev, ifr);
+
+       case SIOCGHWTSTAMP:
+               return bnxt_hwtstamp_get(dev, ifr);
+#endif
+
+       default:
+               /* do nothing */
+               break;
+       }
+       return -EOPNOTSUPP;
+}
+
+/* Statistics handlers.  Two compat variants: the 64-bit ndo_get_stats64
+ * form (void or pointer-returning depending on kernel) and the legacy
+ * ndo_get_stats form.  Both set BNXT_STATE_READ_STATS so that
+ * bnxt_close_nic() waits (via bnxt_drv_busy()) until the read finishes,
+ * and bail out early if the device is not open.
+ */
+#ifdef NETDEV_GET_STATS64
+#ifdef NETDEV_GET_STATS64_VOID
+static void
+#else
+static struct rtnl_link_stats64 *
+#endif
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       u32 i;
+       struct bnxt *bp = netdev_priv(dev);
+
+       set_bit(BNXT_STATE_READ_STATS, &bp->state);
+       /* Make sure bnxt_close_nic() sees that we are reading stats before
+        * we check the BNXT_STATE_OPEN flag.
+        */
+       smp_mb__after_atomic();
+       if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+               clear_bit(BNXT_STATE_READ_STATS, &bp->state);
+#ifdef NETDEV_GET_STATS64_VOID
+               return;
+#else
+               return stats;
+#endif
+       }
+
+       /* TODO check if we need to synchronize with bnxt_close path */
+       /* Accumulate per-completion-ring hardware counters. */
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+               struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+               stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
+               stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
+               stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+
+               stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
+               stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
+               stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+
+               stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
+               stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
+               stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+
+               stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
+               stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
+               stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+
+               stats->rx_missed_errors +=
+                       le64_to_cpu(hw_stats->rx_discard_pkts);
+
+               stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+
+               stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+       }
+
+       /* Fold in port-level error counters when port stats are enabled. */
+       if (bp->flags & BNXT_FLAG_PORT_STATS) {
+               struct rx_port_stats *rx = bp->hw_rx_port_stats;
+               struct tx_port_stats *tx = bp->hw_tx_port_stats;
+
+               stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
+               stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
+               stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
+                                         le64_to_cpu(rx->rx_ovrsz_frames) +
+                                         le64_to_cpu(rx->rx_runt_frames);
+               stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
+                                  le64_to_cpu(rx->rx_jbr_frames);
+               stats->collisions = le64_to_cpu(tx->tx_total_collisions);
+               stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
+               stats->tx_errors = le64_to_cpu(tx->tx_err);
+       }
+       clear_bit(BNXT_STATE_READ_STATS, &bp->state);
+#ifndef NETDEV_GET_STATS64_VOID
+       return stats;
+#endif
+}
+#else
+/* Legacy 32-bit stats variant; same logic via the GET_NET_STATS macro. */
+static struct net_device_stats *bnxt_get_stats(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       int i;
+
+       set_bit(BNXT_STATE_READ_STATS, &bp->state);
+       smp_mb__after_atomic();
+       if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+               clear_bit(BNXT_STATE_READ_STATS, &bp->state);
+               return stats;
+       }
+
+       memset(stats, 0, sizeof(struct net_device_stats));
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+               struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+               stats->rx_packets += GET_NET_STATS(hw_stats->rx_ucast_pkts);
+               stats->rx_packets += GET_NET_STATS(hw_stats->rx_mcast_pkts);
+               stats->rx_packets += GET_NET_STATS(hw_stats->rx_bcast_pkts);
+
+               stats->tx_packets += GET_NET_STATS(hw_stats->tx_ucast_pkts);
+               stats->tx_packets += GET_NET_STATS(hw_stats->tx_mcast_pkts);
+               stats->tx_packets += GET_NET_STATS(hw_stats->tx_bcast_pkts);
+
+               stats->rx_bytes += GET_NET_STATS(hw_stats->rx_ucast_bytes);
+               stats->rx_bytes += GET_NET_STATS(hw_stats->rx_mcast_bytes);
+               stats->rx_bytes += GET_NET_STATS(hw_stats->rx_bcast_bytes);
+
+               stats->tx_bytes += GET_NET_STATS(hw_stats->tx_ucast_bytes);
+               stats->tx_bytes += GET_NET_STATS(hw_stats->tx_mcast_bytes);
+               stats->tx_bytes += GET_NET_STATS(hw_stats->tx_bcast_bytes);
+
+               stats->rx_missed_errors +=
+                       GET_NET_STATS(hw_stats->rx_discard_pkts);
+               stats->multicast += GET_NET_STATS(hw_stats->rx_mcast_pkts);
+               stats->tx_dropped += GET_NET_STATS(hw_stats->tx_drop_pkts);
+       }
+
+       if (bp->flags & BNXT_FLAG_PORT_STATS) {
+               struct rx_port_stats *rx = bp->hw_rx_port_stats;
+               struct tx_port_stats *tx = bp->hw_tx_port_stats;
+
+               stats->rx_crc_errors = GET_NET_STATS(rx->rx_fcs_err_frames);
+               stats->rx_frame_errors = GET_NET_STATS(rx->rx_align_err_frames);
+               stats->rx_length_errors = GET_NET_STATS(rx->rx_undrsz_frames) +
+                                         GET_NET_STATS(rx->rx_ovrsz_frames) +
+                                         GET_NET_STATS(rx->rx_runt_frames);
+               stats->rx_errors = GET_NET_STATS(rx->rx_false_carrier_frames) +
+                                  GET_NET_STATS(rx->rx_jbr_frames);
+               stats->collisions = GET_NET_STATS(tx->tx_total_collisions);
+               stats->tx_fifo_errors = GET_NET_STATS(tx->tx_fifo_underruns);
+               stats->tx_errors = GET_NET_STATS(tx->tx_err);
+       }
+
+       clear_bit(BNXT_STATE_READ_STATS, &bp->state);
+       return &dev->stats;
+}
+#endif
+
+/* Sync the netdev multicast list into vnic 0's mc_list, updating *rx_mask.
+ * Returns true if the cached list changed.  If there are more addresses
+ * than BNXT_MAX_MC_ADDRS, fall back to all-multicast mode instead.
+ */
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+{
+       struct net_device *dev = bp->dev;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+#ifdef HAVE_DEV_ADDR_LIST
+       struct dev_addr_list *ha;
+#else
+       struct netdev_hw_addr *ha;
+#endif
+       u8 *haddr;
+       int mc_count = 0;
+       bool update = false;
+       int off = 0;
+
+       netdev_for_each_mc_addr(ha, dev) {
+               if (mc_count >= BNXT_MAX_MC_ADDRS) {
+                       *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+                       vnic->mc_list_count = 0;
+                       return false;
+               }
+#ifdef HAVE_DEV_ADDR_LIST
+               haddr = ha->da_addr;
+#else
+               haddr = ha->addr;
+#endif
+               /* Only copy (and flag an update) when the entry changed. */
+               if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+                       memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+                       update = true;
+               }
+               off += ETH_ALEN;
+               mc_count++;
+       }
+       if (mc_count)
+               *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+       if (mc_count != vnic->mc_list_count) {
+               vnic->mc_list_count = mc_count;
+               update = true;
+       }
+       return update;
+}
+
+/* Return true if the netdev unicast list differs from the cached copy in
+ * vnic 0.  Cached entries start at slot 1 (slot 0 presumably holds the
+ * primary MAC filter — confirm against bnxt_cfg_rx_mode()).
+ */
+static bool bnxt_uc_list_updated(struct bnxt *bp)
+{
+       struct net_device *dev = bp->dev;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       struct netdev_hw_addr *ha;
+       int off = 0;
+
+       if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+               return true;
+
+       netdev_for_each_uc_addr(ha, dev) {
+               if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+                       return true;
+
+               off += ETH_ALEN;
+       }
+       return false;
+}
+
+/* ndo_set_rx_mode handler.  Recomputes the vnic 0 RX mask from the netdev
+ * flags and address lists; actual reprogramming is deferred to the
+ * slow-path task (BNXT_RX_MASK_SP_EVENT) since this runs in atomic
+ * context.
+ */
+static void bnxt_set_rx_mode(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       u32 mask = vnic->rx_mask;
+       bool mc_update = false;
+       bool uc_update;
+
+       if (!netif_running(dev))
+               return;
+
+       mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
+                 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
+                 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+
+       if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
+               mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+       uc_update = bnxt_uc_list_updated(bp);
+
+       if (dev->flags & IFF_ALLMULTI) {
+               mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+               vnic->mc_list_count = 0;
+       } else {
+               mc_update = bnxt_mc_list_updated(bp, &mask);
+       }
+
+       if (mask != vnic->rx_mask || uc_update || mc_update) {
+               vnic->rx_mask = mask;
+
+               set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+       }
+}
+
+/* Apply the RX mode to the hardware: rebuild the unicast MAC filters for
+ * vnic 0 (slot 0 is left intact; extra filters are freed and re-added
+ * from slot 1), then program the L2 RX mask.  Runs from the slow-path
+ * task; returns 0 or a negative firmware error.
+ */
+static int bnxt_cfg_rx_mode(struct bnxt *bp)
+{
+       struct net_device *dev = bp->dev;
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+       struct netdev_hw_addr *ha;
+       int i, off = 0, rc;
+       bool uc_update;
+
+       netif_addr_lock_bh(dev);
+       uc_update = bnxt_uc_list_updated(bp);
+       netif_addr_unlock_bh(dev);
+
+       if (!uc_update)
+               goto skip_uc;
+
+       /* Free all previously programmed extra unicast filters. */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 1; i < vnic->uc_filter_count; i++) {
+               struct hwrm_cfa_l2_filter_free_input req = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
+                                      -1);
+
+               req.l2_filter_id = vnic->fw_l2_filter_id[i];
+
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+
+       vnic->uc_filter_count = 1;
+
+       /* Snapshot the current unicast list under the address lock;
+        * overflow falls back to promiscuous mode.
+        */
+       netif_addr_lock_bh(dev);
+       if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+       } else {
+               netdev_for_each_uc_addr(ha, dev) {
+                       memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+                       off += ETH_ALEN;
+                       vnic->uc_filter_count++;
+               }
+       }
+       netif_addr_unlock_bh(dev);
+
+       for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+               rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
+               if (rc) {
+                       netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
+                                  rc);
+                       vnic->uc_filter_count = i;
+                       return rc;
+               }
+       }
+
+skip_uc:
+       rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+       if (rc)
+               netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+                          rc);
+
+       return rc;
+}
+
+/* If the chip and firmware support RFS */
+static bool bnxt_rfs_supported(struct bnxt *bp)
+{
+       if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+               return true;
+       return BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp);
+}
+
+/* If runtime conditions support RFS */
+/* Returns true when the current configuration has enough vnics and RSS
+ * contexts (one extra per RX ring) to run NTUPLE/aRFS filtering.  Always
+ * false without CONFIG_RFS_ACCEL or MSI-X.
+ */
+static bool bnxt_rfs_capable(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+       int vnics, max_vnics, max_rss_ctxs;
+
+       if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
+               return false;
+
+       vnics = 1 + bp->rx_nr_rings;
+       max_vnics = bnxt_get_max_func_vnics(bp);
+       max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
+
+       /* RSS contexts not a limiting factor */
+       if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+               max_rss_ctxs = max_vnics;
+       if (vnics > max_vnics || vnics > max_rss_ctxs) {
+               netdev_warn(bp->dev,
+                           "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+                           min(max_rss_ctxs - 1, max_vnics - 1));
+               return false;
+       }
+
+       return true;
+#else
+       return false;
+#endif
+}
+
+#ifdef NETDEV_FEATURE_CONTROL
+/* ndo_fix_features handler: drop NTUPLE when resources don't allow it,
+ * keep CTAG/STAG RX VLAN acceleration in lock-step, and disable RX VLAN
+ * acceleration entirely on a VF with a PF-assigned VLAN.
+ */
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+                                          netdev_features_t features)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
+               features &= ~NETIF_F_NTUPLE;
+
+       /* Both CTAG and STAG VLAN accelaration on the RX side have to be
+        * turned on or off together.
+        */
+       if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+           (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+               /* The current CTAG state decides the direction: toggle
+                * both off if CTAG was on, both on otherwise.
+                */
+               if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+                       features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+                                     NETIF_F_HW_VLAN_STAG_RX);
+               else
+                       features |= NETIF_F_HW_VLAN_CTAG_RX |
+                                   NETIF_F_HW_VLAN_STAG_RX;
+       }
+#ifdef CONFIG_BNXT_SRIOV
+       if (BNXT_VF(bp)) {
+               if (bp->vf.vlan) {
+                       features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+                                     NETIF_F_HW_VLAN_STAG_RX);
+               }
+       }
+#endif
+       return features;
+}
+
+/* ndo_set_features handler: translate netdev feature bits into bp->flags
+ * and apply them.  TPA-only toggles can sometimes be applied live; any
+ * other flag change (or enabling/disabling TPA outright) requires a
+ * close/open cycle.  Returns 0 or a negative error.
+ */
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u32 flags = bp->flags;
+       u32 changes;
+       int rc = 0;
+       bool re_init = false;
+       bool update_tpa = false;
+
+       flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39)
+       if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+               flags |= BNXT_FLAG_GRO;
+#endif
+       if (features & NETIF_F_LRO)
+               flags |= BNXT_FLAG_LRO;
+
+       if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+               flags &= ~BNXT_FLAG_TPA;
+
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               flags |= BNXT_FLAG_STRIP_VLAN;
+
+       if (features & NETIF_F_NTUPLE)
+               flags |= BNXT_FLAG_RFS;
+
+       changes = flags ^ bp->flags;
+       if (changes & BNXT_FLAG_TPA) {
+               update_tpa = true;
+               /* Turning TPA fully on or off needs ring re-init; only a
+                * change of TPA sub-mode can be applied in place.
+                */
+               if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+                   (flags & BNXT_FLAG_TPA) == 0)
+                       re_init = true;
+       }
+
+       if (changes & ~BNXT_FLAG_TPA)
+               re_init = true;
+
+       if (flags != bp->flags) {
+               u32 old_flags = bp->flags;
+
+               bp->flags = flags;
+
+               if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+                       /* Device closed: just record; rings are sized at
+                        * the next open.
+                        */
+                       if (update_tpa)
+                               bnxt_set_ring_params(bp);
+                       return rc;
+               }
+
+               if (re_init) {
+                       bnxt_close_nic(bp, false, false);
+                       if (update_tpa)
+                               bnxt_set_ring_params(bp);
+
+                       return bnxt_open_nic(bp, false, false);
+               }
+               if (update_tpa) {
+                       rc = bnxt_set_tpa(bp,
+                                         (flags & BNXT_FLAG_TPA) ?
+                                         true : false);
+                       /* Roll the flags back if the firmware refused. */
+                       if (rc)
+                               bp->flags = old_flags;
+               }
+       }
+       return rc;
+}
+#endif
+
+#define CHIMP_REG_VIEW_ADDR     0xb1000000
+/* Write a single 32-bit register through the chimp register view using
+ * the HWRM_DBG_WRITE_DIRECT firmware command.  Returns 0 or a negative
+ * errno from the HWRM layer.
+ */
+static int bnxt_dbg_hwrm_wr_reg(struct bnxt *bp, u32 reg_off, u32 reg_val)
+{
+       struct hwrm_dbg_write_direct_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_WRITE_DIRECT, -1, -1);
+       req.write_addr = cpu_to_le32(CHIMP_REG_VIEW_ADDR + reg_off);
+       req.write_data[0] = cpu_to_le32(reg_val);
+       /* TODO: support reg write to one register for now */
+       req.write_len32 = cpu_to_le32(1);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Read @num_words consecutive 32-bit registers starting at @reg_off (an
+ * offset within the chimp register view) into @reg_buf via the
+ * HWRM_DBG_READ_DIRECT firmware command.  The word count is capped at the
+ * size of the preallocated debug response buffer.  Returns 0 on success
+ * or a negative errno.
+ */
+static int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off,
+                               u16 num_words, u32 *reg_buf)
+{
+       int rc, i;
+       struct hwrm_dbg_read_direct_input req = {0};
+       struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
+       __le32 *dbg_reg_buf = (__le32 *)bp->hwrm_dbg_resp_addr;
+
+       if (!bp->hwrm_dbg_resp_addr)
+               return -ENOMEM;
+       num_words = min_t(u16, num_words, HWRM_DBG_REG_BUF_SIZE / 4);
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
+       req.host_dest_addr = cpu_to_le64(bp->hwrm_dbg_resp_dma_addr);
+       req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
+       req.read_len32 = cpu_to_le32(num_words);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_dbg_rd_reg failed. rc:%d\n", rc);
+               goto dbg_rd_reg_exit;
+       }
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_dbg_rd_reg error %d\n",
+                          resp->error_code);
+               /* Return a real errno instead of the previous bare -1;
+                * callers only test for non-zero, so this is compatible.
+                */
+               rc = -EIO;
+               goto dbg_rd_reg_exit;
+       }
+       /* Response data is little-endian; byte-swap into caller's buffer. */
+       for (i = 0; i < num_words; i++)
+               reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
+
+dbg_rd_reg_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Dump the hardware BD producer/consumer indices for the TX, RX, RX-agg
+ * and completion rings of bp->bnapi[index] via HWRM debug register reads.
+ * Gated on netif_msg_hw().  Note only val[0] carries the 0xDEADDEAD
+ * sentinel; the remaining slots are zero-initialized and stay 0 when a
+ * ring is absent or a read fails.
+ */
+static void bnxt_dbg_dump_hw_ring(struct bnxt *bp, u32 index)
+{
+       u32 val[12] = {0xDEADDEAD};
+       u32 fw_ring_id;
+       struct bnxt_napi *bnapi;
+       struct bnxt_tx_ring_info *txr;
+       struct bnxt_rx_ring_info *rxr;
+       struct bnxt_cp_ring_info *cpr;
+
+       if (!netif_msg_hw(bp))
+               return;
+
+       bnapi = bp->bnapi[index];
+       txr = bnapi->tx_ring;
+       rxr = bnapi->rx_ring;
+       cpr = &bnapi->cp_ring;
+
+       if (!txr)
+               goto skip_txr;
+
+       /* TBD prod/cons */
+       fw_ring_id = txr->tx_ring_struct.fw_ring_id;
+       if (fw_ring_id != INVALID_HW_RING_ID) {
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDETBD_REG_BD_PRODUCER_IDX + fw_ring_id * 4, 1,
+                       &val[0]);
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDETBD_REG_BD_REQ_CONSUMER_IDX + fw_ring_id * 4, 1,
+                       &val[1]);
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDETBD_REG_BD_CMPL_CONSUMER_IDX + fw_ring_id * 4, 1,
+                       &val[2]);
+       }
+
+skip_txr:
+       if (!rxr)
+               goto skip_rxr;
+
+       /* RBD prod/cons */
+       fw_ring_id = rxr->rx_ring_struct.fw_ring_id;
+       if (fw_ring_id != INVALID_HW_RING_ID) {
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDERBD_REG_BD_PRODUCER_IDX + fw_ring_id * 4, 1,
+                       &val[3]);
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDERBD_REG_BD_REQ_CONSUMER_IDX + fw_ring_id * 4, 1,
+                       &val[4]);
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDERBD_REG_BD_CMPL_CONSUMER_IDX + fw_ring_id * 4, 1,
+                       &val[5]);
+       }
+       /* AGG RBD prod/cons */
+       fw_ring_id = rxr->rx_agg_ring_struct.fw_ring_id;
+       if (fw_ring_id != INVALID_HW_RING_ID) {
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDERBD_REG_BD_PRODUCER_IDX + fw_ring_id * 4, 1,
+                       &val[6]);
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDERBD_REG_BD_REQ_CONSUMER_IDX + fw_ring_id * 4, 1,
+                       &val[7]);
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       BDERBD_REG_BD_CMPL_CONSUMER_IDX + fw_ring_id * 4, 1,
+                       &val[8]);
+       }
+
+skip_rxr:
+       /* CAG prod/cons/vector ctrl */
+       fw_ring_id = cpr->cp_ring_struct.fw_ring_id;
+       if (fw_ring_id != INVALID_HW_RING_ID) {
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       CAG_REG_CAG_PRODUCER_INDEX_REG + fw_ring_id * 4, 1,
+                       &val[9]);
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       CAG_REG_CAG_CONSUMER_INDEX_REG + fw_ring_id * 4, 1,
+                       &val[10]);
+               bnxt_dbg_hwrm_rd_reg(bp,
+                       CAG_REG_CAG_VECTOR_CTRL + fw_ring_id * 4, 1, &val[11]);
+       }
+       /* One consolidated log line with everything collected above. */
+       netdev_info(bp->dev, "[%d]: TBD{prod: %x cons: %x %x} "
+                   "RBD{prod: %x cons: %x %x} "
+                   "RBD AGG{prod: %x cons: %x %x} "
+                   "CAG{prod: %x cons: %x vec: %x}\n", index,
+                   val[0], val[1], val[2],
+                   val[3], val[4], val[5],
+                   val[6], val[7], val[8],
+                   val[9], val[10], val[11]);
+}
+
+/* Dump assorted TDC/TDI/TE_DEC/RDI hardware debug registers via HWRM
+ * read/write commands.  Gated on netif_msg_hw(); a failed read or write
+ * aborts the affected section but later sections are still attempted.
+ * Only val[0] carries the 0xDEADDEAD sentinel; the rest zero-init.
+ */
+static void bnxt_dbg_dump_hw_states(struct bnxt *bp)
+{
+       int rc, i;
+       u32 val[32] = {0xDEADDEAD};
+       u32 dbg_sel;
+
+       if (!netif_msg_hw(bp))
+               return;
+
+       /* dump tdc interrupt status */
+       rc = bnxt_dbg_hwrm_rd_reg(bp, TDC_REG_INT_STS_0, 1, val);
+       if (!rc)
+               netdev_info(bp->dev, "TDC_REG_INT_STS_0: %x\n", val[0]);
+       /* dump tdc debug bus */
+       netdev_info(bp->dev, "TDC debug bus dump:\n");
+       dbg_sel = 0x80000000;
+       for (i = 0; i < 5; i++) {
+               rc = bnxt_dbg_hwrm_wr_reg(bp, TDC_REG_TDC_DEBUG_CNTL, dbg_sel);
+               if (rc)
+                       break;
+               rc = bnxt_dbg_hwrm_rd_reg(bp, TDC_REG_TDC_DEBUG_STATUS, 1, val);
+               if (rc)
+                       break;
+               netdev_info(bp->dev, "\tdbg_sel %08x: %08x\n", dbg_sel, val[0]);
+               dbg_sel++;
+       }
+       /* dump tdi debug bus */
+       netdev_info(bp->dev, "TDI debug bus dump:\n");
+       dbg_sel = 0xf;
+       rc = bnxt_dbg_hwrm_wr_reg(bp, TDI_REG_DBG_DWORD_ENABLE, dbg_sel);
+       if (!rc) {
+               rc = bnxt_dbg_hwrm_rd_reg(bp, TDI_REG_DBG_OUT_DATA, 1, val);
+               if (!rc)
+                       netdev_info(bp->dev, "\tTDI_REG_DBG_DWORD_ENABLE (%x): "
+                                   "%08x\n", dbg_sel, val[0]);
+               /* Walk the TDI debug selects, 8 words per select. */
+               for (dbg_sel = 2; dbg_sel < 0x12; dbg_sel++) {
+                       rc = bnxt_dbg_hwrm_wr_reg(bp, TDI_REG_DBG_SELECT,
+                                                 dbg_sel);
+                       if (rc)
+                               break;
+                       rc = bnxt_dbg_hwrm_rd_reg(bp, TDI_REG_DBG_OUT_DATA,
+                                                 8, val);
+                       if (rc)
+                               break;
+                       netdev_info(bp->dev, "\tTDI_REG_DBG_OUT_DATA: "
+                                   "%08x %08x %08x %08x "
+                                   "%08x %08x %08x %08x\n",
+                                   val[0], val[1], val[2], val[3],
+                                   val[4], val[5], val[6], val[7]);
+               }
+       }
+       /* dump te_dec port and cmd credits */
+       rc = bnxt_dbg_hwrm_rd_reg(bp, TE_DEC_REG_PORT_CURRENT_CREDIT_REG,
+                                 HWRM_DBG_REG_BUF_SIZE/4, val);
+       if (!rc) {
+               netdev_info(bp->dev, "TE_DEC_REG_PORT_CURRENT_CREDIT_REG: "
+                           "%x %x %x\n", val[0], val[1], val[2]);
+               netdev_info(bp->dev, "TE_DEC_REG_PORT_CURRENT_CMD_CREDIT_REG: "
+                           "%x %x %x\n", val[16], val[17], val[18]);
+       }
+       /* dump partial RDI debug bus */
+       netdev_info(bp->dev, "RDI debug bus dump:\n");
+       dbg_sel = 0x80000000;
+       for (i = 0; i < 3; i++) {
+               rc = bnxt_dbg_hwrm_wr_reg(bp, RDI_REG_RDI_DEBUG_CONTROL_REG,
+                                         dbg_sel);
+               if (rc)
+                       break;
+               rc = bnxt_dbg_hwrm_rd_reg(bp, RDI_REG_RDI_DEBUG_STATUS_REG,
+                                         1, val);
+               if (rc)
+                       break;
+               netdev_info(bp->dev, "\tdbg_sel %x: %08x\n", dbg_sel, val[0]);
+               dbg_sel++;
+       }
+       /* One extra RDI select outside the loop above. */
+       dbg_sel = 0x80001000;
+       rc = bnxt_dbg_hwrm_wr_reg(bp, RDI_REG_RDI_DEBUG_CONTROL_REG,
+                                 dbg_sel);
+       if (!rc)
+               rc = bnxt_dbg_hwrm_rd_reg(bp, RDI_REG_RDI_DEBUG_STATUS_REG,
+                                 1, val);
+       if (!rc)
+               netdev_info(bp->dev, "\tdbg_sel %x: %08x\n", dbg_sel, val[0]);
+}
+
+/* Log the software-side TX ring state (fw ring id, prod/cons) for one
+ * NAPI instance.  No-op when the instance has no TX ring.
+ */
+static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
+{
+       struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+
+       if (!txr)
+               return;
+
+       netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+                   bnapi->index, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
+                   txr->tx_cons);
+}
+
+/* Log the software-side RX and RX-aggregation ring state for one NAPI
+ * instance.  No-op when the instance has no RX ring.
+ */
+static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
+{
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+       if (!rxr)
+               return;
+
+       netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+                   bnapi->index, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
+                   rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
+                   rxr->rx_sw_agg_prod);
+}
+
+/* Log the software-side completion ring state for one NAPI instance. */
+static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+       netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+                   bnapi->index, cpr->cp_ring_struct.fw_ring_id,
+                   cpr->cp_raw_cons);
+}
+
+/* Dump per-ring software state (when driver messages are enabled) and
+ * hardware ring state for every completion ring, then the global
+ * hardware debug registers.
+ */
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+
+               if (netif_msg_drv(bp)) {
+                       bnxt_dump_tx_sw_state(bnapi);
+                       bnxt_dump_rx_sw_state(bnapi);
+                       bnxt_dump_cp_sw_state(bnapi);
+               }
+               bnxt_dbg_dump_hw_ring(bp, i);
+       }
+       bnxt_dbg_dump_hw_states(bp);
+}
+
+/* Perform a NIC reset (close + reopen).  Unless @silent, dump debug
+ * state twice (before and after a short delay) and stop/restart the
+ * attached ULP drivers around the reset.
+ */
+static void bnxt_reset_task(struct bnxt *bp, bool silent)
+{
+       int rc;
+
+       if (!silent) {
+               bnxt_dbg_dump_states(bp);
+               usleep_range(10, 50);
+               bnxt_dbg_dump_states(bp);
+       }
+
+       if (!netif_running(bp->dev))
+               return;
+
+       if (!silent)
+               bnxt_ulp_stop(bp);
+       bnxt_close_nic(bp, false, false);
+       rc = bnxt_open_nic(bp, false, false);
+       /* Only restart ULPs if the reopen actually succeeded. */
+       if (!silent && !rc)
+               bnxt_ulp_start(bp);
+}
+
+/* ndo_tx_timeout handler: defer a full NIC reset to the slow-path
+ * workqueue (we cannot reset from this context).
+ */
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+       netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
+       schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Netpoll hook: schedule NAPI on every TX (or combined) ring. */
+static void bnxt_poll_controller(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_tx_ring_info *txr;
+       int i;
+
+       /* Only process tx rings/combined rings in netpoll mode. */
+       for (i = 0; i < bp->tx_nr_rings; i++) {
+               txr = &bp->tx_ring[i];
+               napi_schedule(&txr->bnapi->napi);
+       }
+}
+#endif
+
+/* Periodic driver timer.  Kicks the port-statistics slow-path event when
+ * the link is up and port stats are enabled, then re-arms itself.  Not
+ * re-armed once the device stops running.
+ */
+static void bnxt_timer(unsigned long data)
+{
+       struct bnxt *bp = (struct bnxt *)data;
+
+       if (!netif_running(bp->dev))
+               return;
+
+       /* Skip the work while interrupts are disabled, but keep ticking. */
+       if (!atomic_read(&bp->intr_sem)) {
+               if (bp->link_info.link_up && bp->stats_coal_ticks &&
+                   (bp->flags & BNXT_FLAG_PORT_STATS)) {
+                       set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
+                       schedule_work(&bp->sp_task);
+               }
+       }
+       mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* Acquire rtnl from within bnxt_sp_task(). */
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+{
+       /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+        * set.  If the device is being closed, bnxt_close() may be holding
+        * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
+        * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+        */
+       clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+       rtnl_lock();
+}
+
+/* Counterpart of bnxt_rtnl_lock_sp(): re-mark the sp task as running
+ * (so bnxt_close() will wait for it again) and release rtnl.
+ */
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
+       set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+       rtnl_unlock();
+}
+
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+       /* Serialize against ndo_close via rtnl; skip if already closed. */
+       bnxt_rtnl_lock_sp(bp);
+       if (test_bit(BNXT_STATE_OPEN, &bp->state))
+               bnxt_reset_task(bp, silent);
+       bnxt_rtnl_unlock_sp(bp);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
+/* Slow-path workqueue handler.  BNXT_STATE_IN_SP_TASK is held set for
+ * the duration so bnxt_close() can wait us out; handlers that need rtnl
+ * use bnxt_rtnl_lock_sp()/bnxt_rtnl_unlock_sp() (which temporarily drop
+ * that bit) and therefore must run last — see the comment below.
+ */
+static void bnxt_sp_task(struct work_struct *work)
+{
+       struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+
+       set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+       smp_mb__after_atomic();
+       if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+               clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+               return;
+       }
+
+       if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+               bnxt_cfg_rx_mode(bp);
+
+       if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+               bnxt_cfg_ntp_filters(bp);
+       if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+               bnxt_hwrm_exec_fwd_req(bp);
+       if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+               bnxt_hwrm_tunnel_dst_port_alloc(
+                       bp, bp->vxlan_port,
+                       TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+       }
+       if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+               bnxt_hwrm_tunnel_dst_port_free(
+                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+       }
+       if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+               bnxt_hwrm_tunnel_dst_port_alloc(
+                       bp, bp->nge_port,
+                       TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+       }
+       if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+               bnxt_hwrm_tunnel_dst_port_free(
+                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+       }
+       if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+               netdev_info(bp->dev, "Receive PF driver unload event!");
+
+       if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+               bnxt_hwrm_port_qstats(bp);
+
+       /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
+        * must be the last functions to be called before exiting.
+        */
+       if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+               int rc = 0;
+
+               if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+                                      &bp->sp_event))
+                       bnxt_hwrm_phy_qcaps(bp);
+
+               bnxt_rtnl_lock_sp(bp);
+               if (test_bit(BNXT_STATE_OPEN, &bp->state))
+                       rc = bnxt_update_link(bp, true);
+               bnxt_rtnl_unlock_sp(bp);
+               if (rc)
+                       netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+                                  rc);
+       }
+       if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+               bnxt_rtnl_lock_sp(bp);
+               if (test_bit(BNXT_STATE_OPEN, &bp->state))
+                       bnxt_get_port_module_status(bp);
+               bnxt_rtnl_unlock_sp(bp);
+       }
+       if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+               bnxt_reset(bp, false);
+
+       if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
+               bnxt_reset(bp, true);
+
+       smp_mb__before_atomic();
+       clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+}
+
+/* Under rtnl_lock.  Check that @rx RX rings and @tx TX rings (times the
+ * number of TCs, plus @tx_xdp XDP rings) fit within the hardware limits,
+ * and ask firmware to reserve the TX rings.  Returns 0 on success,
+ * -ENOMEM if the request cannot be satisfied, or a firmware errno.
+ */
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
+                      int tx_xdp)
+{
+       int tx_sets = tcs ? tcs : 1;
+       int tx_rings_needed;
+       int max_rx, max_tx;
+       int rc;
+
+       rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
+       if (rc)
+               return rc;
+
+       if (max_rx < rx)
+               return -ENOMEM;
+
+       tx_rings_needed = tx * tx_sets + tx_xdp;
+       if (max_tx < tx_rings_needed)
+               return -ENOMEM;
+
+       /* Fail if firmware trimmed the TX ring count below what we need. */
+       if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
+           tx_rings_needed < (tx * tx_sets + tx_xdp))
+               return -ENOMEM;
+       return 0;
+}
+
+/* Unmap any mapped BARs, in reverse order of bnxt_init_board().  Each
+ * pointer is NULLed so a repeated call is a harmless no-op.
+ */
+static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
+{
+       if (bp->bar2) {
+               pci_iounmap(pdev, bp->bar2);
+               bp->bar2 = NULL;
+       }
+       if (bp->bar1) {
+               pci_iounmap(pdev, bp->bar1);
+               bp->bar1 = NULL;
+       }
+       if (bp->bar0) {
+               pci_iounmap(pdev, bp->bar0);
+               bp->bar0 = NULL;
+       }
+}
+
+/* Undo bnxt_init_board(): unmap BARs, release regions, disable device. */
+static void bnxt_cleanup_pci(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+
+       bnxt_unmap_bars(bp, pdev);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+}
+
+/* One-time PCI bring-up for the device: enable it, request its regions,
+ * set the DMA mask, map BARs 0/2/4, and initialize the slow-path work,
+ * default coalescing values and the periodic timer.  On failure every
+ * resource acquired so far is unwound.  Returns 0 or a negative errno.
+ */
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+       int rc;
+       struct bnxt *bp = netdev_priv(dev);
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       /* enable device (incl. PCI PM wakeup), and bus-mastering */
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+               goto init_err;
+       }
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               dev_err(&pdev->dev,
+                       "Cannot find PCI device base address, aborting\n");
+               rc = -ENODEV;
+               goto init_err_disable;
+       }
+
+       rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+       if (rc) {
+               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+               goto init_err_disable;
+       }
+
+       /* Prefer 64-bit DMA, fall back to 32-bit. */
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+               dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+               /* Bug fix: the old code returned 0 (success) here and
+                * leaked the PCI regions requested just above.  Report an
+                * error and unwind through init_err_release instead.
+                */
+               rc = -EIO;
+               goto init_err_release;
+       }
+
+       pci_set_master(pdev);
+
+       bp->dev = dev;
+       bp->pdev = pdev;
+
+       bp->bar0 = pci_ioremap_bar(pdev, 0);
+       if (!bp->bar0) {
+               dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+               rc = -ENOMEM;
+               goto init_err_release;
+       }
+
+       bp->bar1 = pci_ioremap_bar(pdev, 2);
+       if (!bp->bar1) {
+               dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+               rc = -ENOMEM;
+               goto init_err_release;
+       }
+
+       bp->bar2 = pci_ioremap_bar(pdev, 4);
+       if (!bp->bar2) {
+               dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+               rc = -ENOMEM;
+               goto init_err_release;
+       }
+
+       pci_enable_pcie_error_reporting(pdev);
+
+       INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+       spin_lock_init(&bp->ntp_fltr_lock);
+
+       bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+       bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+       /* tick values in micro seconds */
+       bp->rx_coal_ticks = 12;
+       bp->rx_coal_bufs = 30;
+       bp->rx_coal_ticks_irq = 1;
+       bp->rx_coal_bufs_irq = 2;
+
+       bp->tx_coal_ticks = 25;
+       bp->tx_coal_bufs = 30;
+       bp->tx_coal_ticks_irq = 2;
+       bp->tx_coal_bufs_irq = 2;
+
+       bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
+
+       init_timer(&bp->timer);
+       bp->timer.data = (unsigned long)bp;
+       bp->timer.function = bnxt_timer;
+       bp->current_interval = BNXT_TIMER_INTERVAL;
+
+       clear_bit(BNXT_STATE_OPEN, &bp->state);
+       return 0;
+
+init_err_release:
+       /* bnxt_unmap_bars() handles partially-mapped (NULL) BARs. */
+       bnxt_unmap_bars(bp, pdev);
+       pci_release_regions(pdev);
+
+init_err_disable:
+       pci_disable_device(pdev);
+
+init_err:
+       return rc;
+}
+
+/* rtnl_lock held.  ndo_set_mac_address handler: the new address must be
+ * accepted by firmware (bnxt_approve_mac) before being applied; changing
+ * it on a running device needs a close/open cycle.
+ */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct sockaddr *addr = p;
+       int rc;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       rc = bnxt_approve_mac(bp, addr->sa_data);
+       if (rc)
+               return rc;
+
+       /* Nothing to do if the address is unchanged. */
+       if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+               return 0;
+
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       if (!netif_running(dev))
+               return 0;
+
+       bnxt_close_nic(bp, false, false);
+       return bnxt_open_nic(bp, false, false);
+}
+
+/* rtnl_lock held.  ndo_change_mtu handler: ring geometry depends on the
+ * MTU, so a running device is closed, reconfigured and reopened.
+ */
+static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       bool running = netif_running(dev);
+
+#ifndef HAVE_MIN_MTU
+       /* Older kernels do not range-check the MTU for us. */
+       if (new_mtu < 60 || new_mtu > 9500)
+               return -EINVAL;
+
+       if (BNXT_RX_PAGE_MODE(bp) && new_mtu > BNXT_MAX_PAGE_MODE_MTU)
+               return -EINVAL;
+#endif
+
+       if (running)
+               bnxt_close_nic(bp, false, false);
+
+       dev->mtu = new_mtu;
+       bnxt_set_ring_params(bp);
+
+       if (running)
+               return bnxt_open_nic(bp, false, false);
+
+       return 0;
+}
+
+#if defined(HAVE_SETUP_TC) || defined(CONFIG_BNXT_DCB)
+/* Configure @tc traffic classes (mqprio).  Each TC gets
+ * tx_nr_rings_per_tc TX rings, so the ring reservation is re-checked and
+ * a running device is closed/reopened to re-allocate HW resources.
+ * Returns 0 on success or a negative errno.
+ */
+int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       bool sh = false;
+       int rc;
+
+       if (tc > bp->max_tc) {
+               netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
+                          tc, bp->max_tc);
+               return -EINVAL;
+       }
+
+       /* Already configured for this many TCs: nothing to do. */
+       if (netdev_get_num_tc(dev) == tc)
+               return 0;
+
+       if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+               sh = true;
+
+       /* Make sure the requested ring counts fit before disrupting. */
+       rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+                               sh, tc, bp->tx_nr_rings_xdp);
+       if (rc)
+               return rc;
+
+       /* Needs to close the device and do hw resource re-allocations */
+       if (netif_running(bp->dev))
+               bnxt_close_nic(bp, true, false);
+
+       if (tc) {
+               bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
+               netdev_set_num_tc(dev, tc);
+       } else {
+               bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+               netdev_reset_tc(dev);
+       }
+       /* Shared mode: CP rings cover both TX and RX; otherwise one each. */
+       bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+                              bp->tx_nr_rings + bp->rx_nr_rings;
+       bp->num_stat_ctxs = bp->cp_nr_rings;
+
+       if (netif_running(bp->dev))
+               return bnxt_open_nic(bp, true, false);
+
+       return 0;
+}
+
+#ifdef HAVE_TC_TO_NETDEV
+/* ndo_setup_tc wrapper: only TC_SETUP_MQPRIO is supported.  Unpack the
+ * kernel-version-dependent argument and forward to bnxt_setup_mq_tc().
+ */
+#ifdef HAVE_CHAIN_INDEX
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+                        __be16 proto, struct tc_to_netdev *ntc)
+#else
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+                        struct tc_to_netdev *ntc)
+#endif
+{
+       if (ntc->type != TC_SETUP_MQPRIO)
+               return -EINVAL;
+
+#ifdef HAVE_MQPRIO_QOPT
+       ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
+#else
+       return bnxt_setup_mq_tc(dev, ntc->tc);
+#endif
+}
+#endif
+
+#endif
+
+#ifdef CONFIG_RFS_ACCEL
+#ifdef NEW_FLOW_KEYS
+/* Return true if two ntuple filters describe the same flow: same src/dst
+ * addresses and ports, same L3/L4 protocols, same MAC addresses (struct
+ * flow_keys layout used by newer kernels).
+ */
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+                           struct bnxt_ntuple_filter *f2)
+{
+       struct flow_keys *keys1 = &f1->fkeys;
+       struct flow_keys *keys2 = &f2->fkeys;
+
+       if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
+           keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
+           keys1->ports.ports == keys2->ports.ports &&
+           keys1->basic.ip_proto == keys2->basic.ip_proto &&
+           keys1->basic.n_proto == keys2->basic.n_proto &&
+#ifdef HAVE_NEW_FLOW_DISSECTOR
+           keys1->control.flags == keys2->control.flags &&
+#endif
+           ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
+           ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
+               return true;
+
+       return false;
+}
+
+#else
+
+/* Legacy-flow_keys variant of the filter comparison for older kernels
+ * without the new flow dissector layout.
+ */
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+                           struct bnxt_ntuple_filter *f2)
+{
+       struct flow_keys *keys1 = &f1->fkeys;
+       struct flow_keys *keys2 = &f2->fkeys;
+
+       if (keys1->src == keys2->src &&
+           keys1->dst == keys2->dst &&
+           keys1->ports == keys2->ports &&
+           keys1->ip_proto == keys2->ip_proto &&
+#ifdef HAVE_N_PROTO
+           keys1->n_proto == keys2->n_proto &&
+#endif
+           ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
+           ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
+               return true;
+
+       return false;
+}
+#endif  /* NEW_FLOW_KEYS */
+
+/* ndo_rx_flow_steer (accelerated RFS): dissect @skb, and if no matching
+ * ntuple filter exists yet, allocate one steering this flow to
+ * @rxq_index and kick the sp task to program it into hardware.  Returns
+ * the filter's sw_id on success, 0 if an equivalent filter already
+ * exists, or a negative errno (-EPROTONOSUPPORT for flows the hardware
+ * cannot steer).
+ */
+static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                             u16 rxq_index, u32 flow_id)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_ntuple_filter *fltr, *new_fltr;
+       struct flow_keys *fkeys;
+       struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+       int rc = 0, idx, bit_id, l2_idx = 0;
+       struct hlist_head *head;
+       struct hlist_node __maybe_unused *node;
+
+#ifdef HAVE_INNER_NETWORK_OFFSET
+       if (skb->encapsulation)
+               return -EPROTONOSUPPORT;
+#endif
+
+       /* Non-primary dest MAC: find the matching unicast L2 filter slot
+        * (1-based) so the ntuple filter can reference it.
+        */
+       if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
+               struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+               int off = 0, j;
+
+               netif_addr_lock_bh(dev);
+               for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
+                       if (ether_addr_equal(eth->h_dest,
+                                            vnic->uc_list + off)) {
+                               l2_idx = j + 1;
+                               break;
+                       }
+               }
+               netif_addr_unlock_bh(dev);
+               if (!l2_idx)
+                       return -EINVAL;
+       }
+       new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
+       if (!new_fltr)
+               return -ENOMEM;
+
+       fkeys = &new_fltr->fkeys;
+#ifdef NEW_FLOW_KEYS
+       if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+
+       /* Only TCP/UDP over IPv4/IPv6 can be steered. */
+       if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
+            fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
+           ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
+            (fkeys->basic.ip_proto != IPPROTO_UDP))) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+       /* IPv6 and encapsulated flows need HWRM spec 1.6.1 or later. */
+       if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
+           bp->hwrm_spec_code < 0x10601) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+#ifdef HAVE_NEW_FLOW_DISSECTOR
+       if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
+           bp->hwrm_spec_code < 0x10601) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+#endif
+#else
+       if (!skb_flow_dissect(skb, fkeys)) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+
+#ifdef HAVE_N_PROTO
+       if ((fkeys->n_proto != htons(ETH_P_IP)) ||
+           ((fkeys->ip_proto != IPPROTO_TCP) &&
+            (fkeys->ip_proto != IPPROTO_UDP))) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+#else
+       if ((skb->protocol != htons(ETH_P_IP)) ||
+           ((fkeys->ip_proto != IPPROTO_TCP) &&
+            (fkeys->ip_proto != IPPROTO_UDP))) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+#endif
+#endif
+
+       memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
+       memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+
+#ifdef HAVE_GET_HASH_RAW
+       idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+#else
+       idx = skb->rxhash & BNXT_NTP_FLTR_HASH_MASK;
+#endif
+       /* Duplicate check under RCU; an existing match means success (0). */
+       head = &bp->ntp_fltr_hash_tbl[idx];
+       rcu_read_lock();
+       __hlist_for_each_entry_rcu(fltr, node, head, hash) {
+               if (bnxt_fltr_match(fltr, new_fltr)) {
+                       rcu_read_unlock();
+                       rc = 0;
+                       goto err_free;
+               }
+       }
+       rcu_read_unlock();
+
+       /* Allocate a filter id from the bitmap under the ntp_fltr lock. */
+       spin_lock_bh(&bp->ntp_fltr_lock);
+       bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+                                        BNXT_NTP_FLTR_MAX_FLTR, 0);
+       if (bit_id < 0) {
+               spin_unlock_bh(&bp->ntp_fltr_lock);
+               rc = -ENOMEM;
+               goto err_free;
+       }
+
+       new_fltr->sw_id = (u16)bit_id;
+       new_fltr->flow_id = flow_id;
+       new_fltr->l2_fltr_idx = l2_idx;
+       new_fltr->rxq = rxq_index;
+       hlist_add_head_rcu(&new_fltr->hash, head);
+       bp->ntp_fltr_count++;
+       spin_unlock_bh(&bp->ntp_fltr_lock);
+
+       /* HW programming is deferred to the sp task. */
+       set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+       schedule_work(&bp->sp_task);
+
+       return new_fltr->sw_id;
+
+err_free:
+       kfree(new_fltr);
+       return rc;
+}
+
+/* sp-task worker for aRFS filters: walk all hash buckets, program
+ * not-yet-valid filters into hardware, and free filters that RPS says
+ * have expired (or that failed to program).
+ */
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+               struct hlist_head *head;
+               struct hlist_node *tmp, __maybe_unused *nxt;
+               struct bnxt_ntuple_filter *fltr;
+               int rc;
+
+               head = &bp->ntp_fltr_hash_tbl[i];
+               __hlist_for_each_entry_safe(fltr, nxt, tmp, head, hash) {
+                       bool del = false;
+
+                       if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
+                               /* Already in HW: drop it if the flow expired. */
+                               if (rps_may_expire_flow(bp->dev, fltr->rxq,
+                                                       fltr->flow_id,
+                                                       fltr->sw_id)) {
+                                       bnxt_hwrm_cfa_ntuple_filter_free(bp,
+                                                                        fltr);
+                                       del = true;
+                               }
+                       } else {
+                               /* Not yet in HW: try to program it now. */
+                               rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
+                                                                      fltr);
+                               if (rc)
+                                       del = true;
+                               else
+                                       set_bit(BNXT_FLTR_VALID, &fltr->state);
+                       }
+
+                       if (del) {
+                               /* Unhash under the lock, wait out RCU
+                                * readers, then release the id and memory.
+                                */
+                               spin_lock_bh(&bp->ntp_fltr_lock);
+                               hlist_del_rcu(&fltr->hash);
+                               bp->ntp_fltr_count--;
+                               spin_unlock_bh(&bp->ntp_fltr_lock);
+                               synchronize_rcu();
+                               clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+                               kfree(fltr);
+                       }
+               }
+       }
+}
+
+#else
+
+/* Stub when CONFIG_RFS_ACCEL is not enabled: no aRFS filters to reconcile. */
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+#ifdef HAVE_NDO_ADD_VXLAN
+/* ndo_add_vxlan_port handler (older kernels): record a VXLAN UDP port
+ * offload request.  Only a single VXLAN port is supported; additional
+ * requests for the same port just bump a refcount.  The firmware update
+ * itself is deferred to the sp_task workqueue.
+ */
+static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+                               __be16 port)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return;
+
+       if (sa_family != AF_INET && sa_family != AF_INET6)
+               return;
+
+       /* A different port is already offloaded; only one is supported. */
+       if (bp->vxlan_port_cnt && bp->vxlan_port != port)
+               return;
+
+       if (++bp->vxlan_port_cnt == 1) {
+               bp->vxlan_port = port;
+               set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+       }
+}
+
+/* ndo_del_vxlan_port handler (older kernels): drop one reference on the
+ * offloaded VXLAN port; the firmware delete is queued only when the last
+ * reference goes away.
+ */
+static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+                               __be16 port)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return;
+
+       if (sa_family != AF_INET && sa_family != AF_INET6)
+               return;
+
+       /* Ignore ports we never offloaded. */
+       if (!bp->vxlan_port_cnt || bp->vxlan_port != port)
+               return;
+
+       if (--bp->vxlan_port_cnt == 0) {
+               set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+       }
+}
+#elif defined(HAVE_NDO_UDP_TUNNEL)
+/* ndo_udp_tunnel_add handler: record an offloaded UDP tunnel port.
+ * One VXLAN port and one GENEVE port are supported; repeat requests for
+ * the same port only bump a refcount.  The firmware update runs from the
+ * sp_task workqueue.
+ */
+static void bnxt_udp_tunnel_add(struct net_device *dev,
+                               struct udp_tunnel_info *ti)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
+               return;
+
+       if (!netif_running(dev))
+               return;
+
+       switch (ti->type) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
+                       return;
+
+               bp->vxlan_port_cnt++;
+               /* Only the first reference programs the port. */
+               if (bp->vxlan_port_cnt != 1)
+                       return;
+
+               bp->vxlan_port = ti->port;
+               set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+               break;
+       case UDP_TUNNEL_TYPE_GENEVE:
+               if (bp->nge_port_cnt && bp->nge_port != ti->port)
+                       return;
+
+               bp->nge_port_cnt++;
+               if (bp->nge_port_cnt != 1)
+                       return;
+
+               bp->nge_port = ti->port;
+               set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
+               break;
+       default:
+               return;
+       }
+
+       /* Single scheduling point.  The old VXLAN path scheduled sp_task
+        * both inside the case and here (double schedule), and a repeat
+        * add fell through and scheduled it with no event bit set.
+        */
+       schedule_work(&bp->sp_task);
+}
+
+/* ndo_udp_tunnel_del handler: drop one reference on an offloaded UDP
+ * tunnel port and queue the firmware delete when the last reference is
+ * released.
+ */
+static void bnxt_udp_tunnel_del(struct net_device *dev,
+                               struct udp_tunnel_info *ti)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (ti->sa_family != AF_INET && ti->sa_family != AF_INET6)
+               return;
+
+       if (!netif_running(dev))
+               return;
+
+       if (ti->type == UDP_TUNNEL_TYPE_VXLAN) {
+               if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
+                       return;
+               if (--bp->vxlan_port_cnt)
+                       return;
+               set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+       } else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) {
+               if (!bp->nge_port_cnt || bp->nge_port != ti->port)
+                       return;
+               if (--bp->nge_port_cnt)
+                       return;
+               set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
+       } else {
+               return;
+       }
+
+       schedule_work(&bp->sp_task);
+}
+#endif
+
+#ifdef OLD_VLAN
+/* Legacy VLAN group registration (OLD_VLAN kernels).  When the device is
+ * running, NAPI and interrupts are disabled before swapping the vlgrp
+ * pointer so the RX path never sees it change mid-poll.
+ */
+static void bnxt_vlan_rx_register(struct net_device *dev,
+                                 struct vlan_group *vlgrp)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!netif_running(dev)) {
+               bp->vlgrp = vlgrp;
+               return;
+       }
+       bnxt_disable_napi(bp);
+       bnxt_disable_int_sync(bp);
+       bp->vlgrp = vlgrp;
+       bnxt_enable_napi(bp);
+       bnxt_enable_int(bp);
+}
+#endif
+
+#ifdef HAVE_NDO_BRIDGE_GETLINK
+/* ndo_bridge_getlink handler: report the current eswitch bridge mode
+ * via the default netlink helper.
+ */
+static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                              struct net_device *dev, u32 filter_mask,
+                              int nlflags)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u16 mode = bp->br_mode;
+
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags,
+                                      filter_mask, NULL);
+}
+
+/* ndo_bridge_setlink handler: parse IFLA_BRIDGE_MODE from the request
+ * and ask firmware to switch the eswitch bridge mode if it changed.
+ * Requires firmware spec 1.7.7+ and a single-PF port.
+ */
+static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+                              u16 flags)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct nlattr *br_spec, *attr;
+       int rem;
+       int rc = 0;
+
+       if (!BNXT_SINGLE_PF(bp) || bp->hwrm_spec_code < 0x10707)
+               return -EOPNOTSUPP;
+
+       br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
+
+       nla_for_each_nested(attr, br_spec, rem) {
+               u16 mode;
+
+               if (nla_type(attr) != IFLA_BRIDGE_MODE)
+                       continue;
+               if (nla_len(attr) < sizeof(mode))
+                       return -EINVAL;
+
+               mode = nla_get_u16(attr);
+               /* Only commit to hardware when the mode actually changes. */
+               if (mode != bp->br_mode) {
+                       rc = bnxt_hwrm_set_br_mode(bp, mode);
+                       if (!rc)
+                               bp->br_mode = mode;
+               }
+               break;
+       }
+       return rc;
+}
+#endif
+
+/* Net device operations.  Entries are conditionally compiled to match
+ * the feature set of the running kernel (HAVE_* / NETDEV_* tests).
+ */
+static const struct net_device_ops bnxt_netdev_ops = {
+       .ndo_open               = bnxt_open,
+       .ndo_start_xmit         = bnxt_start_xmit,
+       .ndo_stop               = bnxt_close,
+#ifdef NETDEV_GET_STATS64
+       .ndo_get_stats64        = bnxt_get_stats64,
+#else
+       .ndo_get_stats          = bnxt_get_stats,
+#endif
+       .ndo_set_rx_mode        = bnxt_set_rx_mode,
+       .ndo_do_ioctl           = bnxt_ioctl,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = bnxt_change_mac_addr,
+       .ndo_change_mtu         = bnxt_change_mtu,
+#ifdef NETDEV_FEATURE_CONTROL
+       .ndo_fix_features       = bnxt_fix_features,
+       .ndo_set_features       = bnxt_set_features,
+#endif
+       .ndo_tx_timeout         = bnxt_tx_timeout,
+#ifdef CONFIG_BNXT_SRIOV
+#ifdef HAVE_NDO_GET_VF_CONFIG
+       .ndo_get_vf_config      = bnxt_get_vf_config,
+       .ndo_set_vf_mac         = bnxt_set_vf_mac,
+       .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
+       .ndo_set_vf_rate        = bnxt_set_vf_bw,
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
+#endif
+#ifdef HAVE_VF_SPOOFCHK
+       .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
+#endif
+#endif
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = bnxt_poll_controller,
+#endif
+#ifdef HAVE_SETUP_TC
+#ifdef HAVE_TC_TO_NETDEV
+       .ndo_setup_tc           = bnxt_setup_tc,
+#else
+       .ndo_setup_tc           = bnxt_setup_mq_tc,
+#endif
+#endif
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
+#endif
+#if defined(HAVE_NDO_ADD_VXLAN)
+       .ndo_add_vxlan_port     = bnxt_add_vxlan_port,
+       .ndo_del_vxlan_port     = bnxt_del_vxlan_port,
+#elif defined(HAVE_NDO_UDP_TUNNEL)
+       .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
+       .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
+#endif
+#ifdef BNXT_PRIV_RX_BUSY_POLL
+       .ndo_busy_poll          = bnxt_busy_poll,
+#endif
+#ifdef OLD_VLAN
+       .ndo_vlan_rx_register   = bnxt_vlan_rx_register,
+#endif
+#ifdef HAVE_NDO_XDP
+       .ndo_xdp                = bnxt_xdp,
+#endif
+#ifdef HAVE_NDO_BRIDGE_GETLINK
+       .ndo_bridge_getlink     = bnxt_bridge_getlink,
+       .ndo_bridge_setlink     = bnxt_bridge_setlink,
+#endif
+};
+
+/* PCI remove callback: tear down in reverse order of bnxt_init_one().
+ * The netdev is unregistered before the sp_task is cancelled so no new
+ * work can be queued, and interrupt/firmware resources are released
+ * before the PCI cleanup frees the BARs.
+ */
+static void bnxt_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (BNXT_PF(bp))
+               bnxt_sriov_disable(bp);
+
+       bnxt_ptp_free(bp);
+       pci_disable_pcie_error_reporting(pdev);
+       unregister_netdev(dev);
+       cancel_work_sync(&bp->sp_task);
+       bp->sp_event = 0;
+
+       bnxt_clear_int_mode(bp);
+       bnxt_hwrm_func_drv_unrgtr(bp);
+       bnxt_free_hwrm_resources(bp);
+       bnxt_ethtool_free(bp);
+       bnxt_dcb_free(bp);
+       kfree(bp->edev);
+       bp->edev = NULL;
+       kfree(bp->ptp_cfg);
+       bp->ptp_cfg = NULL;
+#ifdef HAVE_NDO_XDP
+       /* Release the reference taken when an XDP program was attached. */
+       if (bp->xdp_prog)
+               bpf_prog_put(bp->xdp_prog);
+#endif
+       bnxt_cleanup_pci(bp);
+       free_netdev(dev);
+}
+
+/* Query PHY capabilities and current link state from firmware, then seed
+ * the driver's ethtool settings (autoneg, advertised speeds, flow control)
+ * from the NVM-configured values.  Returns 0 or a firmware error code.
+ */
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+       int rc = 0;
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       rc = bnxt_hwrm_phy_qcaps(bp);
+       if (rc) {
+               netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
+                          rc);
+               return rc;
+       }
+
+       rc = bnxt_update_link(bp, false);
+       if (rc) {
+               netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+                          rc);
+               return rc;
+       }
+
+       /* Older firmware does not have supported_auto_speeds, so assume
+        * that all supported speeds can be autonegotiated.
+        */
+       if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
+               link_info->support_auto_speeds = link_info->support_speeds;
+
+       /* Initialize the ethtool setting copy with NVM settings */
+       if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+               link_info->autoneg = BNXT_AUTONEG_SPEED;
+               if (bp->hwrm_spec_code >= 0x10201) {
+                       /* Spec 1.2.1+ reports pause autoneg separately. */
+                       if (link_info->auto_pause_setting &
+                           PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
+                               link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+               } else {
+                       link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+               }
+               link_info->advertising = link_info->auto_link_speeds;
+       } else {
+               link_info->req_link_speed = link_info->force_link_speed;
+               link_info->req_duplex = link_info->duplex_setting;
+       }
+       if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+               link_info->req_flow_ctrl =
+                       link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
+       else
+               link_info->req_flow_ctrl = link_info->force_pause_setting;
+
+       return rc;
+}
+
+/* Return the number of MSI-X vectors the device advertises, or 1 when it
+ * has no MSI-X capability.  The Table Size field in the MSI-X Message
+ * Control register is N-1 encoded, hence the +1.
+ */
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+       u16 ctrl;
+#ifndef HAVE_MSIX_CAP
+       int msix_cap;
+
+       msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+       if (!msix_cap)
+               return 1;
+
+       pci_read_config_word(pdev, msix_cap + PCI_MSIX_FLAGS, &ctrl);
+#else
+       if (!pdev->msix_cap)
+               return 1;
+
+       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+#endif
+       return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+/* Report the maximum RX/TX/completion ring counts granted by firmware,
+ * taking them from the VF or PF resource block as appropriate.  The RX
+ * count is further limited by hardware ring groups, Nitro A0 reserved
+ * rings, and halved when aggregation rings consume one RX ring each.
+ */
+static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+                               int *max_cp)
+{
+       int max_ring_grps = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+       if (!BNXT_PF(bp)) {
+               *max_tx = bp->vf.max_tx_rings;
+               *max_rx = bp->vf.max_rx_rings;
+               *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+               *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
+               max_ring_grps = bp->vf.max_hw_ring_grps;
+       } else
+#endif
+       {
+               *max_tx = bp->pf.max_tx_rings;
+               *max_rx = bp->pf.max_rx_rings;
+               *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+               *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
+               max_ring_grps = bp->pf.max_hw_ring_grps;
+       }
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
+               /* Nitro A0 PF reserves 1 CP ring and 2 RX rings. */
+               *max_cp -= 1;
+               *max_rx -= 2;
+       }
+       if (bp->flags & BNXT_FLAG_AGG_RINGS)
+               *max_rx >>= 1;
+       *max_rx = min_t(int, *max_rx, max_ring_grps);
+}
+
+/* Compute usable max RX/TX ring counts for the caller.  Fails with
+ * -ENOMEM when any of the RX/TX/completion resources is exhausted;
+ * otherwise trims the counts to fit the available completion rings.
+ */
+int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
+{
+       int rx = 0, tx = 0, cp = 0;
+
+       _bnxt_get_max_rings(bp, &rx, &tx, &cp);
+       if (rx == 0 || tx == 0 || cp == 0)
+               return -ENOMEM;
+
+       *max_rx = rx;
+       *max_tx = tx;
+       return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
+}
+
+/* Compute default max RX/TX ring counts.  If the full configuration does
+ * not fit, retry with aggregation rings (and hence LRO) disabled.  When
+ * the device is RoCE-capable, trim the counts so a minimum set of
+ * completion rings, stat contexts and IRQs remains for the RDMA driver.
+ */
+static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+                              bool shared)
+{
+       int rc;
+
+       rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+       if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
+               /* Not enough rings, try disabling agg rings. */
+               bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+               rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+               if (rc)
+                       return rc;
+               bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+               bp->dev->hw_features &= ~NETIF_F_LRO;
+               bp->dev->features &= ~NETIF_F_LRO;
+               bnxt_set_ring_params(bp);
+       }
+#ifdef CONFIG_BNXT_RE
+       if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+               int max_cp, max_stat, max_irq;
+
+               /* Reserve minimum resources for RoCE */
+               max_cp = bnxt_get_max_func_cp_rings(bp);
+               max_stat = bnxt_get_max_func_stat_ctxs(bp);
+               max_irq = bnxt_get_max_func_irqs(bp);
+               if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
+                   max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
+                   max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
+                       return 0;
+
+               max_cp -= BNXT_MIN_ROCE_CP_RINGS;
+               max_irq -= BNXT_MIN_ROCE_CP_RINGS;
+               max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
+               max_cp = min_t(int, max_cp, max_irq);
+               max_cp = min_t(int, max_cp, max_stat);
+               rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
+               /* Trim failure is non-fatal; keep the current counts. */
+               if (rc)
+                       rc = 0;
+       }
+#endif
+       return rc;
+}
+
+/* Pick default ring counts from the default RSS queue count and the
+ * resources firmware granted, then reserve the TX rings with firmware.
+ * Returns 0 on success or a negative errno when no rings are available.
+ */
+static int bnxt_set_dflt_rings(struct bnxt *bp)
+{
+       int dflt_rings, max_rx_rings, max_tx_rings, rc;
+       bool sh = true;
+
+       /* Shared completion rings are always used for the default config;
+        * the previous "if (sh)" guard was a tautology.
+        */
+       bp->flags |= BNXT_FLAG_SHARED_RINGS;
+       dflt_rings = netif_get_num_default_rss_queues();
+       rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
+       if (rc)
+               return rc;
+       bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+       bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+
+       rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+       if (rc)
+               netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+
+       bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+       /* In shared mode one completion ring serves an RX/TX pair. */
+       bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+                              bp->tx_nr_rings + bp->rx_nr_rings;
+       bp->num_stat_ctxs = bp->cp_nr_rings;
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+               bp->rx_nr_rings++;
+               bp->cp_nr_rings++;
+       }
+       return rc;
+}
+
+/* Re-read PF resource limits from firmware and subtract the share already
+ * claimed by the RoCE ULP.  Caller must hold RTNL.
+ */
+void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+{
+       ASSERT_RTNL();
+       bnxt_hwrm_func_qcaps(bp);
+       bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+}
+
+/* Log the negotiated PCIe link speed and width at probe time, or a
+ * fallback message when the link info cannot be determined.
+ */
+static void bnxt_parse_log_pcie_link(struct bnxt *bp)
+{
+       enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+       enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+       const char *speed_str;
+
+       if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+           speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+               netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
+               return;
+       }
+
+       switch (speed) {
+       case PCIE_SPEED_2_5GT:
+               speed_str = "2.5GT/s";
+               break;
+       case PCIE_SPEED_5_0GT:
+               speed_str = "5.0GT/s";
+               break;
+       case PCIE_SPEED_8_0GT:
+               speed_str = "8.0GT/s";
+               break;
+       default:
+               speed_str = "Unknown";
+               break;
+       }
+       netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n", speed_str, width);
+}
+
+/* PCI probe entry point: allocate the net_device, map the board,
+ * handshake with firmware, discover resources, configure features and
+ * default rings, then register the netdev.  Failures unwind through the
+ * init_err_* labels in reverse order of setup.
+ */
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       static int version_printed;
+       struct net_device *dev;
+       struct bnxt *bp;
+       int rc, max_irqs;
+
+       if (pci_is_bridge(pdev))
+               return -ENODEV;
+
+       if (version_printed++ == 0)
+               pr_info("%s", version);
+
+       max_irqs = bnxt_get_max_irq(pdev);
+       dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+       if (!dev)
+               return -ENOMEM;
+
+       bp = netdev_priv(dev);
+
+       if (bnxt_vf_pciid(ent->driver_data))
+               bp->flags |= BNXT_FLAG_VF;
+
+#ifndef HAVE_MSIX_CAP
+       if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+#else
+       if (pdev->msix_cap)
+#endif
+               bp->flags |= BNXT_FLAG_MSIX_CAP;
+
+       rc = bnxt_init_board(pdev, dev);
+       if (rc < 0)
+               goto init_err_free;
+
+       dev->netdev_ops = &bnxt_netdev_ops;
+       dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+       dev->ethtool_ops = &bnxt_ethtool_ops;
+       pci_set_drvdata(pdev, dev);
+
+       rc = bnxt_alloc_hwrm_resources(bp);
+       if (rc)
+               goto init_err_pci_clean;
+
+       mutex_init(&bp->hwrm_cmd_lock);
+       /* Firmware handshake: version query, then function reset. */
+       rc = bnxt_hwrm_ver_get(bp);
+       if (rc)
+               goto init_err_pci_clean;
+
+       rc = bnxt_hwrm_func_reset(bp);
+       if (rc)
+               goto init_err_pci_clean;
+
+       bnxt_hwrm_fw_set_time(bp);
+
+       dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+                          NETIF_F_TSO | NETIF_F_TSO6 |
+                          NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+                          NETIF_F_GSO_IPXIP4 |
+                          NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+                          NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
+                          NETIF_F_RXCSUM | NETIF_F_GRO;
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39)
+       if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+               dev->hw_features |= NETIF_F_LRO;
+#endif
+
+#ifdef NETDEV_HW_ENC_FEATURES
+       dev->hw_enc_features =
+                       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+                       NETIF_F_TSO | NETIF_F_TSO6 |
+                       NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+                       NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+                       NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
+#endif
+#ifdef HAVE_GSO_PARTIAL_FEATURES
+       dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                   NETIF_F_GSO_GRE_CSUM;
+#endif
+
+       dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
+       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+       dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+       dev->priv_flags |= IFF_UNICAST_FLT;
+
+#ifdef HAVE_MIN_MTU
+       /* MTU range: 60 - 9500 */
+       dev->min_mtu = ETH_ZLEN;
+       dev->max_mtu = BNXT_MAX_MTU;
+#endif
+
+#ifdef CONFIG_BNXT_SRIOV
+       init_waitqueue_head(&bp->sriov_cfg_wait);
+#endif
+       /* 5731x chips use a different TPA/GRO aggregation format. */
+       bp->gro_func = bnxt_gro_func_5730x;
+       if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
+               bp->gro_func = bnxt_gro_func_5731x;
+
+       rc = bnxt_hwrm_func_drv_rgtr(bp);
+       if (rc)
+               goto init_err_pci_clean;
+
+       rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+       if (rc)
+               goto init_err_pci_clean;
+
+       bp->ulp_probe = bnxt_ulp_probe;
+
+       /* Get the MAX capabilities for this function */
+       rc = bnxt_hwrm_func_qcaps(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+                          rc);
+               rc = -1;
+               goto init_err_pci_clean;
+       }
+
+       rc = bnxt_hwrm_queue_qportcfg(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+                          rc);
+               rc = -1;
+               goto init_err_pci_clean;
+       }
+
+       bnxt_hwrm_func_qcfg(bp);
+       bnxt_hwrm_port_led_qcaps(bp);
+       bnxt_ethtool_init(bp);
+       bnxt_dcb_init(bp);
+
+       bnxt_set_rx_skb_mode(bp, false);
+       bnxt_set_tpa_flags(bp);
+       bnxt_set_ring_params(bp);
+       bnxt_set_max_func_irqs(bp, max_irqs);
+       rc = bnxt_set_dflt_rings(bp);
+       if (rc) {
+               netdev_err(bp->dev, "Not enough rings available.\n");
+               rc = -ENOMEM;
+               goto init_err_pci_clean;
+       }
+
+       /* Default RSS hash cfg. */
+       bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+                          VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+                          VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+                          VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+       if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
+           !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
+           bp->hwrm_spec_code >= 0x10501) {
+               bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+               bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+                                   VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+       }
+
+       bnxt_hwrm_vnic_qcaps(bp);
+       if (bnxt_rfs_supported(bp)) {
+               dev->hw_features |= NETIF_F_NTUPLE;
+               if (bnxt_rfs_capable(bp)) {
+                       bp->flags |= BNXT_FLAG_RFS;
+                       dev->features |= NETIF_F_NTUPLE;
+               }
+       }
+
+       if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+               bp->flags |= BNXT_FLAG_STRIP_VLAN;
+
+       rc = bnxt_probe_phy(bp);
+       if (rc)
+               goto init_err_pci_clean;
+
+       bnxt_get_wol_settings(bp);
+       if (bp->flags & BNXT_FLAG_WOL_CAP)
+               device_set_wakeup_enable(&pdev->dev, bp->wol);
+       else
+               device_set_wakeup_capable(&pdev->dev, false);
+
+       rc = bnxt_init_int_mode(bp);
+       if (rc)
+               goto init_err_pci_clean;
+
+       rc = register_netdev(dev);
+       if (rc)
+               goto init_err_clr_int;
+
+       netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
+                   board_info[ent->driver_data].name,
+                   (long)pci_resource_start(pdev, 0), dev->dev_addr);
+
+       bnxt_parse_log_pcie_link(bp);
+       /* PTP failure is non-fatal; run without hardware timestamping. */
+       if (bnxt_ptp_init(bp)) {
+               netdev_warn(dev, "PTP initialization failed.\n");
+               kfree(bp->ptp_cfg);
+               bp->ptp_cfg = NULL;
+       }
+
+       return 0;
+
+init_err_clr_int:
+       bnxt_clear_int_mode(bp);
+
+init_err_pci_clean:
+       bnxt_cleanup_pci(bp);
+
+init_err_free:
+       free_netdev(dev);
+       return rc;
+}
+
+/* PCI shutdown callback: close the netdev under RTNL and, when the
+ * system is powering off, release interrupts and arm/park the device
+ * in D3hot with wake-on-LAN as configured.
+ */
+static void bnxt_shutdown(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnxt *bp;
+
+       if (!dev)
+               return;
+
+       rtnl_lock();
+       bp = netdev_priv(dev);
+       if (bp) {
+               if (netif_running(dev))
+                       dev_close(dev);
+
+               if (system_state == SYSTEM_POWER_OFF) {
+                       bnxt_clear_int_mode(bp);
+                       pci_wake_from_d3(pdev, bp->wol);
+                       pci_set_power_state(pdev, PCI_D3hot);
+               }
+       }
+       rtnl_unlock();
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: detach and close the netdev if it is running, then
+ * deregister the driver from firmware so async events stop.
+ */
+static int bnxt_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(netdev);
+       int err = 0;
+
+       rtnl_lock();
+       if (netif_running(netdev)) {
+               netif_device_detach(netdev);
+               err = bnxt_close(netdev);
+       }
+       bnxt_hwrm_func_drv_unrgtr(bp);
+       rtnl_unlock();
+
+       return err;
+}
+
+/* System resume: re-handshake with firmware (version query, driver
+ * registration, function reset), refresh WoL settings and reopen the
+ * netdev if it was running before suspend.
+ */
+static int bnxt_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(netdev);
+       int err = 0;
+
+       rtnl_lock();
+       if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+               err = -ENODEV;
+               goto done;
+       }
+       if (bnxt_hwrm_func_reset(bp)) {
+               err = -EBUSY;
+               goto done;
+       }
+       bnxt_get_wol_settings(bp);
+       if (netif_running(netdev)) {
+               err = bnxt_open(netdev);
+               if (!err)
+                       netif_device_attach(netdev);
+       }
+
+done:
+       rtnl_unlock();
+       return err;
+}
+
+static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
+#define BNXT_PM_OPS (&bnxt_pm_ops)
+
+#else
+
+#define BNXT_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * bnxt_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
+                                              pci_channel_state_t state)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(netdev);
+
+       netdev_info(netdev, "PCI I/O error detected\n");
+
+       rtnl_lock();
+       netif_device_detach(netdev);
+
+       /* Stop the RDMA ULP before touching the device state. */
+       bnxt_ulp_stop(bp);
+
+       if (state == pci_channel_io_perm_failure) {
+               rtnl_unlock();
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       if (netif_running(netdev))
+               bnxt_close(netdev);
+
+       pci_disable_device(pdev);
+       rtnl_unlock();
+
+       /* Request a slot reset. */
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnxt_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(netdev);
+       int err = 0;
+       pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+
+       netdev_info(bp->dev, "PCI Slot Reset\n");
+
+       rtnl_lock();
+
+       if (pci_enable_device(pdev)) {
+               dev_err(&pdev->dev,
+                       "Cannot re-enable PCI device after reset.\n");
+       } else {
+               pci_set_master(pdev);
+
+               err = bnxt_hwrm_func_reset(bp);
+               if (!err && netif_running(netdev))
+                       err = bnxt_open(netdev);
+
+               if (!err) {
+                       result = PCI_ERS_RESULT_RECOVERED;
+                       bnxt_ulp_start(bp);
+               }
+       }
+
+       if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
+               dev_close(netdev);
+
+       rtnl_unlock();
+
+       err = pci_cleanup_aer_uncorrect_error_status(pdev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+                        err); /* non-fatal, continue */
+       }
+
+       /* Report the actual outcome.  The old code unconditionally
+        * returned PCI_ERS_RESULT_RECOVERED even when the reset or
+        * reopen failed, preventing the AER core from disconnecting
+        * a dead device.
+        */
+       return result;
+}
+
+/**
+ * bnxt_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void bnxt_io_resume(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+
+       rtnl_lock();
+
+       netif_device_attach(netdev);
+
+       rtnl_unlock();
+}
+
+/* PCI AER error recovery callbacks. */
+static const struct pci_error_handlers bnxt_err_handler = {
+       .error_detected = bnxt_io_error_detected,
+       .slot_reset     = bnxt_io_slot_reset,
+       .resume         = bnxt_io_resume
+};
+
+/* PCI driver definition; SR-IOV configuration is wired in only when both
+ * the kernel and the build support it.
+ */
+static struct pci_driver bnxt_pci_driver = {
+       .name           = DRV_MODULE_NAME,
+       .id_table       = bnxt_pci_tbl,
+       .probe          = bnxt_init_one,
+       .remove         = bnxt_remove_one,
+       .shutdown       = bnxt_shutdown,
+       .driver.pm      = BNXT_PM_OPS,
+       .err_handler    = &bnxt_err_handler,
+#if defined(CONFIG_BNXT_SRIOV) && defined(PCIE_SRIOV_CONFIGURE)
+       .sriov_configure = bnxt_sriov_configure,
+#endif
+};
+
+/* On modern kernels the boilerplate init/exit is generated by
+ * module_pci_driver(); older kernels need explicit handlers that also
+ * set up and tear down the legacy num_vfs-based SR-IOV support.
+ */
+#if defined(HAVE_MODULE_PCI_DRIVER) && defined(PCIE_SRIOV_CONFIGURE)
+module_pci_driver(bnxt_pci_driver);
+#else
+static int __init bnxt_init(void)
+{
+       bnxt_sriov_init(num_vfs);
+       return pci_register_driver(&bnxt_pci_driver);
+}
+
+static void __exit bnxt_exit(void)
+{
+       bnxt_sriov_exit();
+       pci_unregister_driver(&bnxt_pci_driver);
+}
+
+module_init(bnxt_init);
+module_exit(bnxt_exit);
+#endif
diff --git a/ubuntu/bnxt/bnxt.h b/ubuntu/bnxt/bnxt.h
new file mode 100644 (file)
index 0000000..d166ffc
--- /dev/null
@@ -0,0 +1,1421 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_H
+#define BNXT_H
+
+/* Driver name/version strings reported via module info and ethtool. */
+#define DRV_MODULE_NAME                "bnxt_en"
+#define DRV_MODULE_VERSION     "1.8.1"
+
+#define DRV_VER_MAJ    1
+#define DRV_VER_MIN    8
+#define DRV_VER_UPD    1
+
+#include <linux/interrupt.h>
+
+/* Hardware TX buffer descriptor (little-endian on-chip layout).
+ * Bit-field masks/shifts for tx_bd_len_flags_type are nested below
+ * the field they describe, following the vendor header convention.
+ */
+struct tx_bd {
+       __le32 tx_bd_len_flags_type;
+       #define TX_BD_TYPE                                      (0x3f << 0)
+        #define TX_BD_TYPE_SHORT_TX_BD                          (0x00 << 0)
+        #define TX_BD_TYPE_LONG_TX_BD                           (0x10 << 0)
+       #define TX_BD_FLAGS_PACKET_END                          (1 << 6)
+       #define TX_BD_FLAGS_NO_CMPL                             (1 << 7)
+       #define TX_BD_FLAGS_BD_CNT                              (0x1f << 8)
+        #define TX_BD_FLAGS_BD_CNT_SHIFT                        8
+       #define TX_BD_FLAGS_LHINT                               (3 << 13)
+        #define TX_BD_FLAGS_LHINT_SHIFT                         13
+        #define TX_BD_FLAGS_LHINT_512_AND_SMALLER               (0 << 13)
+        #define TX_BD_FLAGS_LHINT_512_TO_1023                   (1 << 13)
+        #define TX_BD_FLAGS_LHINT_1024_TO_2047                  (2 << 13)
+        #define TX_BD_FLAGS_LHINT_2048_AND_LARGER               (3 << 13)
+       #define TX_BD_FLAGS_COAL_NOW                            (1 << 15)
+       #define TX_BD_LEN                                       (0xffff << 16)
+        #define TX_BD_LEN_SHIFT                                 16
+
+       u32 tx_bd_opaque;
+       __le64 tx_bd_haddr;
+} __packed;
+
+/* Extended (second) TX descriptor carrying checksum/LSO offload flags,
+ * MSS, CFA action and VLAN metadata for a long TX BD.
+ */
+struct tx_bd_ext {
+       __le32 tx_bd_hsize_lflags;
+       #define TX_BD_FLAGS_TCP_UDP_CHKSUM                      (1 << 0)
+       #define TX_BD_FLAGS_IP_CKSUM                            (1 << 1)
+       #define TX_BD_FLAGS_NO_CRC                              (1 << 2)
+       #define TX_BD_FLAGS_STAMP                               (1 << 3)
+       #define TX_BD_FLAGS_T_IP_CHKSUM                         (1 << 4)
+       #define TX_BD_FLAGS_LSO                                 (1 << 5)
+       #define TX_BD_FLAGS_IPID_FMT                            (1 << 6)
+       #define TX_BD_FLAGS_T_IPID                              (1 << 7)
+       #define TX_BD_HSIZE                                     (0xff << 16)
+        #define TX_BD_HSIZE_SHIFT                               16
+
+       __le32 tx_bd_mss;
+       __le32 tx_bd_cfa_action;
+       #define TX_BD_CFA_ACTION                                (0xffff << 16)
+        #define TX_BD_CFA_ACTION_SHIFT                          16
+
+       __le32 tx_bd_cfa_meta;
+       #define TX_BD_CFA_META_MASK                             0xfffffff
+       #define TX_BD_CFA_META_VID_MASK                         0xfff
+       #define TX_BD_CFA_META_PRI_MASK                         (0xf << 12)
+        #define TX_BD_CFA_META_PRI_SHIFT                        12
+       #define TX_BD_CFA_META_TPID_MASK                        (3 << 16)
+        #define TX_BD_CFA_META_TPID_SHIFT                       16
+       #define TX_BD_CFA_META_KEY                              (0xf << 28)
+        #define TX_BD_CFA_META_KEY_SHIFT                        28
+       #define TX_BD_CFA_META_KEY_VLAN                         (1 << 28)
+};
+
+/* Hardware RX buffer descriptor; len/flags/type share one 32-bit word. */
+struct rx_bd {
+       __le32 rx_bd_len_flags_type;
+       #define RX_BD_TYPE                                      (0x3f << 0)
+        #define RX_BD_TYPE_RX_PACKET_BD                         0x4
+        #define RX_BD_TYPE_RX_BUFFER_BD                         0x5
+        #define RX_BD_TYPE_RX_AGG_BD                            0x6
+        #define RX_BD_TYPE_16B_BD_SIZE                          (0 << 4)
+        #define RX_BD_TYPE_32B_BD_SIZE                          (1 << 4)
+        #define RX_BD_TYPE_48B_BD_SIZE                          (2 << 4)
+        #define RX_BD_TYPE_64B_BD_SIZE                          (3 << 4)
+       #define RX_BD_FLAGS_SOP                                 (1 << 6)
+       #define RX_BD_FLAGS_EOP                                 (1 << 7)
+       #define RX_BD_FLAGS_BUFFERS                             (3 << 8)
+        #define RX_BD_FLAGS_1_BUFFER_PACKET                     (0 << 8)
+        #define RX_BD_FLAGS_2_BUFFER_PACKET                     (1 << 8)
+        #define RX_BD_FLAGS_3_BUFFER_PACKET                     (2 << 8)
+        #define RX_BD_FLAGS_4_BUFFER_PACKET                     (3 << 8)
+       #define RX_BD_LEN                                       (0xffff << 16)
+        #define RX_BD_LEN_SHIFT                                 16
+
+       u32 rx_bd_opaque;
+       __le64 rx_bd_haddr;
+};
+
+/* Completion ring entry.  CMP_TYPE distinguishes TX, RX, TPA and HWRM
+ * event completions; the TX-specific error bits live in tx_cmp_errors_v
+ * alongside the valid (generation) bit TX_CMP_V.
+ */
+struct tx_cmp {
+       __le32 tx_cmp_flags_type;
+       #define CMP_TYPE                                        (0x3f << 0)
+        #define CMP_TYPE_TX_L2_CMP                              0
+        #define CMP_TYPE_RX_L2_CMP                              17
+        #define CMP_TYPE_RX_AGG_CMP                             18
+        #define CMP_TYPE_RX_L2_TPA_START_CMP                    19
+        #define CMP_TYPE_RX_L2_TPA_END_CMP                      21
+        #define CMP_TYPE_STATUS_CMP                             32
+        #define CMP_TYPE_REMOTE_DRIVER_REQ                      34
+        #define CMP_TYPE_REMOTE_DRIVER_RESP                     36
+        #define CMP_TYPE_ERROR_STATUS                           48
+        #define CMPL_BASE_TYPE_STAT_EJECT                       0x1aUL
+        #define CMPL_BASE_TYPE_HWRM_DONE                        0x20UL
+        #define CMPL_BASE_TYPE_HWRM_FWD_REQ                     0x22UL
+        #define CMPL_BASE_TYPE_HWRM_FWD_RESP                    0x24UL
+        #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT                 0x2eUL
+
+       #define TX_CMP_FLAGS_ERROR                              (1 << 6)
+       #define TX_CMP_FLAGS_PUSH                               (1 << 7)
+
+       u32 tx_cmp_opaque;
+       __le32 tx_cmp_errors_v;
+       #define TX_CMP_V                                        (1 << 0)
+       #define TX_CMP_ERRORS_BUFFER_ERROR                      (7 << 1)
+        #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR             0
+        #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT           2
+        #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG         4
+        #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS          5
+        #define TX_CMP_ERRORS_ZERO_LENGTH_PKT                   (1 << 4)
+        #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN                  (1 << 5)
+        #define TX_CMP_ERRORS_DMA_ERROR                         (1 << 6)
+        #define TX_CMP_ERRORS_HINT_TOO_SHORT                    (1 << 7)
+
+       /* presumably "unused"; field name typo preserved from vendor source */
+       __le32 tx_cmp_unsed_3;
+};
+
+/* First word of an RX L2 completion: packet length, placement flags,
+ * inner-packet type and RSS-valid bit.  The second 32-bit word carries
+ * agg buffer count, RSS hash type and payload offset plus the valid bit.
+ */
+struct rx_cmp {
+       __le32 rx_cmp_len_flags_type;
+       #define RX_CMP_CMP_TYPE                                 (0x3f << 0)
+       #define RX_CMP_FLAGS_ERROR                              (1 << 6)
+       #define RX_CMP_FLAGS_PLACEMENT                          (7 << 7)
+       #define RX_CMP_FLAGS_RSS_VALID                          (1 << 10)
+       #define RX_CMP_FLAGS_UNUSED                             (1 << 11)
+        #define RX_CMP_FLAGS_ITYPES_SHIFT                       12
+        #define RX_CMP_FLAGS_ITYPES_MASK                        0xf000
+        #define RX_CMP_FLAGS_ITYPE_UNKNOWN                      (0 << 12)
+        #define RX_CMP_FLAGS_ITYPE_IP                           (1 << 12)
+        #define RX_CMP_FLAGS_ITYPE_TCP                          (2 << 12)
+        #define RX_CMP_FLAGS_ITYPE_UDP                          (3 << 12)
+        #define RX_CMP_FLAGS_ITYPE_FCOE                         (4 << 12)
+        #define RX_CMP_FLAGS_ITYPE_ROCE                         (5 << 12)
+        #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS                    (8 << 12)
+        #define RX_CMP_FLAGS_ITYPE_PTP_W_TS                     (9 << 12)
+       #define RX_CMP_LEN                                      (0xffff << 16)
+        #define RX_CMP_LEN_SHIFT                                16
+
+       u32 rx_cmp_opaque;
+       __le32 rx_cmp_misc_v1;
+       #define RX_CMP_V1                                       (1 << 0)
+       #define RX_CMP_AGG_BUFS                                 (0x1f << 1)
+        #define RX_CMP_AGG_BUFS_SHIFT                           1
+       #define RX_CMP_RSS_HASH_TYPE                            (0x7f << 9)
+        #define RX_CMP_RSS_HASH_TYPE_SHIFT                      9
+       #define RX_CMP_PAYLOAD_OFFSET                           (0xff << 16)
+        #define RX_CMP_PAYLOAD_OFFSET_SHIFT                     16
+
+       __le32 rx_cmp_rss_hash;
+};
+
+/* Non-zero when the completion carries a valid RSS hash. */
+#define RX_CMP_HASH_VALID(rxcmp)                               \
+       ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define RSS_PROFILE_ID_MASK    0x1f
+
+/* Extract the RSS profile id from the hash-type field. */
+#define RX_CMP_HASH_TYPE(rxcmp)                                        \
+       (((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+         RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+/* Second half of an RX L2 completion: checksum-calculated flags,
+ * VLAN metadata, detailed error bits, CFA code and the valid bit.
+ */
+struct rx_cmp_ext {
+       __le32 rx_cmp_flags2;
+       #define RX_CMP_FLAGS2_IP_CS_CALC                        0x1
+       #define RX_CMP_FLAGS2_L4_CS_CALC                        (0x1 << 1)
+       #define RX_CMP_FLAGS2_T_IP_CS_CALC                      (0x1 << 2)
+       #define RX_CMP_FLAGS2_T_L4_CS_CALC                      (0x1 << 3)
+       #define RX_CMP_FLAGS2_META_FORMAT_VLAN                  (0x1 << 4)
+       __le32 rx_cmp_meta_data;
+       #define RX_CMP_FLAGS2_METADATA_VID_MASK                 0xfff
+       #define RX_CMP_FLAGS2_METADATA_TPID_MASK                0xffff0000
+        #define RX_CMP_FLAGS2_METADATA_TPID_SFT                 16
+       __le32 rx_cmp_cfa_code_errors_v2;
+       #define RX_CMP_V                                        (1 << 0)
+       #define RX_CMPL_ERRORS_MASK                             (0x7fff << 1)
+        #define RX_CMPL_ERRORS_SFT                              1
+       #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK                (0x7 << 1)
+        #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER           (0x0 << 1)
+        #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT         (0x1 << 1)
+        #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP         (0x2 << 1)
+        #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT          (0x3 << 1)
+       #define RX_CMPL_ERRORS_IP_CS_ERROR                      (0x1 << 4)
+       #define RX_CMPL_ERRORS_L4_CS_ERROR                      (0x1 << 5)
+       #define RX_CMPL_ERRORS_T_IP_CS_ERROR                    (0x1 << 6)
+       #define RX_CMPL_ERRORS_T_L4_CS_ERROR                    (0x1 << 7)
+       #define RX_CMPL_ERRORS_CRC_ERROR                        (0x1 << 8)
+       #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK                 (0x7 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR             (0x0 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION     (0x1 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN     (0x2 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR   (0x3 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR     (0x4 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR    (0x5 << 9)
+        #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL         (0x6 << 9)
+       #define RX_CMPL_ERRORS_PKT_ERROR_MASK                   (0xf << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR               (0x0 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION         (0x1 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN         (0x2 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL             (0x3 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR         (0x4 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR        (0x5 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN         (0x6 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+        #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN         (0x8 << 12)
+
+       #define RX_CMPL_CFA_CODE_MASK                           (0xffff << 16)
+        #define RX_CMPL_CFA_CODE_SFT                            16
+
+       __le32 rx_cmp_unused3;
+};
+
+/* L2-level errors that make a packet unusable (buffer or CRC error). */
+#define RX_CMP_L2_ERRORS                                               \
+       cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS                                              \
+       (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS                                          \
+       (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+/* True when an L4 checksum was calculated (inner or outer) and no
+ * corresponding checksum error bit is set.
+ */
+#define RX_CMP_L4_CS_OK(rxcmp1)                                                \
+           (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) &&           \
+            !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+/* 1 if the tunnel (outer) L4 checksum was calculated, i.e. the packet
+ * is encapsulated; T_L4_CS_CALC is bit 3, hence the >> 3.
+ */
+#define RX_CMP_ENCAP(rxcmp1)                                           \
+           ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) &                    \
+            RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+/* RX aggregation-buffer completion (one per agg buffer of a jumbo/TPA
+ * packet); only length, opaque cookie and the valid bit are meaningful.
+ */
+struct rx_agg_cmp {
+       __le32 rx_agg_cmp_len_flags_type;
+       #define RX_AGG_CMP_TYPE                                 (0x3f << 0)
+       #define RX_AGG_CMP_LEN                                  (0xffff << 16)
+        #define RX_AGG_CMP_LEN_SHIFT                            16
+       u32 rx_agg_cmp_opaque;
+       __le32 rx_agg_cmp_v;
+       #define RX_AGG_CMP_V                                    (1 << 0)
+       __le32 rx_agg_cmp_unused;
+};
+
+/* TPA (HW GRO/LRO) start completion: announces the beginning of an
+ * aggregation on a given AGG_ID with placement and RSS information.
+ */
+struct rx_tpa_start_cmp {
+       __le32 rx_tpa_start_cmp_len_flags_type;
+       #define RX_TPA_START_CMP_TYPE                           (0x3f << 0)
+       #define RX_TPA_START_CMP_FLAGS                          (0x3ff << 6)
+        #define RX_TPA_START_CMP_FLAGS_SHIFT                    6
+       #define RX_TPA_START_CMP_FLAGS_PLACEMENT                (0x7 << 7)
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT          7
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO          (0x1 << 7)
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS            (0x2 << 7)
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO      (0x5 << 7)
+        #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS        (0x6 << 7)
+       #define RX_TPA_START_CMP_FLAGS_RSS_VALID                (0x1 << 10)
+       #define RX_TPA_START_CMP_FLAGS_ITYPES                   (0xf << 12)
+        #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT             12
+        #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP                (0x2 << 12)
+       #define RX_TPA_START_CMP_LEN                            (0xffff << 16)
+        #define RX_TPA_START_CMP_LEN_SHIFT                      16
+
+       u32 rx_tpa_start_cmp_opaque;
+       __le32 rx_tpa_start_cmp_misc_v1;
+       #define RX_TPA_START_CMP_V1                             (0x1 << 0)
+       #define RX_TPA_START_CMP_RSS_HASH_TYPE                  (0x7f << 9)
+        #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT            9
+       #define RX_TPA_START_CMP_AGG_ID                         (0x7f << 25)
+        #define RX_TPA_START_CMP_AGG_ID_SHIFT                   25
+
+       __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+/* Non-zero when the TPA start completion carries a valid RSS hash. */
+#define TPA_START_HASH_VALID(rx_tpa_start)                             \
+       ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &              \
+        cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start)                              \
+       (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &      \
+          RX_TPA_START_CMP_RSS_HASH_TYPE) >>                           \
+         RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+/* Aggregation id used to pair this start with its later end completion. */
+#define TPA_START_AGG_ID(rx_tpa_start)                                 \
+       ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &       \
+        RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+/* Second half of the TPA start completion: checksum flags, VLAN
+ * metadata, CFA code and the header-offset info word.
+ */
+struct rx_tpa_start_cmp_ext {
+       __le32 rx_tpa_start_cmp_flags2;
+       #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC              (0x1 << 0)
+       #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC              (0x1 << 1)
+       #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC            (0x1 << 2)
+       #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC            (0x1 << 3)
+       #define RX_TPA_START_CMP_FLAGS2_IP_TYPE                 (0x1 << 8)
+
+       __le32 rx_tpa_start_cmp_metadata;
+       __le32 rx_tpa_start_cmp_cfa_code_v2;
+       #define RX_TPA_START_CMP_V2                             (0x1 << 0)
+       #define RX_TPA_START_CMP_CFA_CODE                       (0xffff << 16)
+        #define RX_TPA_START_CMPL_CFA_CODE_SHIFT                16
+       __le32 rx_tpa_start_cmp_hdr_info;
+};
+
+/* TPA end completion: total aggregated length, number of agg buffers
+ * and TPA segments, payload offset and the matching AGG_ID.
+ */
+struct rx_tpa_end_cmp {
+       __le32 rx_tpa_end_cmp_len_flags_type;
+       #define RX_TPA_END_CMP_TYPE                             (0x3f << 0)
+       #define RX_TPA_END_CMP_FLAGS                            (0x3ff << 6)
+        #define RX_TPA_END_CMP_FLAGS_SHIFT                      6
+       #define RX_TPA_END_CMP_FLAGS_PLACEMENT                  (0x7 << 7)
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT            7
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO            (0x1 << 7)
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS              (0x2 << 7)
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO        (0x5 << 7)
+        #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS          (0x6 << 7)
+       #define RX_TPA_END_CMP_FLAGS_RSS_VALID                  (0x1 << 10)
+       #define RX_TPA_END_CMP_FLAGS_ITYPES                     (0xf << 12)
+        #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT               12
+        #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP                  (0x2 << 12)
+       #define RX_TPA_END_CMP_LEN                              (0xffff << 16)
+        #define RX_TPA_END_CMP_LEN_SHIFT                        16
+
+       u32 rx_tpa_end_cmp_opaque;
+       __le32 rx_tpa_end_cmp_misc_v1;
+       #define RX_TPA_END_CMP_V1                               (0x1 << 0)
+       #define RX_TPA_END_CMP_AGG_BUFS                         (0x3f << 1)
+        #define RX_TPA_END_CMP_AGG_BUFS_SHIFT                   1
+       #define RX_TPA_END_CMP_TPA_SEGS                         (0xff << 8)
+        #define RX_TPA_END_CMP_TPA_SEGS_SHIFT                   8
+       #define RX_TPA_END_CMP_PAYLOAD_OFFSET                   (0xff << 16)
+        #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT             16
+       #define RX_TPA_END_CMP_AGG_ID                           (0x7f << 25)
+        #define RX_TPA_END_CMP_AGG_ID_SHIFT                     25
+
+       __le32 rx_tpa_end_cmp_tsdelta;
+       #define RX_TPA_END_GRO_TS                               (0x1 << 31)
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end)                                     \
+       ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &           \
+        RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end)                                   \
+       ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &           \
+        RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+/* NOTE: the '&' is intentional, not a typo.  GRO_JUMBO (0x5 << 7) &
+ * GRO_HDS (0x6 << 7) == (0x4 << 7), the placement bit common to both
+ * GRO modes, so TPA_END_GRO() matches either GRO placement.
+ */
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO                         \
+       cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &          \
+                   RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end)                                                \
+       ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &                  \
+        RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end)                                     \
+       (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta &                      \
+           cpu_to_le32(RX_TPA_END_GRO_TS)))
+
+/* Second half of the TPA end completion: dup ACK count, segment length
+ * and error bits alongside the valid bit.
+ */
+struct rx_tpa_end_cmp_ext {
+       __le32 rx_tpa_end_cmp_dup_acks;
+       #define RX_TPA_END_CMP_TPA_DUP_ACKS                     (0xf << 0)
+
+       __le32 rx_tpa_end_cmp_seg_len;
+       #define RX_TPA_END_CMP_TPA_SEG_LEN                      (0xffff << 0)
+
+       __le32 rx_tpa_end_cmp_errors_v2;
+       #define RX_TPA_END_CMP_V2                               (0x1 << 0)
+       #define RX_TPA_END_CMP_ERRORS                           (0x3 << 1)
+       #define RX_TPA_END_CMPL_ERRORS_SHIFT                     1
+
+       u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define TPA_END_ERRORS(rx_tpa_end_ext)                                 \
+       ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &                   \
+        cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
+/* Doorbell register encoding: 24-bit ring index plus key bits that
+ * select which ring type (TX/RX/CP/status) the write targets.
+ */
+#define DB_IDX_MASK                                            0xffffff
+#define DB_IDX_VALID                                           (0x1 << 26)
+#define DB_IRQ_DIS                                             (0x1 << 27)
+#define DB_KEY_TX                                              (0x0 << 28)
+#define DB_KEY_RX                                              (0x1 << 28)
+#define DB_KEY_CP                                              (0x2 << 28)
+#define DB_KEY_ST                                              (0x3 << 28)
+#define DB_KEY_TX_PUSH                                         (0x4 << 28)
+#define DB_LONG_TX_PUSH                                                (0x2 << 24)
+
+#define INVALID_HW_RING_ID     ((u16)-1)
+
+/* The hardware supports certain page sizes.  Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT        12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT        PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT        13
+#else
+#define BNXT_PAGE_SHIFT        16
+#endif
+
+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
+#define BNXT_MAX_MTU           9500
+/* Largest MTU usable in XDP page mode: one page minus headers/headroom. */
+#define BNXT_MAX_PAGE_MODE_MTU \
+       ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -       \
+        XDP_PACKET_HEADROOM)
+
+#define BNXT_MIN_PKT_SIZE      52
+
+#define BNXT_DEFAULT_RX_RING_SIZE      511
+#define BNXT_DEFAULT_TX_RING_SIZE      511
+
+#define MAX_TPA                64
+
+/* Page counts per ring type, scaled down when 64K pages are in use. */
+#if (BNXT_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES   1
+#define MAX_RX_AGG_PAGES       4
+#define MAX_TX_PAGES   1
+#define MAX_CP_PAGES   8
+#else
+#define MAX_RX_PAGES   8
+#define MAX_RX_AGG_PAGES       32
+#define MAX_TX_PAGES   8
+#define MAX_CP_PAGES   64
+#endif
+
+/* Descriptors per BNXT_PAGE_SIZE page for each ring type. */
+#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
+
+#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+
+#define BNXT_MAX_RX_DESC_CNT           (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_JUM_DESC_CNT       (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNXT_MAX_TX_DESC_CNT           (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+/* Split a ring index into (page, index-within-page) for each ring type. */
+#define RX_RING(x)     (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_IDX(x)      ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(x)     (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_IDX(x)      ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x)     (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define CP_IDX(x)      ((x) & (CP_DESC_CNT - 1))
+
+/* A completion is valid when its V bit matches the current generation
+ * phase derived from the raw consumer index (bp->cp_bit toggles each
+ * time the ring wraps).
+ */
+#define TX_CMP_VALID(txcmp, raw_cons)                                  \
+       (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==        \
+        !((raw_cons) & bp->cp_bit))
+
+#define RX_CMP_VALID(rxcmp1, raw_cons)                                 \
+       (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+        !((raw_cons) & bp->cp_bit))
+
+#define RX_AGG_CMP_VALID(agg, raw_cons)                                \
+       (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
+        !((raw_cons) & bp->cp_bit))
+
+#define TX_CMP_TYPE(txcmp)                                     \
+       (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp)                                     \
+       (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+/* Ring index advance helpers (masked to the ring size). */
+#define NEXT_RX(idx)           (((idx) + 1) & bp->rx_ring_mask)
+
+#define NEXT_RX_AGG(idx)       (((idx) + 1) & bp->rx_agg_ring_mask)
+
+#define NEXT_TX(idx)           (((idx) + 1) & bp->tx_ring_mask)
+
+#define ADV_RAW_CMP(idx, n)    ((idx) + (n))
+#define NEXT_RAW_CMP(idx)      ADV_RAW_CMP(idx, 1)
+#define RING_CMP(idx)          ((idx) & bp->cp_ring_mask)
+#define NEXT_CMP(idx)          RING_CMP(ADV_RAW_CMP(idx, 1))
+
+/* HWRM (firmware mailbox) request/response framing constants. */
+#define BNXT_HWRM_MAX_REQ_LEN          (bp->hwrm_max_req_len)
+#define DFLT_HWRM_CMD_TIMEOUT          500
+#define HWRM_CMD_TIMEOUT               (bp->hwrm_cmd_timeout)
+#define HWRM_RESET_TIMEOUT             ((HWRM_CMD_TIMEOUT) * 4)
+#define HWRM_RESP_ERR_CODE_MASK                0xffff
+#define HWRM_RESP_LEN_OFFSET           4
+#define HWRM_RESP_LEN_MASK             0xffff0000
+#define HWRM_RESP_LEN_SFT              16
+#define HWRM_RESP_VALID_MASK           0xff000000
+#define HWRM_SEQ_ID_INVALID            -1
+#define BNXT_HWRM_REQ_MAX_SIZE         128
+#define BNXT_HWRM_REQS_PER_PAGE                (BNXT_PAGE_SIZE /       \
+                                        BNXT_HWRM_REQ_MAX_SIZE)
+/* NAPI poll event bitmask values. */
+#define BNXT_RX_EVENT  1
+#define BNXT_AGG_EVENT 2
+#define BNXT_TX_EVENT  4
+
+/* Per-TX-descriptor software bookkeeping; the union holds nr_frags for
+ * regular skbs and rx_prod for XDP-redirected buffers.
+ */
+struct bnxt_sw_tx_bd {
+       struct sk_buff          *skb;
+       DEFINE_DMA_UNMAP_ADDR(mapping);
+       u8                      is_gso;
+       u8                      is_push;
+       union {
+               unsigned short          nr_frags;
+               u16                     rx_prod;
+       };
+};
+
+/* Per-RX-descriptor software bookkeeping for the normal RX ring. */
+struct bnxt_sw_rx_bd {
+       void                    *data;
+       u8                      *data_ptr;
+       dma_addr_t              mapping;
+};
+
+/* Per-descriptor bookkeeping for the RX aggregation (page) ring. */
+struct bnxt_sw_rx_agg_bd {
+       struct page             *page;
+       unsigned int            offset;
+       dma_addr_t              mapping;
+};
+
+/* Generic multi-page ring: page pointers + DMA addresses, an optional
+ * page table for rings larger than one page, and optional vmem for
+ * the software bookkeeping array.
+ */
+struct bnxt_ring_struct {
+       int                     nr_pages;
+       int                     page_size;
+       void                    **pg_arr;
+       dma_addr_t              *dma_arr;
+
+       __le64                  *pg_tbl;
+       dma_addr_t              pg_tbl_map;
+
+       int                     vmem_size;
+       void                    **vmem;
+
+       u16                     fw_ring_id; /* Ring id filled by Chimp FW */
+       u8                      queue_id;
+};
+
+/* Inline TX push: doorbell word followed by the long BD pair. */
+struct tx_push_bd {
+       __le32                  doorbell;
+       __le32                  tx_bd_len_flags_type;
+       u32                     tx_bd_opaque;
+       struct tx_bd_ext        txbd2;
+};
+
+/* Push BD plus room for the inlined packet data. */
+struct tx_push_buffer {
+       struct tx_push_bd       push_bd;
+       u32                     data[25];
+};
+
+/* Per-TX-queue state: producer/consumer indices, doorbell, descriptor
+ * pages and the inline-push buffer.
+ */
+struct bnxt_tx_ring_info {
+       struct bnxt_napi        *bnapi;
+       u16                     tx_prod;
+       u16                     tx_cons;
+       u16                     txq_index;
+       void __iomem            *tx_doorbell;
+
+       struct tx_bd            *tx_desc_ring[MAX_TX_PAGES];
+       struct bnxt_sw_tx_bd    *tx_buf_ring;
+
+       dma_addr_t              tx_desc_mapping[MAX_TX_PAGES];
+
+       struct tx_push_buffer   *tx_push;
+       dma_addr_t              tx_push_mapping;
+       __le64                  data_mapping;
+
+#define BNXT_DEV_STATE_CLOSING 0x1
+       u32                     dev_state;
+
+       struct bnxt_ring_struct tx_ring_struct;
+};
+
+/* State for one in-progress TPA aggregation (indexed by AGG_ID). */
+struct bnxt_tpa_info {
+       void                    *data;
+       u8                      *data_ptr;
+       dma_addr_t              mapping;
+       u16                     len;
+       unsigned short          gso_type;
+       u32                     flags2;
+       u32                     metadata;
+       enum pkt_hash_types     hash_type;
+       u32                     rss_hash;
+       u32                     hdr_info;
+
+/* Decoders for the packed hdr_info word from the TPA start completion. */
+#define BNXT_TPA_L4_SIZE(hdr_info)     \
+       (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info)        \
+       (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info)        \
+       (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info)        \
+       ((hdr_info) & 0x1ff)
+};
+
+/* Per-RX-queue state: producers for the normal and aggregation rings,
+ * doorbells, descriptor pages, agg-buffer bitmap and TPA table.
+ */
+struct bnxt_rx_ring_info {
+       struct bnxt_napi        *bnapi;
+       u16                     rx_prod;
+       u16                     rx_agg_prod;
+       u16                     rx_sw_agg_prod;
+       u16                     rx_next_cons;
+       void __iomem            *rx_doorbell;
+       void __iomem            *rx_agg_doorbell;
+
+#ifdef HAVE_NDO_XDP
+       struct bpf_prog         *xdp_prog;
+#endif
+
+       struct rx_bd            *rx_desc_ring[MAX_RX_PAGES];
+       struct bnxt_sw_rx_bd    *rx_buf_ring;
+
+       struct rx_bd            *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+       struct bnxt_sw_rx_agg_bd        *rx_agg_ring;
+
+       unsigned long           *rx_agg_bmap;
+       u16                     rx_agg_bmap_size;
+
+       struct page             *rx_page;
+       unsigned int            rx_page_offset;
+
+       dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
+       dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+       struct bnxt_tpa_info    *rx_tpa;
+
+       struct bnxt_ring_struct rx_ring_struct;
+       struct bnxt_ring_struct rx_agg_ring_struct;
+};
+
+/* Driver-maintained counters not provided by hardware stats. */
+struct bnxt_sw_stats {
+       u64                     rx_l4_csum_errors;
+       u64                     rx_resets;
+};
+
+/* Per-completion-ring state plus the DMA-mapped hardware stats block. */
+struct bnxt_cp_ring_info {
+       u32                     cp_raw_cons;
+       void __iomem            *cp_doorbell;
+
+       struct tx_cmp           *cp_desc_ring[MAX_CP_PAGES];
+
+       dma_addr_t              cp_desc_mapping[MAX_CP_PAGES];
+
+       struct ctx_hw_stats     *hw_stats;
+       dma_addr_t              hw_stats_map;
+       u32                     hw_stats_ctx_id;
+
+       struct bnxt_sw_stats    sw_stats;
+
+       struct bnxt_ring_struct cp_ring_struct;
+};
+
+/* One NAPI context per completion ring, tying together the CP ring and
+ * the RX/TX rings it services.
+ */
+struct bnxt_napi {
+       struct napi_struct      napi;
+       struct bnxt             *bp;
+
+       int                     index;
+       struct bnxt_cp_ring_info        cp_ring;
+       struct bnxt_rx_ring_info        *rx_ring;
+       struct bnxt_tx_ring_info        *tx_ring;
+
+       void                    (*tx_int)(struct bnxt *, struct bnxt_napi *,
+                                         int);
+       u32                     flags;
+#define BNXT_NAPI_FLAG_XDP     0x1
+
+#ifdef BNXT_PRIV_RX_BUSY_POLL
+       atomic_t                poll_state;
+#endif
+       bool                    in_reset;
+};
+
+#ifdef BNXT_PRIV_RX_BUSY_POLL
+/* Busy-poll arbitration states for the legacy private busy-poll path. */
+enum bnxt_poll_state_t {
+       BNXT_STATE_IDLE = 0,
+       BNXT_STATE_NAPI,
+       BNXT_STATE_POLL,
+       BNXT_STATE_DISABLE,
+};
+#endif
+
+/* Per-vector IRQ bookkeeping. */
+struct bnxt_irq {
+       irq_handler_t   handler;
+       unsigned int    vector;
+       u8              requested;
+       char            name[IFNAMSIZ + 2];
+};
+
+#define HWRM_RING_ALLOC_TX     0x1
+#define HWRM_RING_ALLOC_RX     0x2
+#define HWRM_RING_ALLOC_AGG    0x4
+#define HWRM_RING_ALLOC_CMPL   0x8
+
+#define INVALID_STATS_CTX_ID   -1
+
+/* Firmware identifiers making up one ring group: the RX, aggregation
+ * and completion rings plus the statistics context tied to them.
+ */
+struct bnxt_ring_grp_info {
+       u16     fw_stats_ctx;
+       u16     fw_grp_id;
+       u16     rx_fw_ring_id;
+       u16     agg_fw_ring_id;
+       u16     cp_fw_ring_id;
+};
+
+/* Per-VNIC state: firmware context ids, RSS table/key DMA buffers, and
+ * the unicast/multicast filter lists programmed for this VNIC.
+ */
+struct bnxt_vnic_info {
+       u16             fw_vnic_id; /* returned by Chimp during alloc */
+#define BNXT_MAX_CTX_PER_VNIC  2
+       u16             fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
+       u16             fw_l2_ctx_id;
+#define BNXT_MAX_UC_ADDRS      4
+       __le64          fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+                               /* index 0 always dev_addr */
+       u16             uc_filter_count;
+       u8              *uc_list;
+
+       u16             *fw_grp_ids;
+       dma_addr_t      rss_table_dma_addr;
+       __le16          *rss_table;
+       dma_addr_t      rss_hash_key_dma_addr;
+       u64             *rss_hash_key;
+       u32             rx_mask;
+
+       u8              *mc_list;
+       int             mc_list_size;
+       int             mc_list_count;
+       dma_addr_t      mc_list_mapping;
+#define BNXT_MAX_MC_ADDRS      16
+
+       u32             flags;
+#define BNXT_VNIC_RSS_FLAG     1
+#define BNXT_VNIC_RFS_FLAG     2
+#define BNXT_VNIC_MCAST_FLAG   4
+#define BNXT_VNIC_UCAST_FLAG   8
+#define BNXT_VNIC_RFS_NEW_RSS_FLAG     0x10
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+/* PF-side view of a single VF: its MAC/VLAN settings, resource limits
+ * and the buffer used to forward its HWRM command requests.
+ */
+struct bnxt_vf_info {
+       u16     fw_fid;
+       u8      mac_addr[ETH_ALEN];
+       u16     max_rsscos_ctxs;
+       u16     max_cp_rings;
+       u16     max_tx_rings;
+       u16     max_rx_rings;
+       u16     max_hw_ring_grps;
+       u16     max_l2_ctxs;
+       u16     max_irqs;
+       u16     max_vnics;
+       u16     max_stat_ctxs;
+       u16     vlan;
+       u32     flags;
+#define BNXT_VF_QOS            0x1
+#define BNXT_VF_SPOOFCHK       0x2
+#define BNXT_VF_LINK_FORCED    0x4
+#define BNXT_VF_LINK_UP                0x8
+       u32     func_flags; /* func cfg flags */
+       u32     min_tx_rate;
+       u32     max_tx_rate;
+       void    *hwrm_cmd_req_addr;
+       dma_addr_t      hwrm_cmd_req_dma_addr;
+};
+#endif
+
+/* PF-wide state: hardware resource maxima reported by firmware, flow
+ * counter limits, and the SR-IOV bookkeeping (per-VF array, event
+ * bitmap, forwarded HWRM request pages).
+ */
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID      1
+#define BNXT_FIRST_VF_FID      128
+       u16     fw_fid;
+       u16     port_id;
+       u8      mac_addr[ETH_ALEN];
+       u16     max_rsscos_ctxs;
+       u16     max_cp_rings;
+       u16     max_tx_rings; /* HW assigned max tx rings for this PF */
+       u16     max_rx_rings; /* HW assigned max rx rings for this PF */
+       u16     max_hw_ring_grps;
+       u16     max_irqs;
+       u16     max_l2_ctxs;
+       u16     max_vnics;
+       u16     max_stat_ctxs;
+       u32     first_vf_id;
+       u16     active_vfs;
+       u16     max_vfs;
+       u32     max_encap_records;
+       u32     max_decap_records;
+       u32     max_tx_em_flows;
+       u32     max_tx_wm_flows;
+       u32     max_rx_em_flows;
+       u32     max_rx_wm_flows;
+       unsigned long   *vf_event_bmap;
+       u16     hwrm_cmd_req_pages;
+       void                    *hwrm_cmd_req_addr[4];
+       dma_addr_t              hwrm_cmd_req_dma_addr[4];
+       struct bnxt_vf_info     *vf;
+};
+
+#ifdef CONFIG_RFS_ACCEL
+/* One accelerated-RFS n-tuple steering filter, kept on a hash list and
+ * identified to firmware by filter_id.
+ */
+struct bnxt_ntuple_filter {
+       struct hlist_node       hash;
+       u8                      dst_mac_addr[ETH_ALEN];
+       u8                      src_mac_addr[ETH_ALEN];
+       struct flow_keys        fkeys;
+       __le64                  filter_id;
+       u16                     sw_id;
+       u8                      l2_fltr_idx;
+       u16                     rxq;    /* destination RX queue */
+       u32                     flow_id;
+       unsigned long           state;
+#define BNXT_FLTR_VALID                0
+#define BNXT_FLTR_UPDATE       1
+};
+#endif
+
+/* PHY/link state cached from the firmware PORT_PHY_QCFG response,
+ * together with the user's requested settings from ethtool (the BNXT_*
+ * macros below alias the corresponding PORT_PHY_QCFG_RESP_* values).
+ */
+struct bnxt_link_info {
+       u8                      phy_type;
+       u8                      media_type;
+       u8                      transceiver;
+       u8                      phy_addr;
+       u8                      phy_link_status;
+#define BNXT_LINK_NO_LINK      PORT_PHY_QCFG_RESP_LINK_NO_LINK
+#define BNXT_LINK_SIGNAL       PORT_PHY_QCFG_RESP_LINK_SIGNAL
+#define BNXT_LINK_LINK         PORT_PHY_QCFG_RESP_LINK_LINK
+       u8                      wire_speed;
+       u8                      loop_back;
+       u8                      link_up;
+       u8                      duplex;
+#define BNXT_LINK_DUPLEX_HALF  PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
+#define BNXT_LINK_DUPLEX_FULL  PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+       u8                      pause;
+#define BNXT_LINK_PAUSE_TX     PORT_PHY_QCFG_RESP_PAUSE_TX
+#define BNXT_LINK_PAUSE_RX     PORT_PHY_QCFG_RESP_PAUSE_RX
+#define BNXT_LINK_PAUSE_BOTH   (PORT_PHY_QCFG_RESP_PAUSE_RX | \
+                                PORT_PHY_QCFG_RESP_PAUSE_TX)
+       u8                      lp_pause;
+       u8                      auto_pause_setting;
+       u8                      force_pause_setting;
+       u8                      duplex_setting;
+       u8                      auto_mode;
+#define BNXT_AUTO_MODE(mode)   ((mode) > BNXT_LINK_AUTO_NONE && \
+                                (mode) <= BNXT_LINK_AUTO_MSK)
+#define BNXT_LINK_AUTO_NONE     PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
+#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
+#define BNXT_LINK_AUTO_ONESPD  PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
+#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
+#define BNXT_LINK_AUTO_MSK     PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+#define PHY_VER_LEN            3
+       u8                      phy_ver[PHY_VER_LEN];
+       u16                     link_speed;
+#define BNXT_LINK_SPEED_100MB  PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
+#define BNXT_LINK_SPEED_1GB    PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
+#define BNXT_LINK_SPEED_2GB    PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
+#define BNXT_LINK_SPEED_2_5GB  PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
+#define BNXT_LINK_SPEED_10GB   PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
+#define BNXT_LINK_SPEED_20GB   PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
+#define BNXT_LINK_SPEED_25GB   PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
+#define BNXT_LINK_SPEED_40GB   PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+#define BNXT_LINK_SPEED_50GB   PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+       u16                     support_speeds;
+       u16                     auto_link_speeds;       /* fw adv setting */
+#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
+#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
+#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
+#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
+#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
+#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
+#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
+#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
+#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+       u16                     support_auto_speeds;
+       u16                     lp_auto_link_speeds;
+       u16                     force_link_speed;
+       u32                     preemphasis;
+       u8                      module_status;
+       u16                     fec_cfg;
+#define BNXT_FEC_AUTONEG       PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
+#define BNXT_FEC_ENC_BASE_R    PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
+#define BNXT_FEC_ENC_RS                PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
+
+       /* copy of requested setting from ethtool cmd */
+       u8                      autoneg;
+#define BNXT_AUTONEG_SPEED             1
+#define BNXT_AUTONEG_FLOW_CTRL         2
+       u8                      req_duplex;
+       u8                      req_flow_ctrl;
+       u16                     req_link_speed;
+       u16                     advertising;    /* user adv setting */
+       bool                    force_link_chng;
+
+       /* a copy of phy_qcfg output used to report link
+        * info to VF
+        */
+       struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+#define BNXT_MAX_QUEUE 8
+
+/* Firmware id/profile pair for one CoS queue (up to BNXT_MAX_QUEUE). */
+struct bnxt_queue_info {
+       u8      queue_id;
+       u8      queue_profile;
+};
+
+#define BNXT_MAX_LED                   4
+
+/* Identity and capabilities of one port LED, as reported by the
+ * firmware PORT_LED_QCAPS command.
+ */
+struct bnxt_led_info {
+       u8      led_id;
+       u8      led_type;
+       u8      led_group_id;
+       u8      unused;
+       __le16  led_state_caps;
+#define BNXT_LED_ALT_BLINK_CAP(x)      ((x) &  \
+       cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED))
+
+       __le16  led_color_caps;
+};
+
+#define BNXT_MAX_TEST  8
+
+/* Self-test descriptors; string sizing (ETH_GSTRING_LEN) matches the
+ * ethtool self-test reporting interface.
+ */
+struct bnxt_test_info {
+       u8 offline_mask;        /* bit set => test requires offline mode */
+       u16 timeout;
+       char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
+};
+
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
+#define BNXT_GRCPF_REG_SYNC_TIME       0x480
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ   0x488
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_MSK   0xffffffUL
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_SFT   0
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_MSK   0x1f000000UL
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_SFT   24
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_MSK  0x20000000UL
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_SFT  29
+#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
+#define BNXT_CAG_REG_BASE              0x300000
+
+/* Per-device driver state.  One instance exists per PCI function (PF
+ * or VF); field layout must not change without auditing all users in
+ * the bnxt_* source files.
+ */
+struct bnxt {
+       void __iomem            *bar0;
+       void __iomem            *bar1;
+       void __iomem            *bar2;
+
+       u32                     reg_base;
+       u16                     chip_num;
+#define CHIP_NUM_57301         0x16c8
+#define CHIP_NUM_57302         0x16c9
+#define CHIP_NUM_57304         0x16ca
+#define CHIP_NUM_58700         0x16cd
+#define CHIP_NUM_57402         0x16d0
+#define CHIP_NUM_57404         0x16d1
+#define CHIP_NUM_57406         0x16d2
+
+#define CHIP_NUM_57311         0x16ce
+#define CHIP_NUM_57312         0x16cf
+#define CHIP_NUM_57314         0x16df
+#define CHIP_NUM_57412         0x16d6
+#define CHIP_NUM_57414         0x16d7
+#define CHIP_NUM_57416         0x16d8
+#define CHIP_NUM_57417         0x16d9
+
+#define BNXT_CHIP_NUM_5730X(chip_num)          \
+       ((chip_num) >= CHIP_NUM_57301 &&        \
+        (chip_num) <= CHIP_NUM_57304)
+
+#define BNXT_CHIP_NUM_5740X(chip_num)          \
+       ((chip_num) >= CHIP_NUM_57402 &&        \
+        (chip_num) <= CHIP_NUM_57406)
+
+#define BNXT_CHIP_NUM_5731X(chip_num)          \
+       ((chip_num) == CHIP_NUM_57311 ||        \
+        (chip_num) == CHIP_NUM_57312 ||        \
+        (chip_num) == CHIP_NUM_57314)
+
+#define BNXT_CHIP_NUM_5741X(chip_num)          \
+       ((chip_num) >= CHIP_NUM_57412 &&        \
+        (chip_num) <= CHIP_NUM_57417)
+
+#define BNXT_CHIP_NUM_57X0X(chip_num)          \
+       (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
+
+#define BNXT_CHIP_NUM_57X1X(chip_num)          \
+       (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
+
+       struct net_device       *dev;
+       struct pci_dev          *pdev;
+
+       atomic_t                intr_sem;
+
+       u32                     flags;
+       #define BNXT_FLAG_DCB_ENABLED   0x1
+       #define BNXT_FLAG_VF            0x2
+       #define BNXT_FLAG_LRO           0x4
+#ifdef CONFIG_INET
+       #define BNXT_FLAG_GRO           0x8
+#else
+       /* Cannot support hardware GRO if CONFIG_INET is not set */
+       #define BNXT_FLAG_GRO           0x0
+#endif
+       #define BNXT_FLAG_TPA           (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
+       #define BNXT_FLAG_JUMBO         0x10
+       #define BNXT_FLAG_STRIP_VLAN    0x20
+       #define BNXT_FLAG_AGG_RINGS     (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+                                        BNXT_FLAG_LRO)
+       #define BNXT_FLAG_USING_MSIX    0x40
+       #define BNXT_FLAG_MSIX_CAP      0x80
+       #define BNXT_FLAG_RFS           0x100
+       #define BNXT_FLAG_SHARED_RINGS  0x200
+       #define BNXT_FLAG_PORT_STATS    0x400
+       #define BNXT_FLAG_UDP_RSS_CAP   0x800
+       #define BNXT_FLAG_EEE_CAP       0x1000
+       #define BNXT_FLAG_NEW_RSS_CAP   0x2000
+       #define BNXT_FLAG_WOL_CAP       0x4000
+       #define BNXT_FLAG_ROCEV1_CAP    0x8000
+       #define BNXT_FLAG_ROCEV2_CAP    0x10000
+       #define BNXT_FLAG_ROCE_CAP      (BNXT_FLAG_ROCEV1_CAP | \
+                                        BNXT_FLAG_ROCEV2_CAP)
+       #define BNXT_FLAG_NO_AGG_RINGS  0x20000
+       #define BNXT_FLAG_RX_PAGE_MODE  0x40000
+       #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
+       #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
+
+       #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |             \
+                                           BNXT_FLAG_RFS |             \
+                                           BNXT_FLAG_STRIP_VLAN)
+
+#define BNXT_PF(bp)            (!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp)            ((bp)->flags & BNXT_FLAG_VF)
+#define BNXT_NPAR(bp)          ((bp)->port_partition_type)
+#define BNXT_SINGLE_PF(bp)     (BNXT_PF(bp) && !BNXT_NPAR(bp))
+#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+#define BNXT_RX_PAGE_MODE(bp)  ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+
+       struct bnxt_en_dev      *edev;
+       struct bnxt_en_dev *    (*ulp_probe)(struct net_device *);
+
+       struct bnxt_napi        **bnapi;
+
+#ifdef OLD_VLAN
+       struct vlan_group       *vlgrp;
+#endif
+
+       struct bnxt_rx_ring_info        *rx_ring;
+       struct bnxt_tx_ring_info        *tx_ring;
+       u16                     *tx_ring_map;
+
+       struct sk_buff *        (*gro_func)(struct bnxt_tpa_info *, int, int,
+                                           struct sk_buff *);
+
+       struct sk_buff *        (*rx_skb_func)(struct bnxt *,
+                                              struct bnxt_rx_ring_info *,
+                                              u16, void *, u8 *, dma_addr_t,
+                                              unsigned int);
+
+       /* RX/aggregation ring geometry and buffer parameters */
+       u32                     rx_buf_size;
+       u32                     rx_buf_use_size;        /* useable size */
+       u16                     rx_offset;
+       u16                     rx_dma_offset;
+       enum dma_data_direction rx_dir;
+       u32                     rx_ring_size;
+       u32                     rx_agg_ring_size;
+       u32                     rx_copy_thresh;
+       u32                     rx_ring_mask;
+       u32                     rx_agg_ring_mask;
+       int                     rx_nr_pages;
+       int                     rx_agg_nr_pages;
+       int                     rx_nr_rings;
+       int                     rsscos_nr_ctxs;
+
+       /* TX ring geometry and push-mode thresholds */
+       u32                     tx_ring_size;
+       u32                     tx_ring_mask;
+       int                     tx_nr_pages;
+       int                     tx_nr_rings;
+       int                     tx_nr_rings_per_tc;
+       int                     tx_nr_rings_xdp;
+
+       int                     tx_wake_thresh;
+       int                     tx_push_thresh;
+       int                     tx_push_size;
+
+       u32                     cp_ring_size;
+       u32                     cp_ring_mask;
+       u32                     cp_bit;
+       int                     cp_nr_pages;
+       int                     cp_nr_rings;
+
+       int                     num_stat_ctxs;
+
+       /* grp_info indexed by completion ring index */
+       struct bnxt_ring_grp_info       *grp_info;
+       struct bnxt_vnic_info   *vnic_info;
+       int                     nr_vnics;
+       u32                     rss_hash_cfg;
+
+       u8                      max_tc;
+       u8                      max_lltc;       /* lossless TCs */
+       struct bnxt_queue_info  q_info[BNXT_MAX_QUEUE];
+
+       unsigned int            current_interval;
+#define BNXT_TIMER_INTERVAL    HZ
+
+       struct timer_list       timer;
+
+       unsigned long           state;
+#define BNXT_STATE_OPEN                0
+#define BNXT_STATE_IN_SP_TASK  1
+#define BNXT_STATE_READ_STATS  2
+
+       struct bnxt_irq *irq_tbl;
+       int                     total_irqs;
+       u8                      mac_addr[ETH_ALEN];
+
+#ifdef CONFIG_BNXT_DCB
+       struct ieee_pfc         *ieee_pfc;
+       struct ieee_ets         *ieee_ets;
+       u8                      dcbx_cap;
+       u8                      default_pri;
+#endif /* CONFIG_BNXT_DCB */
+
+       u32                     msg_enable;
+
+       /* HWRM firmware command channel buffers and sequencing */
+       u32                     hwrm_spec_code;
+       u16                     hwrm_cmd_seq;
+       u32                     hwrm_intr_seq_id;
+       void                    *hwrm_cmd_resp_addr;
+       dma_addr_t              hwrm_cmd_resp_dma_addr;
+       void                    *hwrm_dbg_resp_addr;
+       dma_addr_t              hwrm_dbg_resp_dma_addr;
+#define HWRM_DBG_REG_BUF_SIZE  128
+
+       struct rx_port_stats    *hw_rx_port_stats;
+       struct tx_port_stats    *hw_tx_port_stats;
+       dma_addr_t              hw_rx_port_stats_map;
+       dma_addr_t              hw_tx_port_stats_map;
+       int                     hw_port_stats_size;
+
+       u16                     hwrm_max_req_len;
+       int                     hwrm_cmd_timeout;
+       struct mutex            hwrm_cmd_lock;  /* serialize hwrm messages */
+       struct hwrm_ver_get_output      ver_resp;
+#define FW_VER_STR_LEN         32
+#define BC_HWRM_STR_LEN                21
+#define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
+       char                    fw_ver_str[FW_VER_STR_LEN];
+       __be16                  vxlan_port;
+       u8                      vxlan_port_cnt;
+       __le16                  vxlan_fw_dst_port_id;
+       __be16                  nge_port;
+       u8                      nge_port_cnt;
+       __le16                  nge_fw_dst_port_id;
+       u8                      port_partition_type;
+       u16                     br_mode;
+
+       /* interrupt coalescing parameters (ticks/buffers, with and
+        * without an interrupt pending)
+        */
+       u16                     rx_coal_ticks;
+       u16                     rx_coal_ticks_irq;
+       u16                     rx_coal_bufs;
+       u16                     rx_coal_bufs_irq;
+       u16                     tx_coal_ticks;
+       u16                     tx_coal_ticks_irq;
+       u16                     tx_coal_bufs;
+       u16                     tx_coal_bufs_irq;
+
+#define BNXT_USEC_TO_COAL_TIMER(x)     ((x) * 25 / 2)
+
+       u32                     stats_coal_ticks;
+#define BNXT_DEF_STATS_COAL_TICKS       1000000
+#define BNXT_MIN_STATS_COAL_TICKS        250000
+#define BNXT_MAX_STATS_COAL_TICKS       1000000
+
+       /* deferred work; the BNXT_*_SP_EVENT bits in sp_event record
+        * which tasks are pending for sp_task
+        */
+       struct work_struct      sp_task;
+       unsigned long           sp_event;
+#define BNXT_RX_MASK_SP_EVENT          0
+#define BNXT_RX_NTP_FLTR_SP_EVENT      1
+#define BNXT_LINK_CHNG_SP_EVENT                2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT        3
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT   4
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT   5
+#define BNXT_RESET_TASK_SP_EVENT       6
+#define BNXT_RST_RING_SP_EVENT         7
+#define BNXT_HWRM_PF_UNLOAD_SP_EVENT   8
+#define BNXT_PERIODIC_STATS_SP_EVENT   9
+#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
+#define BNXT_RESET_TASK_SILENT_SP_EVENT        11
+#define BNXT_GENEVE_ADD_PORT_SP_EVENT  12
+#define BNXT_GENEVE_DEL_PORT_SP_EVENT  13
+#define BNXT_LINK_SPEED_CHNG_SP_EVENT  14
+
+       struct bnxt_pf_info     pf;
+#ifdef CONFIG_BNXT_SRIOV
+       int                     nr_vfs;
+       struct bnxt_vf_info     vf;
+       wait_queue_head_t       sriov_cfg_wait;
+       bool                    sriov_cfg;
+#define BNXT_SRIOV_CFG_WAIT_TMO        msecs_to_jiffies(10000)
+#endif
+
+#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_NTP_FLTR_HASH_SIZE        512
+#define BNXT_NTP_FLTR_HASH_MASK        (BNXT_NTP_FLTR_HASH_SIZE - 1)
+       struct hlist_head       ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
+       spinlock_t              ntp_fltr_lock;  /* for hash table add, del */
+
+       unsigned long           *ntp_fltr_bmap;
+       int                     ntp_fltr_count;
+
+       struct bnxt_link_info   link_info;
+       struct ethtool_eee      eee;
+       u32                     lpi_tmr_lo;
+       u32                     lpi_tmr_hi;
+
+       u8                      num_tests;
+       struct bnxt_test_info   *test_info;
+
+       u8                      wol_filter_id;
+       u8                      wol;
+
+       u8                      num_leds;
+       struct bnxt_led_info    leds[BNXT_MAX_LED];
+
+#ifdef HAVE_NDO_XDP
+       struct bpf_prog         *xdp_prog;
+#endif
+
+       struct bnxt_ptp_cfg     *ptp_cfg;
+
+#ifndef PCIE_SRIOV_CONFIGURE
+       int                     req_vfs;
+       struct work_struct      iov_task;
+#endif
+};
+
+/* Convert a field offset within the port-stats structs into a u64
+ * index.  NOTE(review): the TX variant skips sizeof(rx_port_stats) plus
+ * a 512-byte pad, implying both blocks share one DMA buffer -- confirm
+ * against the stats allocation code in bnxt.c.
+ */
+#define BNXT_RX_STATS_OFFSET(counter)                  \
+       (offsetof(struct rx_port_stats, counter) / 8)
+
+#define BNXT_TX_STATS_OFFSET(counter)                  \
+       ((offsetof(struct tx_port_stats, counter) +     \
+         sizeof(struct rx_port_stats) + 512) / 8)
+
+#ifdef BNXT_PRIV_RX_BUSY_POLL
+/* Reset the busy-poll lock to IDLE so either NAPI or busy-poll callers
+ * can claim this bnapi again.
+ */
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* Called from the NAPI poll routine to claim ownership of a bnapi.
+ * Returns true only when this caller moved the state IDLE -> NAPI.
+ */
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+       return atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+                             BNXT_STATE_NAPI) == BNXT_STATE_IDLE;
+}
+
+/* Drop NAPI ownership of the bnapi (state back to IDLE). */
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* Called from the busy-poll path to claim ownership of a bnapi.
+ * Returns true only when this caller moved the state IDLE -> POLL.
+ */
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+       return atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+                             BNXT_STATE_POLL) == BNXT_STATE_IDLE;
+}
+
+/* Drop busy-poll ownership of the bnapi (state back to IDLE). */
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* True while a busy-poll caller currently owns this bnapi. */
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+       int state = atomic_read(&bnapi->poll_state);
+
+       return state == BNXT_STATE_POLL;
+}
+
+/* Park the bnapi in the DISABLE state: retry (sleeping between
+ * attempts) until we win the IDLE -> DISABLE transition, which locks
+ * out both NAPI and busy-poll users.
+ */
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+       for (;;) {
+               if (atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+                                  BNXT_STATE_DISABLE) == BNXT_STATE_IDLE)
+                       return;
+               usleep_range(500, 5000);
+       }
+}
+
+#else
+
+/* BNXT_PRIV_RX_BUSY_POLL not defined: busy polling is compiled out, so
+ * NAPI locking always succeeds and busy-poll locking always fails.
+ */
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+       return true;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+       return false;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+       return false;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+#endif
+
+#define I2C_DEV_ADDR_A0                                0xa0
+#define I2C_DEV_ADDR_A2                                0xa2
+#define SFP_EEPROM_SFF_8472_COMP_ADDR          0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE          1
+#define SFF_MODULE_ID_SFP                      0x3
+#define SFF_MODULE_ID_QSFP                     0xc
+#define SFF_MODULE_ID_QSFP_PLUS                        0xd
+#define SFF_MODULE_ID_QSFP28                   0x11
+#define BNXT_MAX_PHY_I2C_RESP_SIZE             64
+
+#define BDETBD_REG_BD_PRODUCER_IDX                     0x90000UL
+#define BDETBD_REG_BD_REQ_CONSUMER_IDX                 0x91000UL
+#define BDETBD_REG_BD_CMPL_CONSUMER_IDX                        0x92000UL
+#define BDERBD_REG_BD_PRODUCER_IDX                    0x410000UL
+#define BDERBD_REG_BD_REQ_CONSUMER_IDX                0x411000UL
+#define BDERBD_REG_BD_CMPL_CONSUMER_IDX                       0x412000UL
+#define CAG_REG_CAG_PRODUCER_INDEX_REG                0x302000UL
+#define CAG_REG_CAG_CONSUMER_INDEX_REG                0x303000UL
+#define CAG_REG_CAG_VECTOR_CTRL                               0x301000UL
+#define TDC_REG_INT_STS_0                             0x180020UL
+#define TDC_REG_TDC_DEBUG_CNTL                        0x180014UL
+#define TDC_REG_TDC_DEBUG_STATUS                      0x180018UL
+#define TDI_REG_DBG_DWORD_ENABLE                      0x100104UL
+#define TDI_REG_DBG_OUT_DATA                          0x100120UL
+#define TDI_REG_DBG_SELECT                            0x100100UL
+#define TE_DEC_REG_PORT_CURRENT_CREDIT_REG           0x2401300UL
+#define RDI_REG_RDI_DEBUG_CONTROL_REG                 0x27001cUL
+#define RDI_REG_RDI_DEBUG_STATUS_REG                  0x270020UL
+
+/* Number of free TX descriptors on @txr (ring size minus in-flight). */
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+       u32 used;
+
+       /* Force a fresh read of the producer/consumer indices. */
+       barrier();
+
+       used = (txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask;
+       return bp->tx_ring_size - used;
+}
+
+/* Driver-core entry points shared with the other bnxt source files
+ * (ethtool/sriov/dcb/ulp/xdp).
+ */
+extern const u16 bnxt_lhint_arr[];
+
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+                      u16 prod, gfp_t gfp);
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
+void bnxt_set_tpa_flags(struct bnxt *bp);
+void bnxt_set_ring_params(struct bnxt *);
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+/* HWRM request helpers; the _silent variant suppresses error logging --
+ * NOTE(review): inferred from naming, confirm in bnxt.c.
+ */
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
+int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+                                    int bmap_size);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
+int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_func_qcaps(struct bnxt *);
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
+void bnxt_tx_disable(struct bnxt *);
+void bnxt_tx_enable(struct bnxt *);
+int bnxt_hwrm_set_pause(struct bnxt *);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *);
+int bnxt_hwrm_free_wol_fltr(struct bnxt *);
+int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_half_open_nic(struct bnxt *bp);
+void bnxt_half_close_nic(struct bnxt *bp);
+int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
+                      int tx_xdp);
+#if defined(HAVE_SETUP_TC) || defined(CONFIG_BNXT_DCB)
+int bnxt_setup_mq_tc(struct net_device *, u8);
+#endif
+int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
+void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+#endif
diff --git a/ubuntu/bnxt/bnxt_compat.h b/ubuntu/bnxt/bnxt_compat.h
new file mode 100644 (file)
index 0000000..a00dccf
--- /dev/null
@@ -0,0 +1,776 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#if !defined(NEW_FLOW_KEYS) && defined(HAVE_FLOW_KEYS)
+#include <net/flow_keys.h>
+#endif
+#include <linux/sched.h>
+
+#ifndef SPEED_20000
+#define SPEED_20000            20000
+#endif
+
+#ifndef SPEED_25000
+#define SPEED_25000            25000
+#endif
+
+#ifndef SPEED_40000
+#define SPEED_40000            40000
+#endif
+
+#ifndef SPEED_50000
+#define SPEED_50000            50000
+#endif
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN          -1
+#endif
+
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN         0xff
+#endif
+
+#ifndef PORT_DA
+#define PORT_DA                        0x05
+#endif
+
+#ifndef PORT_NONE
+#define PORT_NONE              0xef
+#endif
+
+#if !defined(SUPPORTED_40000baseCR4_Full)
+#define SUPPORTED_40000baseCR4_Full    (1 << 24)
+
+#define ADVERTISED_40000baseCR4_Full   (1 << 24)
+#endif
+
+#if !defined(IPV4_FLOW)
+#define IPV4_FLOW      0x10
+#endif
+
+#if !defined(IPV6_FLOW)
+#define IPV6_FLOW      0x11
+#endif
+
+#if defined(HAVE_ETH_GET_HEADLEN) || (LINUX_VERSION_CODE > 0x040900)
+#define BNXT_RX_PAGE_MODE_SUPPORT      1
+#endif
+
+#if !defined(ETH_P_8021AD)
+#define ETH_P_8021AD           0x88A8
+#endif
+
+#if !defined(ETH_P_ROCE)
+#define ETH_P_ROCE             0x8915
+#endif
+
+/* RoCEv2 destination UDP port (IANA 4791).  The guard must test the
+ * same macro we define (ROCE_V2_UDP_DPORT) -- the original tested
+ * ROCE_V2_UDP_PORT, which would cause a redefinition on kernels that
+ * already provide ROCE_V2_UDP_DPORT (e.g. via rdma/ib_verbs.h).
+ */
+#if !defined(ROCE_V2_UDP_DPORT)
+#define ROCE_V2_UDP_DPORT      4791
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL
+#define NETIF_F_GSO_UDP_TUNNEL 0
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM    0
+#endif
+
+#ifndef NETIF_F_GSO_GRE
+#define NETIF_F_GSO_GRE                0
+#endif
+
+#ifndef NETIF_F_GSO_GRE_CSUM
+#define NETIF_F_GSO_GRE_CSUM   0
+#endif
+
+#ifndef NETIF_F_GSO_IPIP
+#define NETIF_F_GSO_IPIP       0
+#endif
+
+#ifndef NETIF_F_GSO_SIT
+#define NETIF_F_GSO_SIT                0
+#endif
+
+#ifndef NETIF_F_GSO_IPXIP4
+#define NETIF_F_GSO_IPXIP4     (NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT)
+#endif
+
+#ifndef NETIF_F_GSO_PARTIAL
+#define NETIF_F_GSO_PARTIAL    0
+#else
+#define HAVE_GSO_PARTIAL_FEATURES      1
+#endif
+
+/* Tie rx checksum offload to tx checksum offload for older kernels. */
+#ifndef NETIF_F_RXCSUM
+#define NETIF_F_RXCSUM         NETIF_F_IP_CSUM
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE         0
+#endif
+
+#ifndef NETIF_F_RXHASH
+#define NETIF_F_RXHASH         0
+#else
+#define HAVE_NETIF_F_RXHASH
+#endif
+
+#ifndef HAVE_SKB_GSO_UDP_TUNNEL_CSUM
+#ifndef HAVE_SKB_GSO_UDP_TUNNEL
+#define SKB_GSO_UDP_TUNNEL 0
+#endif
+#define SKB_GSO_UDP_TUNNEL_CSUM SKB_GSO_UDP_TUNNEL
+#endif
+
+#ifndef BRIDGE_MODE_VEB
+#define BRIDGE_MODE_VEB                0
+#endif
+
+#ifndef BRIDGE_MODE_VEPA
+#define BRIDGE_MODE_VEPA       1
+#endif
+
+#ifndef BRIDGE_MODE_UNDEF
+#define BRIDGE_MODE_UNDEF      0xffff
+#endif
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(mapping) DECLARE_PCI_UNMAP_ADDR(mapping)
+#endif
+
+#ifndef dma_unmap_addr_set
+#define dma_unmap_addr_set pci_unmap_addr_set
+#endif
+
+#ifndef dma_unmap_addr
+#define dma_unmap_addr pci_unmap_addr
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) 0
+#endif
+
+#if defined(RHEL_RELEASE_CODE) && (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,3))
+#if defined(CONFIG_X86_64) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
+#endif
+#endif
+
+#ifdef HAVE_NDO_SET_VF_VLAN_RH73
+#define ndo_set_vf_vlan ndo_set_vf_vlan_rh73
+#endif
+
+#ifndef ETHTOOL_GEEE
+/* Fallback definition of struct ethtool_eee for kernels whose
+ * ethtool.h predates the ETHTOOL_GEEE command; layout mirrors the
+ * upstream uapi struct.
+ */
+struct ethtool_eee {
+       __u32   cmd;
+       __u32   supported;
+       __u32   advertised;
+       __u32   lp_advertised;
+       __u32   eee_active;
+       __u32   eee_enabled;
+       __u32   tx_lpi_enabled;
+       __u32   tx_lpi_timer;
+       __u32   reserved[2];
+};
+#endif
+
+#ifndef HAVE_SKB_FRAG_PAGE
+/* Compat accessors for kernels that predate the skb_frag_t accessor
+ * API: old kernels exposed the page pointer and offset as direct
+ * struct members, so reimplement the helpers on top of those fields.
+ */
+static inline struct page *skb_frag_page(const skb_frag_t *frag)
+{
+       return frag->page;
+}
+
+/* Return a kernel virtual address for the fragment's data, or NULL
+ * when the page has no direct mapping (page_address() returned NULL).
+ */
+static inline void *skb_frag_address_safe(const skb_frag_t *frag)
+{
+       void *ptr = page_address(skb_frag_page(frag));
+       if (unlikely(!ptr))
+               return NULL;
+
+       return ptr + frag->page_offset;
+}
+
+static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
+{
+       frag->page = page;
+}
+
+/* NOTE(review): this macro ignores its x/y/z arguments, captures a
+ * local "bp" that must be in scope at every call site, and always maps
+ * PCI_DMA_TODEVICE regardless of the direction argument -- confirm all
+ * callers are TX-path only. */
+#define skb_frag_dma_map(x, frag, y, len, z) \
+       pci_map_page(bp->pdev, (frag)->page, \
+                    (frag)->page_offset, (len), PCI_DMA_TODEVICE)
+#endif
+
+#ifndef HAVE_PCI_VFS_ASSIGNED
+/* Fallback for kernels without pci_vfs_assigned(): report that no VFs
+ * are assigned to guests, so SR-IOV teardown is never blocked. */
+static inline int pci_vfs_assigned(struct pci_dev *dev)
+{
+       return 0;
+}
+#endif
+
+#ifndef HAVE_PCI_NUM_VF
+/* Private PCI core header needed for struct pci_sriov / dev->sriov. */
+#include <../drivers/pci/pci.h>
+
+/* Number of virtual functions currently enabled on this device;
+ * 0 when the device is not a physical function. */
+static inline int pci_num_vf(struct pci_dev *dev)
+{
+       if (!dev->is_physfn)
+               return 0;
+
+       return dev->sriov->nr_virtfn;
+}
+#endif
+
+#ifndef SKB_ALLOC_NAPI
+/* Fallback napi_alloc_skb() for kernels without NAPI skb allocation:
+ * allocate from the NAPI context's netdev and reserve the standard
+ * headroom so the skb is laid out like the modern helper's. */
+static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
+                                            unsigned int length)
+{
+       struct sk_buff *skb;
+
+       /* Over-allocate so the reserved headroom does not eat into the
+        * caller's requested length. */
+       length += NET_SKB_PAD + NET_IP_ALIGN;
+       skb = netdev_alloc_skb(napi->dev, length);
+
+       if (likely(skb))
+               skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+       return skb;
+}
+#endif
+
+#ifndef HAVE_SKB_HASH_TYPE
+
+enum pkt_hash_types {
+       PKT_HASH_TYPE_NONE,     /* Undefined type */
+       PKT_HASH_TYPE_L2,       /* Input: src_MAC, dest_MAC */
+       PKT_HASH_TYPE_L3,       /* Input: src_IP, dst_IP */
+       PKT_HASH_TYPE_L4,       /* Input: src_IP, dst_IP, src_port, dst_port */
+};
+
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+#ifdef HAVE_NETIF_F_RXHASH
+       skb->rxhash = hash;
+#endif
+}
+
+#endif
+
+#define GET_NET_STATS(x) (unsigned long)le64_to_cpu(x)
+
+#if !defined(NETDEV_RX_FLOW_STEER) || !defined(HAVE_FLOW_KEYS) || (LINUX_VERSION_CODE < 0x030300) || defined(NO_NETDEV_CPU_RMAP)
+#undef CONFIG_RFS_ACCEL
+#endif
+
+#if !defined(IEEE_8021QAZ_APP_SEL_DGRAM) || !defined(CONFIG_DCB)
+#undef CONFIG_BNXT_DCB
+#endif
+
+#ifdef NETDEV_UDP_TUNNEL_PUSH_INFO
+#define HAVE_NDO_UDP_TUNNEL    1
+#endif
+
+#ifdef HAVE_NDO_XDP
+#define CONFIG_BNXT_XDP        1
+#endif
+
+#ifndef NETDEV_HW_FEATURES
+#define hw_features features
+#endif
+
+#ifndef HAVE_NETDEV_FEATURES_T
+#ifdef HAVE_NDO_FIX_FEATURES
+typedef u32 netdev_features_t;
+#else
+typedef unsigned long netdev_features_t;
+#endif
+#endif
+
+#if !defined(IFF_UNICAST_FLT)
+#define IFF_UNICAST_FLT 0
+#endif
+
+#ifndef HAVE_NEW_BUILD_SKB
+#define build_skb(data, frag) build_skb(data)
+#endif
+
+#ifndef __rcu
+#define __rcu
+#endif
+
+#ifndef rcu_dereference_protected
+#define rcu_dereference_protected(p, c)        \
+       rcu_dereference((p))
+#endif
+
+#ifndef rcu_access_pointer
+#define rcu_access_pointer rcu_dereference
+#endif
+
+#ifndef rtnl_dereference
+#define rtnl_dereference(p)            \
+       rcu_dereference_protected(p, lockdep_rtnl_is_held())
+#endif
+
+#ifndef RCU_INIT_POINTER
+#define RCU_INIT_POINTER(p, v) \
+       p = (typeof(*v) __force __rcu *)(v)
+#endif
+
+#ifdef HAVE_OLD_HLIST
+#define __hlist_for_each_entry_rcu(f, n, h, m) \
+       hlist_for_each_entry_rcu(f, n, h, m)
+#define __hlist_for_each_entry_safe(f, n, t, h, m) \
+       hlist_for_each_entry_safe(f, n, t, h, m)
+#else
+#define __hlist_for_each_entry_rcu(f, n, h, m) \
+       hlist_for_each_entry_rcu(f, h, m)
+#define __hlist_for_each_entry_safe(f, n, t, h, m) \
+       hlist_for_each_entry_safe(f, t, h, m)
+#endif
+
+#ifndef skb_vlan_tag_present
+#define skb_vlan_tag_present(skb) vlan_tx_tag_present(skb)
+#define skb_vlan_tag_get(skb) vlan_tx_tag_get(skb)
+#endif
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT                13
+#endif
+
+#ifndef NETIF_F_HW_VLAN_CTAG_TX
+#define NETIF_F_HW_VLAN_CTAG_TX NETIF_F_HW_VLAN_TX
+#define NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_RX
+/* 802.1AD not supported on older kernels */
+#define NETIF_F_HW_VLAN_STAG_TX 0
+#define NETIF_F_HW_VLAN_STAG_RX 0
+
+#define __vlan_hwaccel_put_tag(skb, proto, tag) \
+       if (proto == ntohs(ETH_P_8021Q))        \
+               __vlan_hwaccel_put_tag(skb, tag)
+
+#define vlan_proto protocol
+
+#if defined(HAVE_VLAN_RX_REGISTER)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define OLD_VLAN       1
+#define OLD_VLAN_VALID (1 << 31)
+#endif
+#endif
+
+#endif
+
+#ifndef HAVE_NETDEV_UPDATE_FEATURES
+static inline void netdev_update_features(struct net_device *dev)
+{
+       /* Do nothing, since we can't set default VLAN on these old kernels. */
+}
+#endif
+
+#if !defined(netdev_printk) && (LINUX_VERSION_CODE < 0x020624)
+
+#ifndef HAVE_NETDEV_NAME
+static inline const char *netdev_name(const struct net_device *dev)
+{
+       if (dev->reg_state != NETREG_REGISTERED)
+               return "(unregistered net_device)";
+       return dev->name;
+}
+#endif
+
+#define NET_PARENT_DEV(netdev)  ((netdev)->dev.parent)
+
+#define netdev_printk(level, netdev, format, args...)          \
+       dev_printk(level, NET_PARENT_DEV(netdev),               \
+                  "%s: " format,                               \
+                  netdev_name(netdev), ##args)
+
+#endif
+
+#ifndef netdev_err
+#define netdev_err(dev, format, args...)                       \
+       netdev_printk(KERN_ERR, dev, format, ##args)
+#endif
+
+#ifndef netdev_info
+#define netdev_info(dev, format, args...)                      \
+       netdev_printk(KERN_INFO, dev, format, ##args)
+#endif
+
+#ifndef netdev_warn
+#define netdev_warn(dev, format, args...)                      \
+       netdev_printk(KERN_WARNING, dev, format, ##args)
+#endif
+
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev)   ((dev)->uc.count)
+#endif
+
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev) \
+       list_for_each_entry(ha, &dev->uc.list, list)
+#endif
+
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev) \
+       for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()        smp_mb()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() smp_mb()
+#endif
+
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#if defined(HAVE_NAPI_HASH_ADD) && defined(NETDEV_BUSY_POLL)
+#define BNXT_PRIV_RX_BUSY_POLL 1
+#endif
+#endif
+
+#if !defined(CONFIG_PTP_1588_CLOCK) && !defined(CONFIG_PTP_1588_CLOCK_MODULE)
+#undef HAVE_IEEE1588_SUPPORT
+#endif
+
+#if !defined(HAVE_NAPI_HASH_DEL)
+static inline void napi_hash_del(struct napi_struct *napi)
+{
+}
+#endif
+
+#if !defined(LL_FLUSH_FAILED) || !defined(HAVE_NAPI_HASH_ADD)
+static inline void napi_hash_add(struct napi_struct *napi)
+{
+}
+#endif
+
+#ifndef HAVE_SET_COHERENT_MASK
+/* Fallback dma_set_coherent_mask(): route through the legacy PCI API.
+ * NOTE(review): assumes "dev" is always embedded in a struct pci_dev,
+ * which holds for this PCI-only driver. */
+static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+       struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+
+       return pci_set_consistent_dma_mask(pdev, mask);
+}
+#endif
+
+#ifndef HAVE_SET_MASK_AND_COHERENT
+/* Fallback dma_set_mask_and_coherent(): set the streaming mask first
+ * and, only if that succeeds, also try the coherent mask.  The coherent
+ * call's result is deliberately ignored; only the streaming-mask result
+ * is returned, matching the upstream helper's fallback behavior. */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+       int rc = dma_set_mask(dev, mask);
+       if (rc == 0)
+               dma_set_coherent_mask(dev, mask);
+       return rc;
+}
+#endif
+
+#ifndef HAVE_IFLA_TX_RATE
+#define ndo_set_vf_rate ndo_set_vf_tx_rate
+#endif
+
+#ifndef HAVE_PRANDOM_BYTES
+#define prandom_bytes get_random_bytes
+#endif
+
+#ifndef rounddown
+#define rounddown(x, y) (                              \
+{                                                      \
+       typeof(x) __x = (x);                            \
+       __x - (__x % (y));                              \
+}                                                      \
+)
+#endif
+
+#ifdef NO_SKB_FRAG_SIZE
+static inline unsigned int skb_frag_size(const skb_frag_t *frag)
+{
+       return frag->size;
+}
+#endif
+
+#ifndef HAVE_SKB_CHECKSUM_NONE_ASSERT
+static inline void skb_checksum_none_assert(struct sk_buff *skb)
+{
+       skb->ip_summed = CHECKSUM_NONE;
+}
+#endif
+
+#ifndef HAVE_NEW_FLOW_DISSECTOR_WITH_FLAGS
+#define skb_flow_dissect_flow_keys(skb, fkeys, flags)  \
+       skb_flow_dissect_flow_keys(skb, fkeys)
+#endif
+
+#ifndef HAVE_ETHER_ADDR_EQUAL
+static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+       return !compare_ether_addr(addr1, addr2);
+}
+#endif
+
+#ifndef HAVE_ETHER_ADDR_COPY
+static inline void ether_addr_copy(u8 *dst, const u8 *src)
+{
+       memcpy(dst, src, ETH_ALEN);
+}
+#endif
+
+#ifndef HAVE_ETH_BROADCAST_ADDR
+static inline void eth_broadcast_addr(u8 *addr)
+{
+       memset(addr, 0xff, ETH_ALEN);
+}
+#endif
+
+#ifndef HAVE_ETH_HW_ADDR_RANDOM
+static inline void eth_hw_addr_random(struct net_device *dev)
+{
+#if defined(NET_ADDR_RANDOM)
+       dev->addr_assign_type = NET_ADDR_RANDOM;
+#endif
+       random_ether_addr(dev->dev_addr);
+}
+#endif
+
+#ifndef HAVE_NETDEV_TX_QUEUE_CTRL
+static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+                               unsigned int bytes)
+{
+}
+
+static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
+                               unsigned int pkts, unsigned int bytes)
+{
+}
+
+static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+{
+}
+#endif
+
+#ifndef HAVE_NETIF_SET_REAL_NUM_RX
+static inline int netif_set_real_num_rx_queues(struct net_device *dev,
+                               unsigned int rxq)
+{
+       return 0;
+}
+#endif
+
+#ifndef HAVE_NETIF_SET_REAL_NUM_TX
+static inline void netif_set_real_num_tx_queues(struct net_device *dev,
+                                               unsigned int txq)
+{
+       dev->real_num_tx_queues = txq;
+}
+#endif
+
+#ifndef HAVE_NETIF_GET_DEFAULT_RSS
+static inline int netif_get_num_default_rss_queues(void)
+{
+       return min_t(int, 8, num_online_cpus());
+}
+#endif
+
+#if !defined(HAVE_TCP_V6_CHECK)
+static __inline__ __sum16 tcp_v6_check(int len,
+                               const struct in6_addr *saddr,
+                               const struct in6_addr *daddr,
+                               __wsum base)
+{
+       return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
+}
+#endif
+
+#ifndef HAVE_USLEEP_RANGE
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+       if (min < 1000)
+               udelay(min);
+       else
+               msleep(min / 1000);
+}
+#endif
+
+#ifndef HAVE_GET_NUM_TC
+static inline int netdev_get_num_tc(struct net_device *dev)
+{
+       return 0;
+}
+
+static inline void netdev_reset_tc(struct net_device *dev)
+{
+}
+
+static inline int netdev_set_tc_queue(struct net_device *devi, u8 tc,
+                                     u16 count, u16 offset)
+{
+       return 0;
+}
+#endif
+
+#ifndef HAVE_VZALLOC
+/* Fallback vzalloc(): vmalloc() the region and clear it before
+ * returning, matching the zeroing semantics of the modern helper.
+ * Returns NULL when the underlying allocation fails. */
+static inline void *vzalloc(size_t size)
+{
+       void *p = vmalloc(size);
+
+       if (!p)
+               return NULL;
+       memset(p, 0, size);
+       return p;
+}
+#endif
+
+#ifndef ETH_MODULE_SFF_8436
+#define ETH_MODULE_SFF_8436             0x4
+#endif
+
+#ifndef ETH_MODULE_SFF_8436_LEN
+#define ETH_MODULE_SFF_8436_LEN         256
+#endif
+
+#ifndef ETH_MODULE_SFF_8636
+#define ETH_MODULE_SFF_8636             0x3
+#endif
+
+#ifndef ETH_MODULE_SFF_8636_LEN
+#define ETH_MODULE_SFF_8636_LEN         256
+#endif
+
+#ifndef HAVE_PCIE_GET_MINIMUM_LINK
+/* Compat implementation of pcie_get_minimum_link() for kernels that
+ * lack it: report the slowest speed and narrowest width on the path
+ * from the device toward the root complex. */
+enum pcie_link_width {
+       PCIE_LNK_WIDTH_UNKNOWN          = 0xFF,
+};
+
+#ifndef HAVE_PCIE_BUS_SPEED
+enum pci_bus_speed {
+       PCIE_SPEED_2_5GT                = 0x14,
+       PCIE_SPEED_5_0GT                = 0x15,
+       PCIE_SPEED_8_0GT                = 0x16,
+       PCI_SPEED_UNKNOWN               = 0xFF,
+};
+#endif
+
+/* Map the 4-bit PCI_EXP_LNKSTA_CLS encoding to a bus-speed value;
+ * only encodings 1-3 (2.5 / 5.0 / 8.0 GT/s) are defined here. */
+static const unsigned char pcie_link_speed[] = {
+       PCI_SPEED_UNKNOWN,              /* 0 */
+       PCIE_SPEED_2_5GT,               /* 1 */
+       PCIE_SPEED_5_0GT,               /* 2 */
+       PCIE_SPEED_8_0GT,               /* 3 */
+       PCI_SPEED_UNKNOWN,              /* 4 */
+       PCI_SPEED_UNKNOWN,              /* 5 */
+       PCI_SPEED_UNKNOWN,              /* 6 */
+       PCI_SPEED_UNKNOWN,              /* 7 */
+       PCI_SPEED_UNKNOWN,              /* 8 */
+       PCI_SPEED_UNKNOWN,              /* 9 */
+       PCI_SPEED_UNKNOWN,              /* A */
+       PCI_SPEED_UNKNOWN,              /* B */
+       PCI_SPEED_UNKNOWN,              /* C */
+       PCI_SPEED_UNKNOWN,              /* D */
+       PCI_SPEED_UNKNOWN,              /* E */
+       PCI_SPEED_UNKNOWN               /* F */
+};
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT       4
+#endif
+
+#ifdef HAVE_PCIE_CAPABILITY_READ_WORD
+/* Walk up the bus hierarchy, reading each link's status register and
+ * keeping the minimum speed/width seen.  Both outputs start at 0xFF
+ * ("unknown"), so any real link value replaces them. */
+static inline int pcie_get_minimum_link(struct pci_dev *dev,
+                                       enum pci_bus_speed *speed,
+                                       enum pcie_link_width *width)
+{
+       int ret;
+
+       *speed = PCI_SPEED_UNKNOWN;
+       *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+       while (dev) {
+               u16 lnksta;
+               enum pci_bus_speed next_speed;
+               enum pcie_link_width next_width;
+
+               ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+               if (ret)
+                       return ret;
+
+               next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+               next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+                       PCI_EXP_LNKSTA_NLW_SHIFT;
+
+               if (next_speed < *speed)
+                       *speed = next_speed;
+
+               if (next_width < *width)
+                       *width = next_width;
+
+               /* Move to the upstream bridge. */
+               dev = dev->bus->self;
+       }
+
+       return 0;
+}
+#else
+/* No pcie_capability_read_word(): read the link status directly from
+ * the device's own PCIe capability block.
+ * NOTE(review): hard-codes the capability offset 0xAC, which is only
+ * valid for these Broadcom NetXtreme devices -- do not reuse. */
+static inline int pcie_get_minimum_link(struct pci_dev *dev,
+                                       enum pci_bus_speed *speed,
+                                       enum pcie_link_width *width)
+{
+#define BNXT_PCIE_CAP          0xAC
+       u16 lnksta;
+       int ret;
+
+       ret = pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKSTA,
+                                  &lnksta);
+       if (ret)
+               return ret;
+
+       *speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+       *width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+
+       return 0;
+}
+#endif
+#endif
+
+#ifndef HAVE_PCI_IS_BRIDGE
+static inline bool pci_is_bridge(struct pci_dev *dev)
+{
+       return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+               dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
+}
+#endif
+
+#ifndef HAVE_NDO_XDP
+struct netdev_xdp;
+#endif
+
+#ifndef XDP_PACKET_HEADROOM
+#define XDP_PACKET_HEADROOM    0
+#endif
+
+#ifndef HAVE_BPF_TRACE
+#define trace_xdp_exception(dev, xdp_prog, act)
+#endif
diff --git a/ubuntu/bnxt/bnxt_dcb.c b/ubuntu/bnxt/bnxt_dcb.c
new file mode 100644 (file)
index 0000000..0c502cd
--- /dev/null
@@ -0,0 +1,610 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_dcb.h"
+
+#ifdef CONFIG_BNXT_DCB
+static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+       struct hwrm_queue_pri2cos_cfg_input req = {0};
+       int rc = 0, i;
+       u8 *pri2cos;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
+       req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
+                               QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
+
+       pri2cos = &req.pri0_cos_queue_id;
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               req.enables |= cpu_to_le32(
+                       QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
+
+               pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
+       }
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       return rc;
+}
+
+static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+       struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_queue_pri2cos_qcfg_input req = {0};
+       int rc = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
+       req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               u8 *pri2cos = &resp->pri0_cos_queue_id;
+               int i, j;
+
+               for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+                       u8 queue_id = pri2cos[i];
+
+                       for (j = 0; j < bp->max_tc; j++) {
+                               if (bp->q_info[j].queue_id == queue_id) {
+                                       ets->prio_tc[i] = j;
+                                       break;
+                               }
+                       }
+               }
+       }
+       return rc;
+}
+
+static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
+                                     u8 max_tc)
+{
+       struct hwrm_queue_cos2bw_cfg_input req = {0};
+       struct bnxt_cos2bw_cfg cos2bw;
+       int rc = 0, i;
+       void *data;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
+       data = &req.unused_0;
+       for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
+               req.enables |= cpu_to_le32(
+                       QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+
+               memset(&cos2bw, 0, sizeof(cos2bw));
+               cos2bw.queue_id = bp->q_info[i].queue_id;
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
+                       cos2bw.tsa =
+                               QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
+                       cos2bw.pri_lvl = i;
+               } else {
+                       cos2bw.tsa =
+                               QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
+                       cos2bw.bw_weight = ets->tc_tx_bw[i];
+               }
+               memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
+               if (i == 0) {
+                       req.queue_id0 = cos2bw.queue_id;
+                       req.unused_0 = 0;
+               }
+       }
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       return rc;
+}
+
+static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
+{
+       struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_queue_cos2bw_qcfg_input req = {0};
+       struct bnxt_cos2bw_cfg cos2bw;
+       void *data;
+       int rc, i;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               return rc;
+
+       data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
+       for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+               int j;
+
+               memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
+               if (i == 0)
+                       cos2bw.queue_id = resp->queue_id0;
+
+               for (j = 0; j < bp->max_tc; j++) {
+                       if (bp->q_info[j].queue_id != cos2bw.queue_id)
+                               continue;
+                       if (cos2bw.tsa ==
+                           QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
+                               ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
+                       } else {
+                               ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
+                               ets->tc_tx_bw[j] = cos2bw.bw_weight;
+                       }
+               }
+       }
+       return 0;
+}
+
+/* Reprogram the hardware CoS queues so that exactly the queues named in
+ * lltc_mask use the lossless service profile.  All currently-lossless
+ * queues are demoted to lossy first, then the requested set is promoted.
+ * TX is paused for the duration when the interface is up.
+ * NOTE(review): the hwrm_send_message() results are ignored here, so a
+ * failed queue reconfig is silently reflected in q_info anyway.
+ * Always returns 0.
+ */
+static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
+{
+       struct hwrm_queue_cfg_input req = {0};
+       int i;
+
+       if (netif_running(bp->dev))
+               bnxt_tx_disable(bp);
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
+       req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
+       req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
+
+       /* Configure lossless queues to lossy first */
+       req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+       for (i = 0; i < bp->max_tc; i++) {
+               if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
+                       req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
+                       hwrm_send_message(bp, &req, sizeof(req),
+                                         HWRM_CMD_TIMEOUT);
+                       bp->q_info[i].queue_profile =
+                               QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+               }
+       }
+
+       /* Now configure desired queues to lossless */
+       req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+       for (i = 0; i < bp->max_tc; i++) {
+               if (lltc_mask & (1 << i)) {
+                       req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
+                       hwrm_send_message(bp, &req, sizeof(req),
+                                         HWRM_CMD_TIMEOUT);
+                       bp->q_info[i].queue_profile =
+                               QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+               }
+       }
+       if (netif_running(bp->dev))
+               bnxt_tx_enable(bp);
+
+       return 0;
+}
+
+static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
+{
+       struct hwrm_queue_pfcenable_cfg_input req = {0};
+       struct ieee_ets *my_ets = bp->ieee_ets;
+       unsigned int tc_mask = 0, pri_mask = 0;
+       u8 i, pri, lltc_count = 0;
+       bool need_q_recfg = false;
+       int rc;
+
+       if (!my_ets)
+               return -EINVAL;
+
+       for (i = 0; i < bp->max_tc; i++) {
+               for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
+                       if ((pfc->pfc_en & (1 << pri)) &&
+                           (my_ets->prio_tc[pri] == i)) {
+                               pri_mask |= 1 << pri;
+                               tc_mask |= 1 << i;
+                       }
+               }
+               if (tc_mask & (1 << i))
+                       lltc_count++;
+       }
+       if (lltc_count > bp->max_lltc)
+               return -EINVAL;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
+       req.flags = cpu_to_le32(pri_mask);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               return rc;
+
+       for (i = 0; i < bp->max_tc; i++) {
+               if (tc_mask & (1 << i)) {
+                       if (!BNXT_LLQ(bp->q_info[i].queue_profile))
+                               need_q_recfg = true;
+               }
+       }
+
+       if (need_q_recfg)
+               rc = bnxt_hwrm_queue_cfg(bp, tc_mask);
+
+       return rc;
+}
+
+static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
+{
+       struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_queue_pfcenable_qcfg_input req = {0};
+       u8 pri_mask;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               return rc;
+
+       pri_mask = le32_to_cpu(resp->flags);
+       pfc->pfc_en = pri_mask;
+       return 0;
+}
+
+/* Add or remove one DCBX application-priority entry in the firmware's
+ * host-operational table.  The current table is fetched into a DMA
+ * buffer, edited in place, and written back.
+ * Returns 0 on success (or when the HWRM spec predates this feature),
+ * negative errno on failure.
+ */
+static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
+                                 bool add)
+{
+       struct hwrm_fw_set_structured_data_input set = {0};
+       struct hwrm_fw_get_structured_data_input get = {0};
+       struct hwrm_struct_data_dcbx_app *fw_app;
+       struct hwrm_struct_hdr *data;
+       dma_addr_t mapping;
+       size_t data_len;
+       int rc, n, i;
+
+       if (bp->hwrm_spec_code < 0x10601)
+               return 0;
+
+       /* The buffer holds the header plus at most 8 app entries. */
+       n = 8;
+       data_len = sizeof(*data) + sizeof(*fw_app) * n;
+       data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
+                                 GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       memset(data, 0, data_len);
+       bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
+       get.dest_data_addr = cpu_to_le64(mapping);
+       get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
+       get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+       get.count = 0;
+       rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto set_app_exit;
+
+       fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);
+
+       if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
+               rc = -ENODEV;
+               goto set_app_exit;
+       }
+
+       /* Look for an existing entry matching this app.  On loop exit,
+        * i == n means "not found" and fw_app points one past the last
+        * entry; on break, fw_app points at entry i. */
+       n = data->count;
+       for (i = 0; i < n; i++, fw_app++) {
+               if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
+                   fw_app->protocol_selector == app->selector &&
+                   fw_app->priority == app->priority) {
+                       if (add)
+                               goto set_app_exit;      /* already present */
+                       else
+                               break;
+               }
+       }
+       if (add) {
+               /* Appending writes at fw_app (index n); the DMA buffer
+                * only has room for 8 entries, so refuse to grow past
+                * that instead of overrunning the allocation. */
+               if (n >= 8) {
+                       rc = -ENOMEM;
+                       goto set_app_exit;
+               }
+               n++;
+               fw_app->protocol_id = cpu_to_be16(app->protocol);
+               fw_app->protocol_selector = app->selector;
+               fw_app->priority = app->priority;
+               fw_app->valid = 1;
+       } else {
+               size_t len = 0;
+
+               /* not found, nothing to delete */
+               if (n == i)
+                       goto set_app_exit;
+
+               /* Close the gap left by the deleted entry, then clear
+                * the now-unused last slot.  fw_app points at entry i,
+                * so the last slot (absolute index n after decrement)
+                * is at offset n - i from it. */
+               len = (n - 1 - i) * sizeof(*fw_app);
+               if (len)
+                       memmove(fw_app, fw_app + 1, len);
+               n--;
+               memset(fw_app + (n - i), 0, sizeof(*fw_app));
+       }
+       data->count = n;
+       data->len = cpu_to_le16(sizeof(*fw_app) * n);
+       data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+
+       bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
+       set.src_data_addr = cpu_to_le64(mapping);
+       set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
+       set.hdr_cnt = 1;
+       rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
+       if (rc)
+               rc = -EIO;
+
+set_app_exit:
+       dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
+       return rc;
+}
+
+/* Validate an IEEE 802.1Qaz ETS configuration against device limits.
+ * On success writes the number of traffic classes in use (highest
+ * mapped TC + 1) to *tc and returns 0.  Returns -EINVAL when a mapping
+ * exceeds the device's TC count or the ETS bandwidths sum past 100%,
+ * and -ENOTSUPP for TSA algorithms other than strict/ETS.
+ * NOTE(review): the "> bp->max_tc" comparisons allow prio_tc[i] ==
+ * max_tc even though valid TC indices look like 0..max_tc-1 -- confirm
+ * whether this off-by-one is intentional.
+ */
+static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
+{
+       int total_ets_bw = 0;
+       u8 max_tc = 0;
+       int i;
+
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               if (ets->prio_tc[i] > bp->max_tc) {
+                       netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
+                                  ets->prio_tc[i]);
+                       return -EINVAL;
+               }
+               if (ets->prio_tc[i] > max_tc)
+                       max_tc = ets->prio_tc[i];
+
+               if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
+                       return -EINVAL;
+
+               switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_STRICT:
+                       break;
+               case IEEE_8021QAZ_TSA_ETS:
+                       total_ets_bw += ets->tc_tx_bw[i];
+                       break;
+               default:
+                       return -ENOTSUPP;
+               }
+       }
+       /* ETS bandwidth shares must not sum to more than 100%. */
+       if (total_ets_bw > 100)
+               return -EINVAL;
+
+       *tc = max_tc + 1;
+       return 0;
+}
+
+/* dcbnl ieee_getets hook: report the current ETS configuration.  When
+ * no host configuration is cached and the firmware owns DCBX, query the
+ * firmware and cache the result in bp->ieee_ets.
+ * Fix vs. original: the kzalloc'd my_ets was never stored in
+ * bp->ieee_ets nor freed, leaking it on every call and on each qcfg
+ * error path.  Cache it on success and free it on failure.
+ * Always returns 0 (errors simply leave *ets with only ets_cap set).
+ */
+static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct ieee_ets *my_ets = bp->ieee_ets;
+       int rc;
+
+       ets->ets_cap = bp->max_tc;
+
+       if (!my_ets) {
+               if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
+                       return 0;
+
+               my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+               if (!my_ets)
+                       return 0;
+               rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
+               if (rc)
+                       goto error;
+               rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
+               if (rc)
+                       goto error;
+
+               /* Cache the firmware settings for subsequent calls. */
+               bp->ieee_ets = my_ets;
+       }
+
+       ets->cbs = my_ets->cbs;
+       memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+       memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+       memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+       memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+       return 0;
+error:
+       kfree(my_ets);
+       return 0;
+}
+
+/* dcbnl ieee_setets hook: validate and apply a host-supplied ETS
+ * configuration (only allowed in host-managed IEEE DCBX mode), program
+ * the TC count, CoS bandwidth and priority mappings into firmware, and
+ * cache the accepted settings in bp->ieee_ets.
+ * Returns 0 on success or a negative errno from validation / HWRM.
+ */
+static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct ieee_ets *my_ets = bp->ieee_ets;
+       u8 max_tc = 0;
+       int rc, i;
+
+       if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+           !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+               return -EINVAL;
+
+       rc = bnxt_ets_validate(bp, ets, &max_tc);
+       if (!rc) {
+               if (!my_ets) {
+                       my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
+                       if (!my_ets)
+                               return -ENOMEM;
+                       /* initialize PRI2TC mappings to invalid value */
+                       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+                               my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
+                       bp->ieee_ets = my_ets;
+               }
+               rc = bnxt_setup_mq_tc(dev, max_tc);
+               if (rc)
+                       return rc;
+               rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
+               if (rc)
+                       return rc;
+               rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
+               if (rc)
+                       return rc;
+               /* All firmware updates succeeded; remember the config. */
+               memcpy(my_ets, ets, sizeof(*my_ets));
+       }
+       return rc;
+}
+
+/* dcbnl ieee_getpfc hook: report the PFC configuration and per-priority
+ * PFC frame counters.  When no host config is cached and firmware owns
+ * DCBX, query the firmware and cache the result in bp->ieee_pfc.
+ * Always returns 0; on error *pfc is only partially filled.
+ */
+static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
+       struct ieee_pfc *my_pfc = bp->ieee_pfc;
+       long rx_off, tx_off;
+       int i, rc;
+
+       pfc->pfc_cap = bp->max_lltc;
+
+       if (!my_pfc) {
+               if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
+                       return 0;
+
+               my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
+               if (!my_pfc)
+                       return 0;
+               bp->ieee_pfc = my_pfc;
+               rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
+               if (rc)
+                       return 0;
+       }
+
+       pfc->pfc_en = my_pfc->pfc_en;
+       pfc->mbc = my_pfc->mbc;
+       pfc->delay = my_pfc->delay;
+
+       /* Port statistics not mapped yet (e.g. interface never up). */
+       if (!stats)
+               return 0;
+
+       /* Walk the per-priority PFC counters; RX and TX stats live in
+        * the same DMA block at fixed 64-bit word offsets. */
+       rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
+       tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
+               pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
+               pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
+       }
+
+       return 0;
+}
+
+/* .ieee_setpfc: push a new PFC configuration to firmware and cache it. */
+static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int err;
+
+       /* PFC is only settable when the host owns the IEEE DCBX config. */
+       if (!(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+           !(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+
+       /* Lazily allocate the cached copy on first use. */
+       if (!bp->ieee_pfc) {
+               bp->ieee_pfc = kzalloc(sizeof(*bp->ieee_pfc), GFP_KERNEL);
+               if (!bp->ieee_pfc)
+                       return -ENOMEM;
+       }
+
+       err = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
+       if (err)
+               return err;
+
+       /* Firmware accepted the settings; remember them. */
+       memcpy(bp->ieee_pfc, pfc, sizeof(*bp->ieee_pfc));
+       return 0;
+}
+
+/* .ieee_setapp: register an APP TLV; RoCE entries are also sent to fw. */
+static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       bool is_roce;
+       int err;
+
+       if (!(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+           !(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+
+       err = dcb_ieee_setapp(dev, app);
+       if (err)
+               return err;
+
+       /* RoCE v1 is keyed by ethertype, RoCE v2 by UDP destination port. */
+       is_roce = (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+                  app->protocol == ETH_P_ROCE) ||
+                 (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+                  app->protocol == ROCE_V2_UDP_DPORT);
+       if (is_roce)
+               err = bnxt_hwrm_set_dcbx_app(bp, app, true);
+
+       return err;
+}
+
+/* .ieee_delapp: remove an APP TLV; RoCE entries are also removed from fw. */
+static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       bool is_roce;
+       int err;
+
+       if (!(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+           !(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+
+       err = dcb_ieee_delapp(dev, app);
+       if (err)
+               return err;
+
+       /* RoCE v1 is keyed by ethertype, RoCE v2 by UDP destination port. */
+       is_roce = (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+                  app->protocol == ETH_P_ROCE) ||
+                 (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+                  app->protocol == ROCE_V2_UDP_DPORT);
+       if (is_roce)
+               err = bnxt_hwrm_set_dcbx_app(bp, app, false);
+
+       return err;
+}
+
+/* .getdcbx: report the current DCB_CAP_DCBX_* capability flags. */
+static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
+{
+       return ((struct bnxt *)netdev_priv(dev))->dcbx_cap;
+}
+
+/* .setdcbx: change the DCBX mode.  Returns non-zero (per dcbnl convention)
+ * when the requested mode is unsupported, 0 on success.
+ */
+static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       /* Only the IEEE flavor of DCBX is implemented; reject CEE. */
+       if (!(mode & DCB_CAP_DCBX_VER_IEEE) || (mode & DCB_CAP_DCBX_VER_CEE))
+               return 1;
+
+       /* Host-managed mode is unavailable on VFs or when the firmware
+        * LLDP agent owns the configuration.
+        */
+       if ((mode & DCB_CAP_DCBX_HOST) &&
+           (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT)))
+               return 1;
+
+       if (mode != bp->dcbx_cap)
+               bp->dcbx_cap = mode;
+
+       return 0;
+}
+
+/* IEEE-only dcbnl operations, registered in bnxt_dcb_init(). */
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+       .ieee_getets    = bnxt_dcbnl_ieee_getets,
+       .ieee_setets    = bnxt_dcbnl_ieee_setets,
+       .ieee_getpfc    = bnxt_dcbnl_ieee_getpfc,
+       .ieee_setpfc    = bnxt_dcbnl_ieee_setpfc,
+       .ieee_setapp    = bnxt_dcbnl_ieee_setapp,
+       .ieee_delapp    = bnxt_dcbnl_ieee_delapp,
+       .getdcbx        = bnxt_dcbnl_getdcbx,
+       .setdcbx        = bnxt_dcbnl_setdcbx,
+};
+
+/* Enable DCB support on the device if the firmware is new enough. */
+void bnxt_dcb_init(struct bnxt *bp)
+{
+       /* DCB requires HWRM spec 1.5.1 or newer. */
+       if (bp->hwrm_spec_code < 0x10501)
+               return;
+
+       /* The host can only manage DCBX on a PF with no firmware LLDP
+        * agent; otherwise the configuration is LLD-managed.
+        */
+       if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+               bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
+       else
+               bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_LLD_MANAGED;
+       bp->dev->dcbnl_ops = &dcbnl_ops;
+}
+
+/* Release the cached IEEE DCB state; kfree(NULL) is a no-op so the
+ * pointers may legitimately still be unallocated here.
+ */
+void bnxt_dcb_free(struct bnxt *bp)
+{
+       kfree(bp->ieee_pfc);
+       bp->ieee_pfc = NULL;
+       kfree(bp->ieee_ets);
+       bp->ieee_ets = NULL;
+}
+
+#else
+
+/* Stub used when DCB support is compiled out. */
+void bnxt_dcb_init(struct bnxt *bp)
+{
+}
+
+/* Stub used when DCB support is compiled out. */
+void bnxt_dcb_free(struct bnxt *bp)
+{
+}
+
+#endif
diff --git a/ubuntu/bnxt/bnxt_dcb.h b/ubuntu/bnxt/bnxt_dcb.h
new file mode 100644 (file)
index 0000000..5150990
--- /dev/null
@@ -0,0 +1,42 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_DCB_H
+#define BNXT_DCB_H
+
+#include <net/dcbnl.h>
+
+/* Per-device DCB state embedded in struct bnxt. */
+struct bnxt_dcb {
+       u8                      max_tc;         /* max traffic classes supported */
+       struct ieee_pfc         *ieee_pfc;      /* cached PFC config, lazily allocated */
+       struct ieee_ets         *ieee_ets;      /* cached ETS config, lazily allocated */
+       u8                      dcbx_cap;       /* DCB_CAP_DCBX_* flags */
+       u8                      default_pri;    /* NOTE(review): default priority — confirm semantics */
+};
+
+/* CoS-queue to bandwidth mapping.  NOTE(review): the layout appears to
+ * mirror the queue_cos2bw HWRM message fields — confirm against
+ * bnxt_hsi.h before reordering.
+ */
+struct bnxt_cos2bw_cfg {
+       u8                      pad[3];
+       u8                      queue_id;
+       __le32                  min_bw;
+       __le32                  max_bw;
+       u8                      tsa;
+       u8                      pri_lvl;
+       u8                      bw_weight;
+       u8                      unused;
+};
+
+#define BNXT_LLQ(q_profile)    \
+       ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
+
+#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL      0x0300
+
+void bnxt_dcb_init(struct bnxt *);
+void bnxt_dcb_free(struct bnxt *);
+#endif
diff --git a/ubuntu/bnxt/bnxt_ethtool.c b/ubuntu/bnxt/bnxt_ethtool.c
new file mode 100644 (file)
index 0000000..f576ec0
--- /dev/null
@@ -0,0 +1,2946 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/ctype.h>
+#include <linux/stringify.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#if !defined(NEW_FLOW_KEYS) && defined(HAVE_FLOW_KEYS)
+#include <net/flow_keys.h>
+#endif
+#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT)
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#endif
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_xdp.h"
+#include "bnxt_ptp.h"
+#include "bnxt_ethtool.h"
+#ifdef CONFIG_BNXT_FLASHDEV
+#include "bnxt_nvm_defs.h"     /* NVRAM content constant and structure defs */
+#include "bnxt_fw_hdr.h"       /* Firmware hdr constant and structure defs */
+#define FLASH_NVRAM_TIMEOUT    ((HWRM_CMD_TIMEOUT) * 100)
+#define FLASH_PACKAGE_TIMEOUT  ((HWRM_CMD_TIMEOUT) * 200)
+#define INSTALL_PACKAGE_TIMEOUT        ((HWRM_CMD_TIMEOUT) * 200)
+#endif
+
+static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
+
+/* .get_msglevel: report the driver's message-enable bitmask. */
+static u32 bnxt_get_msglevel(struct net_device *dev)
+{
+       return ((struct bnxt *)netdev_priv(dev))->msg_enable;
+}
+
+/* .set_msglevel: set the driver's message-enable bitmask. */
+static void bnxt_set_msglevel(struct net_device *dev, u32 value)
+{
+       ((struct bnxt *)netdev_priv(dev))->msg_enable = value;
+}
+
+/* .get_coalesce: report the current interrupt coalescing parameters. */
+static int bnxt_get_coalesce(struct net_device *dev,
+                            struct ethtool_coalesce *coal)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       memset(coal, 0, sizeof(*coal));
+
+       /* TX side is reported as stored. */
+       coal->tx_coalesce_usecs = bp->tx_coal_ticks;
+       coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
+       coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
+       coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
+
+       /* RX side: the driver stores 2 completion records per rx packet,
+        * so the buffer counts are halved for reporting.
+        */
+       coal->rx_coalesce_usecs = bp->rx_coal_ticks;
+       coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
+       coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
+       coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;
+
+       coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
+
+       return 0;
+}
+
+/* .set_coalesce: apply new interrupt coalescing parameters.  A change to
+ * the statistics block interval requires a full close/open cycle; other
+ * changes are pushed to firmware while the NIC stays up.
+ */
+static int bnxt_set_coalesce(struct net_device *dev,
+                            struct ethtool_coalesce *coal)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       bool update_stats = false;
+       int rc = 0;
+
+       bp->rx_coal_ticks = coal->rx_coalesce_usecs;
+       /* 2 completion records per rx packet */
+       bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
+       bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
+       bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+       bp->tx_coal_ticks = coal->tx_coalesce_usecs;
+       bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
+       bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
+       bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
+
+       if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
+               u32 stats_ticks = coal->stats_block_coalesce_usecs;
+
+               /* Allow 0, which means disable. */
+               if (stats_ticks)
+                       stats_ticks = clamp_t(u32, stats_ticks,
+                                             BNXT_MIN_STATS_COAL_TICKS,
+                                             BNXT_MAX_STATS_COAL_TICKS);
+               /* Round to the firmware's supported granularity. */
+               stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
+               bp->stats_coal_ticks = stats_ticks;
+               update_stats = true;
+       }
+
+       if (netif_running(dev)) {
+               /* Stats interval changes need a close/open cycle; all other
+                * coalescing changes are applied directly via HWRM.
+                */
+               if (update_stats) {
+                       rc = bnxt_close_nic(bp, true, false);
+                       if (!rc)
+                               rc = bnxt_open_nic(bp, true, false);
+               } else {
+                       rc = bnxt_hwrm_set_coal(bp);
+               }
+       }
+
+       return rc;
+}
+
+#define BNXT_NUM_STATS 22
+
+#define BNXT_RX_STATS_ENTRY(counter)   \
+       { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
+
+#define BNXT_TX_STATS_ENTRY(counter)   \
+       { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
+
+/* Port statistics table: each entry pairs a u64 offset into the hardware
+ * port statistics block with its ethtool string name.  The order here
+ * defines the order of the values emitted by bnxt_get_ethtool_stats()
+ * and the strings emitted by bnxt_get_strings() — keep them in sync.
+ */
+static const struct {
+       long offset;
+       char string[ETH_GSTRING_LEN];
+} bnxt_port_stats_arr[] = {
+       BNXT_RX_STATS_ENTRY(rx_64b_frames),
+       BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
+       BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
+       BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
+       BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
+       BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
+       BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
+       BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
+       BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
+       BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
+       BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
+       BNXT_RX_STATS_ENTRY(rx_total_frames),
+       BNXT_RX_STATS_ENTRY(rx_ucast_frames),
+       BNXT_RX_STATS_ENTRY(rx_mcast_frames),
+       BNXT_RX_STATS_ENTRY(rx_bcast_frames),
+       BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
+       BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
+       BNXT_RX_STATS_ENTRY(rx_pause_frames),
+       BNXT_RX_STATS_ENTRY(rx_pfc_frames),
+       BNXT_RX_STATS_ENTRY(rx_align_err_frames),
+       BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
+       BNXT_RX_STATS_ENTRY(rx_jbr_frames),
+       BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
+       BNXT_RX_STATS_ENTRY(rx_tagged_frames),
+       BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
+       BNXT_RX_STATS_ENTRY(rx_good_frames),
+       BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
+       BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
+       BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
+       BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
+       BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
+       BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
+       BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
+       BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
+       BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
+       BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
+       BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
+       BNXT_RX_STATS_ENTRY(rx_bytes),
+       BNXT_RX_STATS_ENTRY(rx_runt_bytes),
+       BNXT_RX_STATS_ENTRY(rx_runt_frames),
+
+       BNXT_TX_STATS_ENTRY(tx_64b_frames),
+       BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
+       BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
+       BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
+       BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
+       BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
+       BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
+       BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
+       BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
+       BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
+       BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
+       BNXT_TX_STATS_ENTRY(tx_good_frames),
+       BNXT_TX_STATS_ENTRY(tx_total_frames),
+       BNXT_TX_STATS_ENTRY(tx_ucast_frames),
+       BNXT_TX_STATS_ENTRY(tx_mcast_frames),
+       BNXT_TX_STATS_ENTRY(tx_bcast_frames),
+       BNXT_TX_STATS_ENTRY(tx_pause_frames),
+       BNXT_TX_STATS_ENTRY(tx_pfc_frames),
+       BNXT_TX_STATS_ENTRY(tx_jabber_frames),
+       BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
+       BNXT_TX_STATS_ENTRY(tx_err),
+       BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
+       BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
+       BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
+       BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
+       BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
+       BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
+       BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
+       BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
+       BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
+       BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
+       BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
+       BNXT_TX_STATS_ENTRY(tx_total_collisions),
+       BNXT_TX_STATS_ENTRY(tx_bytes),
+};
+
+#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
+
+/* Total number of ethtool stats: per-ring stats for every completion
+ * ring, plus the port stats when the device supports them.
+ */
+static int bnxt_get_num_stats(struct bnxt *bp)
+{
+       int total = BNXT_NUM_STATS * bp->cp_nr_rings;
+
+       if (bp->flags & BNXT_FLAG_PORT_STATS)
+               total += BNXT_NUM_PORT_STATS;
+
+       return total;
+}
+
+/* .get_sset_count: report string-set sizes for stats and self-tests. */
+static int bnxt_get_sset_count(struct net_device *dev, int sset)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (sset == ETH_SS_STATS)
+               return bnxt_get_num_stats(bp);
+       if (sset == ETH_SS_TEST)
+               return bp->num_tests ? bp->num_tests : -EOPNOTSUPP;
+       return -EOPNOTSUPP;
+}
+
+/* .get_ethtool_stats: fill buf with, per completion ring, the hardware
+ * counters followed by two software counters, then (when supported) the
+ * port counters from bnxt_port_stats_arr[].  The layout must match the
+ * names produced by bnxt_get_strings().
+ */
+static void bnxt_get_ethtool_stats(struct net_device *dev,
+                                  struct ethtool_stats *stats, u64 *buf)
+{
+       u32 i, j = 0;
+       struct bnxt *bp = netdev_priv(dev);
+       int buf_size = bnxt_get_num_stats(bp) * sizeof(u64);
+       u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;   /* u64s per ring */
+
+       memset(buf, 0, buf_size);
+
+       /* Rings not allocated yet (device down): report zeros. */
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+               __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+               struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
+               int k;
+
+               /* Hardware counters first, then the software ones. */
+               for (k = 0; k < stat_fields; j++, k++)
+                       buf[j] = le64_to_cpu(hw_stats[k]);
+               buf[j++] = sw_stats->rx_l4_csum_errors;
+               buf[j++] = sw_stats->rx_resets;
+       }
+       if (bp->flags & BNXT_FLAG_PORT_STATS) {
+               __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
+
+               for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
+                       buf[j] = le64_to_cpu(*(port_stats +
+                                              bnxt_port_stats_arr[i].offset));
+               }
+       }
+}
+
+/* .get_strings: emit the names for each string set.  The per-ring name
+ * sequence below must stay in lock-step with the value layout produced
+ * by bnxt_get_ethtool_stats().
+ */
+static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u32 i;
+
+       switch (stringset) {
+       /* The number of strings must match BNXT_NUM_STATS defined above. */
+       case ETH_SS_STATS:
+               for (i = 0; i < bp->cp_nr_rings; i++) {
+                       sprintf(buf, "[%d]: rx_ucast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_mcast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_bcast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_discards", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_drops", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_ucast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_mcast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_bcast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_ucast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_mcast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_bcast_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_discards", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_drops", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_ucast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_mcast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tx_bcast_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tpa_packets", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tpa_bytes", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tpa_events", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: tpa_aborts", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+                       buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: rx_resets", i);
+                       buf += ETH_GSTRING_LEN;
+               }
+               if (bp->flags & BNXT_FLAG_PORT_STATS) {
+                       for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
+                               strcpy(buf, bnxt_port_stats_arr[i].string);
+                               buf += ETH_GSTRING_LEN;
+                       }
+               }
+               break;
+       case ETH_SS_TEST:
+               if (bp->num_tests)
+                       memcpy(buf, bp->test_info->string,
+                              bp->num_tests * ETH_GSTRING_LEN);
+               break;
+       default:
+               netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
+                          stringset);
+               break;
+       }
+}
+
+/* .get_ringparam: report maximum and currently configured ring sizes. */
+static void bnxt_get_ringparam(struct net_device *dev,
+                              struct ethtool_ringparam *ering)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+       ering->rx_pending = bp->rx_ring_size;
+       ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+       ering->rx_jumbo_pending = bp->rx_agg_ring_size;
+       ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
+       ering->tx_pending = bp->tx_ring_size;
+}
+
+/* .set_ringparam: apply new RX/TX ring sizes, bouncing the NIC if it is
+ * running.
+ *
+ * Fix: the return value of bnxt_close_nic() was silently ignored; the
+ * other reconfiguration paths in this file (set_coalesce, set_channels)
+ * check it, so do the same here and bail out before changing the ring
+ * sizes if the close fails.
+ */
+static int bnxt_set_ringparam(struct net_device *dev,
+                             struct ethtool_ringparam *ering)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+
+       /* TX must hold at least one max-fragment packet; both sizes are
+        * bounded by the hardware descriptor limits.
+        */
+       if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+           (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+           (ering->tx_pending <= MAX_SKB_FRAGS))
+               return -EINVAL;
+
+       if (netif_running(dev)) {
+               rc = bnxt_close_nic(bp, false, false);
+               if (rc)
+                       return rc;
+       }
+
+       bp->rx_ring_size = ering->rx_pending;
+       bp->tx_ring_size = ering->tx_pending;
+       bnxt_set_ring_params(bp);
+
+       if (netif_running(dev))
+               return bnxt_open_nic(bp, false, false);
+
+       return 0;
+}
+
+#if defined(ETHTOOL_GCHANNELS) && !defined(GET_ETHTOOL_OP_EXT)
+/* .get_channels: report maximum and current channel (ring) counts.
+ *
+ * Fix: the first bnxt_get_max_rings() call ignored its return value, so
+ * on failure max_rx_rings/max_tx_rings were read uninitialized when
+ * computing max_combined.  Treat a failure as "no rings available",
+ * mirroring the check already done on the second query below.
+ */
+static void bnxt_get_channels(struct net_device *dev,
+                             struct ethtool_channels *channel)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int max_rx_rings, max_tx_rings, tcs;
+
+       if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true)) {
+               max_rx_rings = 0;
+               max_tx_rings = 0;
+       }
+       channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
+
+       if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
+               max_rx_rings = 0;
+               max_tx_rings = 0;
+       }
+
+       /* With multiple traffic classes each TC consumes its own set of
+        * TX rings, reducing the per-TC maximum.
+        */
+       tcs = netdev_get_num_tc(dev);
+       if (tcs > 1)
+               max_tx_rings /= tcs;
+
+       channel->max_rx = max_rx_rings;
+       channel->max_tx = max_tx_rings;
+       channel->max_other = 0;
+       if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+               channel->combined_count = bp->rx_nr_rings;
+               if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+                       channel->combined_count--;
+       } else {
+               if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+                       channel->rx_count = bp->rx_nr_rings;
+                       channel->tx_count = bp->tx_nr_rings_per_tc;
+               }
+       }
+}
+
+/* .set_channels: reconfigure the RX/TX/combined ring counts, closing and
+ * reopening the NIC around the change when it is running.
+ */
+static int bnxt_set_channels(struct net_device *dev,
+                            struct ethtool_channels *channel)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int req_tx_rings, req_rx_rings, tcs;
+       bool sh = false;
+       int tx_xdp = 0;
+       int rc = 0;
+
+       /* Either combined-only or rx+tx-only requests are valid; mixing
+        * the two, or asking for "other" channels, is rejected.
+        */
+       if (channel->other_count)
+               return -EINVAL;
+
+       if (!channel->combined_count &&
+           (!channel->rx_count || !channel->tx_count))
+               return -EINVAL;
+
+       if (channel->combined_count &&
+           (channel->rx_count || channel->tx_count))
+               return -EINVAL;
+
+       /* Nitro A0 only supports combined (shared) rings. */
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
+                                           channel->tx_count))
+               return -EINVAL;
+
+       if (channel->combined_count)
+               sh = true;
+
+       tcs = netdev_get_num_tc(dev);
+
+       req_tx_rings = sh ? channel->combined_count : channel->tx_count;
+       req_rx_rings = sh ? channel->combined_count : channel->rx_count;
+       /* XDP needs one extra TX ring per RX ring, and only works with
+        * shared (combined) rings.
+        */
+       if (bp->tx_nr_rings_xdp) {
+               if (!sh) {
+                       netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
+                       return -EINVAL;
+               }
+               tx_xdp = req_rx_rings;
+       }
+       /* Reserve hardware resources up front so the close/open below
+        * cannot fail for lack of rings.
+        */
+       rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, sh, tcs,
+                               tx_xdp);
+       if (rc) {
+               netdev_warn(dev, "Unable to allocate the requested rings\n");
+               return rc;
+       }
+
+       if (netif_running(dev)) {
+               if (BNXT_PF(bp)) {
+                       /* TODO CHIMP_FW: Send message to all VF's
+                        * before PF unload
+                        */
+               }
+               rc = bnxt_close_nic(bp, true, false);
+               if (rc) {
+                       netdev_err(bp->dev, "Set channel failure rc :%x\n",
+                                  rc);
+                       return rc;
+               }
+       }
+
+       if (sh) {
+               bp->flags |= BNXT_FLAG_SHARED_RINGS;
+               bp->rx_nr_rings = channel->combined_count;
+               bp->tx_nr_rings_per_tc = channel->combined_count;
+       } else {
+               bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
+               bp->rx_nr_rings = channel->rx_count;
+               bp->tx_nr_rings_per_tc = channel->rx_count == channel->rx_count ? channel->tx_count : channel->tx_count;
+       }
+       bp->tx_nr_rings_xdp = tx_xdp;
+       bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
+       if (tcs > 1)
+               bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
+
+       /* Shared rings pair one RX with one TX per completion ring. */
+       bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+                              bp->tx_nr_rings + bp->rx_nr_rings;
+
+       bp->num_stat_ctxs = bp->cp_nr_rings;
+
+       /* After changing number of rx channels, update NTUPLE feature. */
+       netdev_update_features(dev);
+       if (netif_running(dev)) {
+               rc = bnxt_open_nic(bp, true, false);
+               if ((!rc) && BNXT_PF(bp)) {
+                       /* TODO CHIMP_FW: Send message to all VF's
+                        * to renable
+                        */
+               }
+       }
+
+       return rc;
+}
+#endif
+
+#ifdef HAVE_RXNFC
+#ifdef CONFIG_RFS_ACCEL
+/* ETHTOOL_GRXCLSRLALL: return the sw_id of every installed ntuple filter,
+ * up to cmd->rule_cnt entries; cmd->data reports the total filter count.
+ */
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+                           u32 *rule_locs)
+{
+       int i, j = 0;
+
+       cmd->data = bp->ntp_fltr_count;
+       /* Walk every hash bucket under RCU, stopping once the caller's
+        * buffer (cmd->rule_cnt entries) is full.
+        */
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+               struct hlist_head *head;
+               struct hlist_node __maybe_unused *node;
+               struct bnxt_ntuple_filter *fltr;
+
+               head = &bp->ntp_fltr_hash_tbl[i];
+               rcu_read_lock();
+               __hlist_for_each_entry_rcu(fltr, node, head, hash) {
+                       if (j == cmd->rule_cnt)
+                               break;
+                       rule_locs[j++] = fltr->sw_id;
+               }
+               rcu_read_unlock();
+               if (j == cmd->rule_cnt)
+                       break;
+       }
+       cmd->rule_cnt = j;
+       return 0;
+}
+
+/* ETHTOOL_GRXCLSRULE: look up the ntuple filter whose sw_id matches
+ * fs->location and translate its flow keys into the ethtool flow spec.
+ * Note: the RCU read lock taken during the lookup is held across the
+ * goto to fltr_found and released at fltr_err.
+ */
+static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fs =
+               (struct ethtool_rx_flow_spec *)&cmd->fs;
+       struct bnxt_ntuple_filter *fltr;
+       struct flow_keys *fkeys;
+       int i, rc = -EINVAL;
+
+       if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+               return rc;
+
+       for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+               struct hlist_head *head;
+               struct hlist_node __maybe_unused *node;
+
+               head = &bp->ntp_fltr_hash_tbl[i];
+               rcu_read_lock();
+               __hlist_for_each_entry_rcu(fltr, node, head, hash) {
+                       if (fltr->sw_id == fs->location)
+                               goto fltr_found;
+               }
+               rcu_read_unlock();
+       }
+       return rc;
+
+fltr_found:
+       fkeys = &fltr->fkeys;
+#ifdef NEW_FLOW_KEYS
+       /* Modern flow_keys layout: dissect by n_proto / ip_proto. */
+       if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+               if (fkeys->basic.ip_proto == IPPROTO_TCP)
+                       fs->flow_type = TCP_V4_FLOW;
+               else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+                       fs->flow_type = UDP_V4_FLOW;
+               else
+                       goto fltr_err;
+
+               /* Filters match exact 4-tuples, so all masks are all-ones. */
+               fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+               fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+               fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+               fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+               fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+               fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+               fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+               fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+       } else {
+#ifdef HAVE_ETHTOOL_IP6_SPEC
+               int i;
+
+               if (fkeys->basic.ip_proto == IPPROTO_TCP)
+                       fs->flow_type = TCP_V6_FLOW;
+               else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+                       fs->flow_type = UDP_V6_FLOW;
+               else
+                       goto fltr_err;
+
+               *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+                       fkeys->addrs.v6addrs.src;
+               *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+                       fkeys->addrs.v6addrs.dst;
+               for (i = 0; i < 4; i++) {
+                       fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
+                       fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+               }
+               fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+               fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
+
+               fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+               fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+#endif
+       }
+
+#else
+       /* Legacy flow_keys layout (IPv4 only). */
+       if (fkeys->ip_proto == IPPROTO_TCP)
+               fs->flow_type = TCP_V4_FLOW;
+       else if (fkeys->ip_proto == IPPROTO_UDP)
+               fs->flow_type = UDP_V4_FLOW;
+       else
+               goto fltr_err;
+
+       fs->h_u.tcp_ip4_spec.ip4src = fkeys->src;
+       fs->m_u.tcp_ip4_spec.ip4src = (__be32) ~0;
+
+       fs->h_u.tcp_ip4_spec.ip4dst = fkeys->dst;
+       fs->m_u.tcp_ip4_spec.ip4dst = (__be32) ~0;
+
+       fs->h_u.tcp_ip4_spec.psrc = fkeys->port16[0];
+       fs->m_u.tcp_ip4_spec.psrc = (__be16) ~0;
+
+       fs->h_u.tcp_ip4_spec.pdst = fkeys->port16[1];
+       fs->m_u.tcp_ip4_spec.pdst = (__be16) ~0;
+#endif
+
+       fs->ring_cookie = fltr->rxq;
+       rc = 0;
+
+fltr_err:
+       rcu_read_unlock();
+
+       return rc;
+}
+#endif /* CONFIG_RFS_ACCEL */
+
+/* Report src/dst IP hashing when the IPv4 2-tuple RSS type is enabled. */
+static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
+{
+       return (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) ?
+              (RXH_IP_SRC | RXH_IP_DST) : 0;
+}
+
+/* Report src/dst IP hashing when the IPv6 2-tuple RSS type is enabled. */
+static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
+{
+       return (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) ?
+              (RXH_IP_SRC | RXH_IP_DST) : 0;
+}
+
+/* ETHTOOL_GRXFH: report which header fields feed the RSS hash for a
+ * given flow type.  TCP/UDP report 4-tuple hashing when enabled and
+ * fall back to (or through to) the 2-tuple IP hash bits.
+ */
+static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+       cmd->data = 0;
+       switch (cmd->flow_type) {
+       case TCP_V4_FLOW:
+               if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
+                       cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+                                    RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               cmd->data |= get_ethtool_ipv4_rss(bp);
+               break;
+       case UDP_V4_FLOW:
+               if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
+                       cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+                                    RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* fall through */
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case IPV4_FLOW:
+               cmd->data |= get_ethtool_ipv4_rss(bp);
+               break;
+
+       case TCP_V6_FLOW:
+               if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
+                       cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+                                    RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               cmd->data |= get_ethtool_ipv6_rss(bp);
+               break;
+       case UDP_V6_FLOW:
+               if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
+                       cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+                                    RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* fall through */
+       case SCTP_V6_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case IPV6_FLOW:
+               cmd->data |= get_ethtool_ipv6_rss(bp);
+               break;
+       }
+       return 0;
+}
+
+#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
+
+/* Handle ETHTOOL_SRXFH: set the RSS hash field configuration for one
+ * flow type.  Only an exact 4-tuple (addrs + ports), exact 2-tuple
+ * (addrs only) or empty field mask is accepted; any other combination
+ * returns -EINVAL.  If the resulting hash config differs from the
+ * current one and the interface is up, the NIC is bounced
+ * (close/open) to apply it.  Returns 0 or a negative errno.
+ */
+static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+       u32 rss_hash_cfg = bp->rss_hash_cfg;
+       int tuple, rc = 0;
+
+       /* Classify the requested field mask; partial masks are rejected. */
+       if (cmd->data == RXH_4TUPLE)
+               tuple = 4;
+       else if (cmd->data == RXH_2TUPLE)
+               tuple = 2;
+       else if (!cmd->data)
+               tuple = 0;
+       else
+               return -EINVAL;
+
+       /* 4-tuple hashing is only configurable for TCP/UDP; UDP further
+        * requires firmware support (BNXT_FLAG_UDP_RSS_CAP).
+        */
+       if (cmd->flow_type == TCP_V4_FLOW) {
+               rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
+               if (tuple == 4)
+                       rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
+       } else if (cmd->flow_type == UDP_V4_FLOW) {
+               if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+                       return -EINVAL;
+               rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
+               if (tuple == 4)
+                       rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
+       } else if (cmd->flow_type == TCP_V6_FLOW) {
+               rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+               if (tuple == 4)
+                       rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+       } else if (cmd->flow_type == UDP_V6_FLOW) {
+               if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+                       return -EINVAL;
+               rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+               if (tuple == 4)
+                       rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+       } else if (tuple == 4) {
+               /* 4-tuple requested for a flow type that cannot support it */
+               return -EINVAL;
+       }
+
+       /* 2-tuple (or none) applies to the per-IP-version hash bit, shared
+        * across every flow type of that IP version.
+        */
+       switch (cmd->flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case IPV4_FLOW:
+               if (tuple == 2)
+                       rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
+               else if (!tuple)
+                       rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
+               break;
+
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+       case SCTP_V6_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case IPV6_FLOW:
+               if (tuple == 2)
+                       rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
+               else if (!tuple)
+                       rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
+               break;
+       }
+
+       /* No change: nothing to program. */
+       if (bp->rss_hash_cfg == rss_hash_cfg)
+               return 0;
+
+       bp->rss_hash_cfg = rss_hash_cfg;
+       if (netif_running(bp->dev)) {
+               /* bounce the NIC so firmware picks up the new hash config */
+               bnxt_close_nic(bp, false, false);
+               rc = bnxt_open_nic(bp, false, false);
+       }
+       return rc;
+}
+
+/* ethtool get_rxnfc hook: RX flow classification / RSS queries.
+ * The ntuple (RFS) rule commands are only built with CONFIG_RFS_ACCEL;
+ * HAVE_RXNFC_VOID selects the older kernel prototype for rule_locs.
+ */
+static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_RXNFC_VOID
+                         void *rule_locs)
+#else
+                         u32 *rule_locs)
+#endif
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc = 0;
+
+       switch (cmd->cmd) {
+#ifdef CONFIG_RFS_ACCEL
+       case ETHTOOL_GRXRINGS:
+               /* number of RX rings available for flow steering */
+               cmd->data = bp->rx_nr_rings;
+               break;
+
+       case ETHTOOL_GRXCLSRLCNT:
+               /* current ntuple filter count and table capacity */
+               cmd->rule_cnt = bp->ntp_fltr_count;
+               cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+               break;
+
+       case ETHTOOL_GRXCLSRLALL:
+               rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
+               break;
+
+       case ETHTOOL_GRXCLSRULE:
+               rc = bnxt_grxclsrule(bp, cmd);
+               break;
+#endif
+
+       case ETHTOOL_GRXFH:
+               /* report RSS hash fields for the requested flow type */
+               rc = bnxt_grxfh(bp, cmd);
+               break;
+
+       default:
+               rc = -EOPNOTSUPP;
+               break;
+       }
+
+       return rc;
+}
+
+/* ethtool set_rxnfc hook.  Only ETHTOOL_SRXFH (RSS hash field
+ * configuration) is supported; every other command is rejected.
+ */
+static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (cmd->cmd == ETHTOOL_SRXFH)
+               return bnxt_srxfh(bp, cmd);
+
+       return -EOPNOTSUPP;
+}
+
+#endif /* HAVE_RXNFC */
+
+#if defined(HAVE_RXFH_INDIR_SIZE) && !defined(GET_ETHTOOL_OP_EXT)
+/* ethtool hook: size (in entries) of the RSS indirection table. */
+static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+{
+       return HW_HASH_INDEX_SIZE;
+}
+#endif
+
+#if defined(HAVE_GET_RXFH_KEY_SIZE) && !defined(GET_ETHTOOL_OP_EXT)
+/* ethtool hook: size (in bytes) of the RSS hash key. */
+static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
+{
+       return HW_HASH_KEY_SIZE;
+}
+
+/* ethtool get_rxfh hook: report the hash function (always Toeplitz),
+ * the RSS indirection table and the hash key, all taken from VNIC 0.
+ * Any of the output pointers may be NULL.  Always returns 0.
+ */
+static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+                        u8 *hfunc)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       if (indir) {
+               int idx;
+
+               /* table entries are little-endian in the VNIC structure */
+               for (idx = 0; idx < HW_HASH_INDEX_SIZE; idx++)
+                       indir[idx] = le16_to_cpu(vnic->rss_table[idx]);
+       }
+
+       if (key)
+               memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+       return 0;
+}
+#endif
+
+/* ethtool get_drvinfo hook: fill in driver name/version, firmware
+ * version (with package version appended when a valid one is found in
+ * NVRAM), bus info and stats/test counts.  The temporary pkglog buffer
+ * is best-effort: if the allocation fails we simply omit the package
+ * version.
+ */
+static void bnxt_get_drvinfo(struct net_device *dev,
+                            struct ethtool_drvinfo *info)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       char *pkglog;
+       char *pkgver = NULL;
+
+       pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
+       if (pkglog)
+               pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       /* Cast to unsigned char before isdigit(): passing a plain char
+        * that may be negative is undefined behavior (CERT STR37-C).
+        */
+       if (pkgver && *pkgver != 0 && isdigit((unsigned char)*pkgver))
+               snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+                        "%s pkg %s", bp->fw_ver_str, pkgver);
+       else
+               strlcpy(info->fw_version, bp->fw_ver_str,
+                       sizeof(info->fw_version));
+       strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+       info->n_stats = bnxt_get_num_stats(bp);
+       info->testinfo_len = bp->num_tests;
+       /* TODO CHIMP_FW: eeprom dump details */
+       info->eedump_len = 0;
+       /* TODO CHIMP FW: reg dump details */
+       info->regdump_len = 0;
+       kfree(pkglog);
+}
+
+/* ethtool get_wol hook: only magic-packet wake is ever supported, and
+ * only when firmware reports the capability (BNXT_FLAG_WOL_CAP).
+ */
+static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       memset(&wol->sopass, 0, sizeof(wol->sopass));
+       wol->supported = 0;
+       wol->wolopts = 0;
+
+       if (!(bp->flags & BNXT_FLAG_WOL_CAP))
+               return;
+
+       wol->supported = WAKE_MAGIC;
+       wol->wolopts = bp->wol ? WAKE_MAGIC : 0;
+}
+
+/* ethtool set_wol hook: enable or disable magic-packet wake.  Any
+ * option other than WAKE_MAGIC is rejected; enabling requires the
+ * firmware WoL capability.  The WoL filter is (de)allocated via HWRM
+ * and -EBUSY is returned if firmware refuses.
+ */
+static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       bool want_magic = !!(wol->wolopts & WAKE_MAGIC);
+
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EINVAL;
+
+       if (want_magic && !(bp->flags & BNXT_FLAG_WOL_CAP))
+               return -EINVAL;
+
+       /* Already in the requested state: nothing to do. */
+       if (want_magic == !!bp->wol)
+               return 0;
+
+       if (want_magic) {
+               if (bnxt_hwrm_alloc_wol_fltr(bp))
+                       return -EBUSY;
+               bp->wol = 1;
+       } else {
+               if (bnxt_hwrm_free_wol_fltr(bp))
+                       return -EBUSY;
+               bp->wol = 0;
+       }
+       return 0;
+}
+
+/* Convert a firmware link-speed mask plus pause flags into legacy
+ * ethtool ADVERTISED_* bits.  Note 20G/25G/50G have no legacy
+ * ADVERTISED_* equivalent here (see TODO below), so those fw bits are
+ * dropped.
+ */
+u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+{
+       u32 speed_mask = 0;
+
+       /* TODO: support 25GB, 40GB, 50GB with different cable type */
+       /* set the advertised speeds */
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+               speed_mask |= ADVERTISED_100baseT_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+               speed_mask |= ADVERTISED_1000baseT_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+               speed_mask |= ADVERTISED_2500baseX_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+               speed_mask |= ADVERTISED_10000baseT_Full;
+       if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+               speed_mask |= ADVERTISED_40000baseCR4_Full;
+
+       /* Map firmware pause flags onto the ethtool Pause/Asym_Pause
+        * encoding (see IEEE 802.3 Annex 28B pause resolution).
+        */
+       if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
+               speed_mask |= ADVERTISED_Pause;
+       else if (fw_pause & BNXT_LINK_PAUSE_TX)
+               speed_mask |= ADVERTISED_Asym_Pause;
+       else if (fw_pause & BNXT_LINK_PAUSE_RX)
+               speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+       return speed_mask;
+}
+
+#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G
+/* Translate a firmware speed mask (+ pause flags) into link-mode bits
+ * of the named lk_ksettings field ("supported", "advertising" or
+ * "lp_advertising").  A macro because the link-mode name token is
+ * pasted into ethtool_link_ksettings_add_link_mode().
+ */
+#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
+{                                                                      \
+       if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)                    \
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+                                                    100baseT_Full);    \
+       if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)                      \
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+                                                    1000baseT_Full);   \
+       if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)                     \
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+                                                    10000baseT_Full);  \
+       if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)                     \
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+                                                    25000baseCR_Full); \
+       if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)                     \
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+                                                    40000baseCR4_Full);\
+       if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)                     \
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+                                                    50000baseCR2_Full);\
+       if ((fw_pause) & BNXT_LINK_PAUSE_RX) {                          \
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+                                                    Pause);            \
+               if (!((fw_pause) & BNXT_LINK_PAUSE_TX))                 \
+                       ethtool_link_ksettings_add_link_mode(           \
+                                       lk_ksettings, name, Asym_Pause);\
+       } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {                   \
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+                                                    Asym_Pause);       \
+       }                                                               \
+}
+
+/* Inverse of BNXT_FW_TO_ETHTOOL_SPDS: collect the link modes set in
+ * the named lk_ksettings field into a firmware speed mask.  Half- and
+ * full-duplex 100M/1G modes map to the same firmware bit.
+ */
+#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)         \
+{                                                                      \
+       if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
+                                                 100baseT_Full) ||     \
+           ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
+                                                 100baseT_Half))       \
+               (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;               \
+       if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
+                                                 1000baseT_Full) ||    \
+           ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
+                                                 1000baseT_Half))      \
+               (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;                 \
+       if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
+                                                 10000baseT_Full))     \
+               (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;                \
+       if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
+                                                 25000baseCR_Full))    \
+               (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;                \
+       if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
+                                                 40000baseCR4_Full))   \
+               (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;                \
+       if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
+                                                 50000baseCR2_Full))   \
+               (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;                \
+}
+
+/* Populate lk_ksettings "advertising" link modes from the firmware
+ * advertised-speed mask.  Pause advertisement is included only when
+ * flow-control autoneg is enabled.
+ */
+static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
+                               struct ethtool_link_ksettings *lk_ksettings)
+{
+       bool pause_an = link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL;
+       u8 fw_pause = pause_an ? link_info->auto_pause_setting : 0;
+       u16 fw_speeds = link_info->advertising;
+
+       BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
+}
+
+/* Populate lk_ksettings "lp_advertising" link modes from the link
+ * partner's advertised-speed mask.  Pause bits are reported only when
+ * flow-control autoneg is enabled.
+ */
+static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
+                               struct ethtool_link_ksettings *lk_ksettings)
+{
+       bool pause_an = link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL;
+       u8 fw_pause = pause_an ? link_info->lp_pause : 0;
+       u16 fw_speeds = link_info->lp_auto_link_speeds;
+
+       BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
+                               lp_advertising);
+}
+
+/* Populate lk_ksettings "supported" link modes from the firmware
+ * supported-speed mask.  Pause/Asym_Pause are always supported;
+ * Autoneg only when firmware reports autoneg-capable speeds.
+ */
+static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
+                               struct ethtool_link_ksettings *lk_ksettings)
+{
+       u16 speeds = link_info->support_speeds;
+
+       BNXT_FW_TO_ETHTOOL_SPDS(speeds, 0, lk_ksettings, supported);
+
+       ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+                                            Asym_Pause);
+       ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
+
+       if (link_info->support_auto_speeds)
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+                                                    Autoneg);
+}
+
+#else
+
+/* Legacy (pre-ksettings) variant: return ADVERTISED_* bits for the
+ * firmware advertised speeds, including pause bits only when
+ * flow-control autoneg is on.
+ */
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+       u8 fw_pause = (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) ?
+                     link_info->auto_pause_setting : 0;
+
+       return _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, fw_pause);
+}
+
+/* Legacy (pre-ksettings) variant: return ADVERTISED_* bits for the
+ * link partner's speeds, including its pause bits only when
+ * flow-control autoneg is on.
+ */
+static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
+{
+       u8 fw_pause = (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) ?
+                     link_info->lp_pause : 0;
+
+       return _bnxt_fw_to_ethtool_adv_spds(link_info->lp_auto_link_speeds,
+                                           fw_pause);
+}
+
+/* Legacy (pre-ksettings) variant: SUPPORTED_* bits for the firmware
+ * supported speeds; pause support is unconditional.
+ */
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+       return _bnxt_fw_to_ethtool_adv_spds(link_info->support_speeds, 0) |
+              SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
+/* Legacy (pre-ksettings) variant: SUPPORTED_* bits for speeds that can
+ * be autonegotiated.  Returns 0 when autoneg is not supported at all;
+ * pause bits are added only when some autoneg speed exists.
+ */
+static u32 bnxt_fw_to_ethtool_support_adv_spds(struct bnxt_link_info *link_info)
+{
+       u32 spds;
+
+       spds = _bnxt_fw_to_ethtool_adv_spds(link_info->support_auto_speeds, 0);
+       if (!spds)
+               return 0;
+
+       return spds | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+#endif
+
+/* Map a firmware link-speed code to the ethtool SPEED_* value
+ * (in Mbps); unknown codes yield SPEED_UNKNOWN.
+ */
+u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+       switch (fw_link_speed) {
+       case BNXT_LINK_SPEED_100MB:
+               return SPEED_100;
+       case BNXT_LINK_SPEED_1GB:
+               return SPEED_1000;
+       case BNXT_LINK_SPEED_2_5GB:
+               return SPEED_2500;
+       case BNXT_LINK_SPEED_10GB:
+               return SPEED_10000;
+       case BNXT_LINK_SPEED_20GB:
+               return SPEED_20000;
+       case BNXT_LINK_SPEED_25GB:
+               return SPEED_25000;
+       case BNXT_LINK_SPEED_40GB:
+               return SPEED_40000;
+       case BNXT_LINK_SPEED_50GB:
+               return SPEED_50000;
+       default:
+               return SPEED_UNKNOWN;
+       }
+}
+
+#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G
+/* ethtool get_link_ksettings hook: report supported/advertised link
+ * modes, autoneg state, speed, duplex, port type and PHY address from
+ * the cached bnxt_link_info.  Always returns 0.
+ */
+static int bnxt_get_link_ksettings(struct net_device *dev,
+                                  struct ethtool_link_ksettings *lk_ksettings)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+       struct ethtool_link_settings *base = &lk_ksettings->base;
+       u32 ethtool_speed;
+
+       ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+       bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
+
+       ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
+       if (link_info->autoneg) {
+               bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
+               ethtool_link_ksettings_add_link_mode(lk_ksettings,
+                                                    advertising, Autoneg);
+               base->autoneg = AUTONEG_ENABLE;
+               /* link partner modes are only meaningful when link is up */
+               if (link_info->phy_link_status == BNXT_LINK_LINK)
+                       bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
+               ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+               if (!netif_carrier_ok(dev))
+                       base->duplex = DUPLEX_UNKNOWN;
+               else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+                       base->duplex = DUPLEX_FULL;
+               else
+                       base->duplex = DUPLEX_HALF;
+       } else {
+               base->autoneg = AUTONEG_DISABLE;
+               /* forced mode: report the requested (not negotiated) speed */
+               ethtool_speed =
+                       bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
+               base->duplex = DUPLEX_HALF;
+               if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
+                       base->duplex = DUPLEX_FULL;
+       }
+       base->speed = ethtool_speed;
+
+       /* derive the port type from the firmware media type */
+       base->port = PORT_NONE;
+       if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+               base->port = PORT_TP;
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+                                                    TP);
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
+                                                    TP);
+       } else {
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+                                                    FIBRE);
+               ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
+                                                    FIBRE);
+
+               if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+                       base->port = PORT_DA;
+               else if (link_info->media_type ==
+                        PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+                       base->port = PORT_FIBRE;
+       }
+       base->phy_address = link_info->phy_addr;
+
+       return 0;
+}
+
+#else
+
+/* Legacy ethtool get_settings hook (pre-link_ksettings kernels):
+ * report supported/advertised modes, autoneg, speed, duplex, port and
+ * transceiver type from the cached bnxt_link_info.  Always returns 0.
+ */
+static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+       u16 ethtool_speed;
+
+       cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+
+       if (link_info->support_auto_speeds)
+               cmd->supported |= SUPPORTED_Autoneg;
+
+       if (link_info->autoneg) {
+               cmd->advertising =
+                       bnxt_fw_to_ethtool_advertised_spds(link_info);
+               cmd->advertising |= ADVERTISED_Autoneg;
+               cmd->autoneg = AUTONEG_ENABLE;
+               /* link partner modes only meaningful when link is up */
+               if (link_info->phy_link_status == BNXT_LINK_LINK)
+                       cmd->lp_advertising =
+                               bnxt_fw_to_ethtool_lp_adv(link_info);
+               ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+               if (!netif_carrier_ok(dev))
+                       cmd->duplex = DUPLEX_UNKNOWN;
+               else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+                       cmd->duplex = DUPLEX_FULL;
+               else
+                       cmd->duplex = DUPLEX_HALF;
+       } else {
+               cmd->autoneg = AUTONEG_DISABLE;
+               cmd->advertising = 0;
+               /* forced mode: report the requested (not negotiated) speed */
+               ethtool_speed =
+                       bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
+               cmd->duplex = DUPLEX_HALF;
+               if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
+                       cmd->duplex = DUPLEX_FULL;
+       }
+       ethtool_cmd_speed_set(cmd, ethtool_speed);
+
+       /* derive the port type from the firmware media type */
+       cmd->port = PORT_NONE;
+       if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+               cmd->port = PORT_TP;
+               cmd->supported |= SUPPORTED_TP;
+               cmd->advertising |= ADVERTISED_TP;
+       } else {
+               cmd->supported |= SUPPORTED_FIBRE;
+               cmd->advertising |= ADVERTISED_FIBRE;
+
+               if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+                       cmd->port = PORT_DA;
+               else if (link_info->media_type ==
+                        PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+                       cmd->port = PORT_FIBRE;
+       }
+
+       if (link_info->transceiver ==
+           PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
+               cmd->transceiver = XCVR_INTERNAL;
+       else
+               cmd->transceiver = XCVR_EXTERNAL;
+       cmd->phy_address = link_info->phy_addr;
+
+       return 0;
+}
+#endif
+
+/* Map an ethtool SPEED_* value to the firmware forced-speed code,
+ * validating it against the speeds this port actually supports.
+ * Returns 0 (and logs an error for unrecognized speeds) when the
+ * speed cannot be used.
+ */
+static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+       u16 support_spds = link_info->support_speeds;
+       u32 fw_speed = 0;
+
+       switch (ethtool_speed) {
+       case SPEED_100:
+               if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
+                       fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+               break;
+       case SPEED_1000:
+               if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+                       fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+               break;
+       case SPEED_2500:
+               if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
+                       fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+               break;
+       case SPEED_10000:
+               if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+                       fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+               break;
+       case SPEED_20000:
+               if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
+                       fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+               break;
+       case SPEED_25000:
+               if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+                       fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+               break;
+       case SPEED_40000:
+               if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
+                       fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+               break;
+       case SPEED_50000:
+               if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
+                       fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+               break;
+       default:
+               netdev_err(dev, "unsupported speed!\n");
+               break;
+       }
+       return fw_speed;
+}
+
+/* Translate legacy ethtool ADVERTISED_* bits into a firmware
+ * autoneg speed mask.  Only 100M, 1G, 10G and 40G are honored;
+ * half-duplex modes map to the same firmware bit as full duplex.
+ */
+u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+{
+       u16 mask = 0;
+
+       if (advertising & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half))
+               mask |= BNXT_LINK_SPEED_MSK_100MB;
+       if (advertising & (ADVERTISED_1000baseT_Full |
+                          ADVERTISED_1000baseT_Half))
+               mask |= BNXT_LINK_SPEED_MSK_1GB;
+       if (advertising & ADVERTISED_10000baseT_Full)
+               mask |= BNXT_LINK_SPEED_MSK_10GB;
+       if (advertising & ADVERTISED_40000baseCR4_Full)
+               mask |= BNXT_LINK_SPEED_MSK_40GB;
+
+       return mask;
+}
+
+#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G
+/* ethtool set_link_ksettings hook: configure autoneg (with an optional
+ * advertised-speed subset) or a forced full-duplex speed.  Only a
+ * single-function PF may change link settings.  Returns 0 or a
+ * negative errno.
+ */
+static int bnxt_set_link_ksettings(struct net_device *dev,
+                          const struct ethtool_link_ksettings *lk_ksettings)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+       const struct ethtool_link_settings *base = &lk_ksettings->base;
+       bool set_pause = false;
+       u16 fw_advertising = 0;
+       u32 speed;
+       int rc = 0;
+
+       if (!BNXT_SINGLE_PF(bp))
+               return -EOPNOTSUPP;
+
+       if (base->autoneg == AUTONEG_ENABLE) {
+               BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
+                                       advertising);
+               link_info->autoneg |= BNXT_AUTONEG_SPEED;
+               /* empty advertising mask means "advertise everything" */
+               if (!fw_advertising)
+                       link_info->advertising = link_info->support_auto_speeds;
+               else
+                       link_info->advertising = fw_advertising;
+               /* any change to autoneg will cause link change, therefore the
+                * driver should put back the original pause setting in autoneg
+                */
+               set_pause = true;
+       } else {
+               u16 fw_speed;
+               u8 phy_type = link_info->phy_type;
+
+               /* forced speed is not allowed on BASE-T media */
+               if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
+                   phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
+                   link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+
+                       netdev_err(dev, "10GBase-T devices must autoneg\n");
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               if (base->duplex == DUPLEX_HALF) {
+                       netdev_err(dev, "HALF DUPLEX is not supported!\n");
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               speed = base->speed;
+               fw_speed = bnxt_get_fw_speed(dev, speed);
+               if (!fw_speed) {
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               link_info->req_link_speed = fw_speed;
+               link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+               link_info->autoneg = 0;
+               link_info->advertising = 0;
+       }
+       /* only push the change to firmware when the interface is up */
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
+
+set_setting_exit:
+       return rc;
+}
+
+#else
+
+/* Legacy ethtool set_settings hook (pre-link_ksettings kernels):
+ * same semantics as bnxt_set_link_ksettings but with the old
+ * ethtool_cmd ABI, including validation of the advertising mask
+ * against the autoneg-capable speeds.  Returns 0 or a negative errno.
+ */
+static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       int rc = 0;
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+       bool set_pause = false;
+       u16 fw_advertising = 0;
+       u32 speed;
+
+       if (!BNXT_SINGLE_PF(bp))
+               return -EOPNOTSUPP;
+
+       if (cmd->autoneg == AUTONEG_ENABLE) {
+               u32 supported_spds =
+                       bnxt_fw_to_ethtool_support_adv_spds(link_info);
+
+               if (!supported_spds) {
+                       netdev_err(dev, "Autoneg not supported\n");
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               /* reject any advertised mode the port cannot autoneg */
+               if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg |
+                                        ADVERTISED_TP | ADVERTISED_FIBRE)) {
+                       netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
+                                  cmd->advertising);
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
+               link_info->autoneg |= BNXT_AUTONEG_SPEED;
+               /* empty advertising mask means "advertise everything" */
+               if (!fw_advertising)
+                       link_info->advertising = link_info->support_auto_speeds;
+               else
+                       link_info->advertising = fw_advertising;
+               /* any change to autoneg will cause link change, therefore the
+                * driver should put back the original pause setting in autoneg
+                */
+               set_pause = true;
+       } else {
+               u16 fw_speed;
+               u8 phy_type = link_info->phy_type;
+
+               /* forced speed is not allowed on BASE-T media */
+               if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
+                   phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
+                   link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+
+                       netdev_err(dev, "10GBase-T devices must autoneg\n");
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               /* TODO: currently don't support half duplex */
+               if (cmd->duplex == DUPLEX_HALF) {
+                       netdev_err(dev, "HALF DUPLEX is not supported!\n");
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               /* If received a request for an unknown duplex, assume full*/
+               if (cmd->duplex == DUPLEX_UNKNOWN)
+                       cmd->duplex = DUPLEX_FULL;
+               speed = ethtool_cmd_speed(cmd);
+               fw_speed = bnxt_get_fw_speed(dev, speed);
+               if (!fw_speed) {
+                       rc = -EINVAL;
+                       goto set_setting_exit;
+               }
+               link_info->req_link_speed = fw_speed;
+               link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+               link_info->autoneg = 0;
+               link_info->advertising = 0;
+       }
+
+       /* only push the change to firmware when the interface is up */
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
+
+set_setting_exit:
+       return rc;
+}
+#endif
+
+/* ethtool get_pauseparam hook: report flow-control autoneg state and
+ * the requested RX/TX pause settings.  VFs cannot control pause, so
+ * the output is left untouched for them.
+ */
+static void bnxt_get_pauseparam(struct net_device *dev,
+                               struct ethtool_pauseparam *epause)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info;
+
+       if (BNXT_VF(bp))
+               return;
+
+       link_info = &bp->link_info;
+       epause->autoneg = (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) != 0;
+       epause->rx_pause = (link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX) != 0;
+       epause->tx_pause = (link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX) != 0;
+}
+
+/* ethtool set_pauseparam hook: configure autonegotiated or forced
+ * flow control.  Pause autoneg requires speed autoneg to be enabled.
+ * Only a single-function PF may change pause settings.  Returns 0 or
+ * a negative errno.
+ */
+static int bnxt_set_pauseparam(struct net_device *dev,
+                              struct ethtool_pauseparam *epause)
+{
+       int rc = 0;
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       if (!BNXT_SINGLE_PF(bp))
+               return -EOPNOTSUPP;
+
+       if (epause->autoneg) {
+               /* pause autoneg is only valid while speed autoneg is on */
+               if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+                       return -EINVAL;
+
+               link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+               /* newer firmware takes an explicit autoneg-pause request */
+               if (bp->hwrm_spec_code >= 0x10201)
+                       link_info->req_flow_ctrl =
+                               PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+       } else {
+               /* when transition from auto pause to force pause,
+                * force a link change
+                */
+               if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+                       link_info->force_link_chng = true;
+               link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
+               link_info->req_flow_ctrl = 0;
+       }
+       if (epause->rx_pause)
+               link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
+
+       if (epause->tx_pause)
+               link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_pause(bp);
+       return rc;
+}
+
+/* ethtool get_link hook: report the cached link-up state. */
+static u32 bnxt_get_link(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       /* TODO: handle MF, VF, driver close case */
+       return bp->link_info.link_up;
+}
+
+#ifdef CONFIG_BNXT_FLASHDEV
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+                               u16 ext, u16 *index, u32 *item_length,
+                               u32 *data_length);
+
+/* Write an NVM item via HWRM_NVM_WRITE.
+ *
+ * The destination slot is addressed by directory coordinates
+ * (@dir_type, @dir_ordinal, @dir_ext, @dir_attr).  @data is staged in a
+ * DMA-coherent bounce buffer that firmware pulls from; the longer
+ * FLASH_NVRAM_TIMEOUT allows for flash-programming time.
+ * Returns 0 or a negative errno.
+ */
+static int bnxt_flash_nvram(struct net_device *dev,
+                           u16 dir_type,
+                           u16 dir_ordinal,
+                           u16 dir_ext,
+                           u16 dir_attr,
+                           const u8 *data,
+                           size_t data_len)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+       struct hwrm_nvm_write_input req = {0};
+       dma_addr_t dma_handle;
+       u8 *kmem;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+
+       req.dir_type = cpu_to_le16(dir_type);
+       req.dir_ordinal = cpu_to_le16(dir_ordinal);
+       req.dir_ext = cpu_to_le16(dir_ext);
+       req.dir_attr = cpu_to_le16(dir_attr);
+       req.dir_data_length = cpu_to_le32(data_len);
+
+       kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+                                 GFP_KERNEL);
+       if (!kmem) {
+               netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+                          (unsigned)data_len);
+               return -ENOMEM;
+       }
+       memcpy(kmem, data, data_len);
+       req.host_src_addr = cpu_to_le64(dma_handle);
+
+       rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+       dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+
+       return rc;
+}
+
+/* Request a self-reset of the embedded processor that runs images of
+ * @dir_type, via HWRM_FW_RESET.  Boot/ChiMP and APE (management) resets
+ * are deferred until the next PCIe reset; KONG (NETCTRL) and BONO
+ * (ROCE) use the request defaults.  Returns 0, or -EINVAL for types
+ * with no processor mapping.
+ */
+static int bnxt_firmware_reset(struct net_device *dev,
+                              u16 dir_type)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_fw_reset_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+
+       /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
+       /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
+       /*       (e.g. when firmware isn't already running) */
+       switch (dir_type) {
+       case BNX_DIR_TYPE_CHIMP_PATCH:
+       case BNX_DIR_TYPE_BOOTCODE:
+       case BNX_DIR_TYPE_BOOTCODE_2:
+               req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
+               /* Self-reset ChiMP upon next PCIe reset: */
+               req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+               break;
+       case BNX_DIR_TYPE_APE_FW:
+       case BNX_DIR_TYPE_APE_PATCH:
+               req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+               /* Self-reset APE upon next PCIe reset: */
+               req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+               break;
+       case BNX_DIR_TYPE_KONG_FW:
+       case BNX_DIR_TYPE_KONG_PATCH:
+               req.embedded_proc_type =
+                       FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
+               break;
+       case BNX_DIR_TYPE_BONO_FW:
+       case BNX_DIR_TYPE_BONO_PATCH:
+               req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Validate and flash an APE-binary-format firmware image, then schedule
+ * a reset of the target processor.
+ *
+ * The image must begin with a struct bnxt_fw_header whose signature,
+ * code type and device family match @dir_type, and its final 4 bytes
+ * must hold the little-endian CRC32 of the preceding contents.
+ * Returns 0 or a negative errno.
+ */
+static int bnxt_flash_firmware(struct net_device *dev,
+                              u16 dir_type,
+                              const u8 *fw_data,
+                              size_t fw_size)
+{
+       int     rc = 0;
+       u16     code_type;
+       u32     stored_crc;
+       u32     calculated_crc;
+       struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
+
+       /* Map the directory entry type to the code type the header must
+        * declare.
+        */
+       switch (dir_type) {
+       case BNX_DIR_TYPE_BOOTCODE:
+       case BNX_DIR_TYPE_BOOTCODE_2:
+               code_type = CODE_BOOT;
+               break;
+       case BNX_DIR_TYPE_CHIMP_PATCH:
+               code_type = CODE_CHIMP_PATCH;
+               break;
+       case BNX_DIR_TYPE_APE_FW:
+               code_type = CODE_MCTP_PASSTHRU;
+               break;
+       case BNX_DIR_TYPE_APE_PATCH:
+               code_type = CODE_APE_PATCH;
+               break;
+       case BNX_DIR_TYPE_KONG_FW:
+               code_type = CODE_KONG_FW;
+               break;
+       case BNX_DIR_TYPE_KONG_PATCH:
+               code_type = CODE_KONG_PATCH;
+               break;
+       case BNX_DIR_TYPE_BONO_FW:
+               code_type = CODE_BONO_FW;
+               break;
+       case BNX_DIR_TYPE_BONO_PATCH:
+               code_type = CODE_BONO_PATCH;
+               break;
+       default:
+               netdev_err(dev, "Unsupported directory entry type: %u\n",
+                          dir_type);
+               return -EINVAL;
+       }
+       if (fw_size < sizeof(struct bnxt_fw_header)) {
+               netdev_err(dev, "Invalid firmware file size: %u\n",
+                          (unsigned int)fw_size);
+               return -EINVAL;
+       }
+       if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
+               netdev_err(dev, "Invalid firmware signature: %08X\n",
+                          le32_to_cpu(header->signature));
+               return -EINVAL;
+       }
+       if (header->code_type != code_type) {
+               netdev_err(dev, "Expected firmware type: %d, read: %d\n",
+                          code_type, header->code_type);
+               return -EINVAL;
+       }
+       if (header->device != DEVICE_CUMULUS_FAMILY) {
+               netdev_err(dev, "Expected firmware device family %d, read: %d\n",
+                          DEVICE_CUMULUS_FAMILY, header->device);
+               return -EINVAL;
+       }
+       /* Confirm the CRC32 checksum of the file: */
+       stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+                                            sizeof(stored_crc)));
+       calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+       if (calculated_crc != stored_crc) {
+               netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
+                          (unsigned long)stored_crc,
+                          (unsigned long)calculated_crc);
+               return -EINVAL;
+       }
+       rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+                             0, 0, fw_data, fw_size);
+       if (rc == 0)    /* Firmware update successful */
+               rc = bnxt_firmware_reset(dev, dir_type);
+
+       return rc;
+}
+
+/* Validate and flash a microcode image that carries a struct
+ * bnxt_ucode_trailer at its end.
+ *
+ * The trailer's signature, directory type and length are checked, as is
+ * the file's trailing CRC32, before the image is written with
+ * bnxt_flash_nvram().  Unlike bnxt_flash_firmware(), no processor reset
+ * is issued afterwards.  Returns 0 or a negative errno.
+ */
+static int bnxt_flash_microcode(struct net_device *dev,
+                               u16 dir_type,
+                               const u8 *fw_data,
+                               size_t fw_size)
+{
+       struct bnxt_ucode_trailer *trailer;
+       u32 calculated_crc;
+       u32 stored_crc;
+       int rc = 0;
+
+       if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
+               netdev_err(dev, "Invalid microcode file size: %u\n",
+                          (unsigned int)fw_size);
+               return -EINVAL;
+       }
+       trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
+                                               sizeof(*trailer)));
+       if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
+               netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
+                          le32_to_cpu(trailer->sig));
+               return -EINVAL;
+       }
+       if (le16_to_cpu(trailer->dir_type) != dir_type) {
+               netdev_err(dev, "Expected microcode type: %d, read: %d\n",
+                          dir_type, le16_to_cpu(trailer->dir_type));
+               return -EINVAL;
+       }
+       if (le16_to_cpu(trailer->trailer_length) <
+               sizeof(struct bnxt_ucode_trailer)) {
+               netdev_err(dev, "Invalid microcode trailer length: %d\n",
+                          le16_to_cpu(trailer->trailer_length));
+               return -EINVAL;
+       }
+
+       /* Confirm the CRC32 checksum of the file: */
+       stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+                                            sizeof(stored_crc)));
+       calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+       if (calculated_crc != stored_crc) {
+               netdev_err(dev,
+                          "CRC32 (%08lX) does not match calculated: %08lX\n",
+                          (unsigned long)stored_crc,
+                          (unsigned long)calculated_crc);
+               return -EINVAL;
+       }
+       rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+                             0, 0, fw_data, fw_size);
+
+       return rc;
+}
+
+/* Entry types whose images carry a struct bnxt_fw_header (APE binary
+ * format); these are validated and flashed by bnxt_flash_firmware().
+ */
+static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
+{
+       return dir_type == BNX_DIR_TYPE_CHIMP_PATCH ||
+              dir_type == BNX_DIR_TYPE_BOOTCODE ||
+              dir_type == BNX_DIR_TYPE_BOOTCODE_2 ||
+              dir_type == BNX_DIR_TYPE_APE_FW ||
+              dir_type == BNX_DIR_TYPE_APE_PATCH ||
+              dir_type == BNX_DIR_TYPE_KONG_FW ||
+              dir_type == BNX_DIR_TYPE_KONG_PATCH ||
+              dir_type == BNX_DIR_TYPE_BONO_FW ||
+              dir_type == BNX_DIR_TYPE_BONO_PATCH;
+}
+
+/* Executable entry types that are not in the APE binary format; these
+ * are validated and flashed by bnxt_flash_microcode().
+ */
+static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
+{
+       return dir_type == BNX_DIR_TYPE_AVS ||
+              dir_type == BNX_DIR_TYPE_EXP_ROM_MBA ||
+              dir_type == BNX_DIR_TYPE_PCIE ||
+              dir_type == BNX_DIR_TYPE_TSCF_UCODE ||
+              dir_type == BNX_DIR_TYPE_EXT_PHY ||
+              dir_type == BNX_DIR_TYPE_CCM ||
+              dir_type == BNX_DIR_TYPE_ISCSI_BOOT ||
+              dir_type == BNX_DIR_TYPE_ISCSI_BOOT_IPV6 ||
+              dir_type == BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6;
+}
+
+/* True iff @dir_type denotes any executable image type. */
+static bool bnxt_dir_type_is_executable(u16 dir_type)
+{
+       if (bnxt_dir_type_is_ape_bin_format(dir_type))
+               return true;
+
+       return bnxt_dir_type_is_other_exec_format(dir_type);
+}
+
+/* Flash one NVM item from a file obtained via request_firmware().
+ *
+ * The file is routed by @dir_type: APE-format binaries get header/CRC
+ * validation (and a subsequent processor reset) in
+ * bnxt_flash_firmware(), other executable types get trailer/CRC checks
+ * in bnxt_flash_microcode(), and everything else is written raw with
+ * bnxt_flash_nvram().  Returns 0 or a negative errno.
+ */
+static int bnxt_flash_firmware_from_file(struct net_device *dev,
+                                        u16 dir_type,
+                                        const char *filename)
+{
+       const struct firmware *fw;
+       int rc;
+
+       rc = request_firmware(&fw, filename, &dev->dev);
+       if (rc != 0) {
+               netdev_err(dev, "Error %d requesting firmware file: %s\n",
+                          rc, filename);
+               return rc;
+       }
+       /* bool helpers: compare implicitly, not against "true" */
+       if (bnxt_dir_type_is_ape_bin_format(dir_type))
+               rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+       else if (bnxt_dir_type_is_other_exec_format(dir_type))
+               rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
+       else
+               rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+                                     0, 0, fw->data, fw->size);
+       release_firmware(fw);
+       return rc;
+}
+
+/* Flash a whole firmware package: stage the file into the NVM update
+ * area (BNX_DIR_TYPE_UPDATE) with HWRM_NVM_MODIFY, then ask firmware to
+ * install it with HWRM_NVM_INSTALL_UPDATE.
+ *
+ * @install_type: requested install type; if its low 16 bits are zero,
+ *     the high 16 bits are used instead (see bnxt_flash_device()).
+ * Returns 0 or a negative errno: -ENOBUFS if no update area exists,
+ * -EFBIG if the file exceeds it, -EOPNOTSUPP if the install command
+ * fails, -ENOPKG if firmware rejects the package contents.
+ */
+static int bnxt_flash_package_from_file(struct net_device *dev,
+                                       char *filename, u32 install_type)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_nvm_install_update_input install = {0};
+       const struct firmware *fw;
+       u32 item_len;
+       u16 index;
+       int rc;
+
+       /* Update firmware's wall-clock time before installing
+        * (NOTE(review): presumably for image timestamping - confirm).
+        */
+       bnxt_hwrm_fw_set_time(bp);
+
+       if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+                                BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+                                &index, &item_len, NULL) != 0) {
+               netdev_err(dev, "PKG update area not created in nvram\n");
+               return -ENOBUFS;
+       }
+
+       rc = request_firmware(&fw, filename, &dev->dev);
+       if (rc != 0) {
+               netdev_err(dev, "PKG error %d requesting file: %s\n",
+                          rc, filename);
+               return rc;
+       }
+
+       if (fw->size > item_len) {
+               netdev_err(dev, "PKG insufficient update area in nvram: %lu",
+                          (unsigned long)fw->size);
+               rc = -EFBIG;
+       } else {
+               dma_addr_t dma_handle;
+               u8 *kmem;
+               struct hwrm_nvm_modify_input modify = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+
+               modify.dir_idx = cpu_to_le16(index);
+               modify.len = cpu_to_le32(fw->size);
+
+               /* Stage the package in a DMA bounce buffer for firmware */
+               kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
+                                         &dma_handle, GFP_KERNEL);
+               if (!kmem) {
+                       netdev_err(dev,
+                                  "dma_alloc_coherent failure, length = %u\n",
+                                  (unsigned int)fw->size);
+                       rc = -ENOMEM;
+               } else {
+                       memcpy(kmem, fw->data, fw->size);
+                       modify.host_src_addr = cpu_to_le64(dma_handle);
+
+                       rc = hwrm_send_message(bp, &modify, sizeof(modify),
+                                              FLASH_PACKAGE_TIMEOUT);
+                       dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
+                                         dma_handle);
+               }
+       }
+       release_firmware(fw);
+       if (rc)
+               return rc;
+
+       /* Low 16 bits zero: the type was passed in the high 16 bits */
+       if ((install_type & 0xffff) == 0)
+               install_type >>= 16;
+       bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
+       install.install_type = cpu_to_le32(install_type);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                               INSTALL_PACKAGE_TIMEOUT);
+       if (rc) {
+               rc = -EOPNOTSUPP;
+               goto flash_pkg_exit;
+       }
+
+       if (resp->error_code) {
+               u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
+
+               /* Fragmented update area: retry once allowing defrag */
+               if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+                       install.flags |= cpu_to_le16(
+                              NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+                       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                                               INSTALL_PACKAGE_TIMEOUT);
+                       if (rc) {
+                               rc = -EOPNOTSUPP;
+                               goto flash_pkg_exit;
+                       }
+               }
+       }
+
+       if (resp->result) {
+               netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
+                          (s8)resp->result, (int)resp->problem_item);
+               rc = -ENOPKG;
+       }
+flash_pkg_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* ethtool flash_device hook: flash a firmware file into the NIC's NVM.
+ *
+ * flash->region selects a single NVM item type, or (with
+ * ETHTOOL_FLASH_ALL_REGIONS, or any value above 0xffff) a whole update
+ * package.  Not permitted on a VF.  Returns 0 or a negative errno.
+ */
+static int bnxt_flash_device(struct net_device *dev,
+                            struct ethtool_flash *flash)
+{
+       /* netdev_priv() returns void *; the old cast was redundant */
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!BNXT_PF(bp)) {
+               netdev_err(dev, "flashdev not supported from a virtual function\n");
+               return -EINVAL;
+       }
+
+       if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
+           flash->region > 0xffff)
+               return bnxt_flash_package_from_file(dev, flash->data,
+                                                   flash->region);
+
+       return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
+}
+
+/* Query the NVM directory header: number of entries and per-entry size. */
+static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_nvm_get_dir_info_input req = {0};
+       struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+
+       /* Hold the HWRM lock while the shared response buffer is read */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc == 0) {
+               *entries = le32_to_cpu(resp->entries);
+               *length = le32_to_cpu(resp->entry_length);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* ethtool get_eeprom_len hook.
+ *
+ * Returning -1 lets the ethtool utility pass the full 32-bit range of
+ * offsets through to bnxt_get_eeprom()/bnxt_set_eeprom() unchecked.
+ */
+static int bnxt_get_eeprom_len(struct net_device *dev)
+{
+       return -1;
+}
+
+/* Copy the NVM directory into @data for the ethtool eeprom interface.
+ *
+ * Layout: data[0] = entry count, data[1] = entry size, followed by the
+ * raw directory entries DMA'd from firmware, truncated to @len with any
+ * unused tail pre-filled with 0xff.  Note the count and entry size are
+ * each truncated to a single byte here.  Returns 0 or a negative errno.
+ */
+static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+       u32 dir_entries;
+       u32 entry_length;
+       u8 *buf;
+       size_t buflen;
+       dma_addr_t dma_handle;
+       struct hwrm_nvm_get_dir_entries_input req = {0};
+
+       rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
+       if (rc != 0)
+               return rc;
+
+       /* Insert 2 bytes of directory info (count and size of entries) */
+       if (len < 2)
+               return -EINVAL;
+
+       *data++ = dir_entries;
+       *data++ = entry_length;
+       len -= 2;
+       memset(data, 0xff, len);
+
+       buflen = dir_entries * entry_length;
+       buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
+                                GFP_KERNEL);
+       if (!buf) {
+               netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+                          (unsigned)buflen);
+               return -ENOMEM;
+       }
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
+       req.host_dest_addr = cpu_to_le64(dma_handle);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc == 0)
+               /* copy only as much as fits in the caller's buffer */
+               memcpy(data, buf, len > buflen ? buflen : len);
+       dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+       return rc;
+}
+
+/* Read @length bytes at @offset from NVM directory entry @index into
+ * @data, via HWRM_NVM_READ and a DMA-coherent bounce buffer.
+ */
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+                              u32 length, u8 *data)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_nvm_read_input req = {0};
+       dma_addr_t mapping;
+       u8 *dma_buf;
+       int rc;
+
+       dma_buf = dma_alloc_coherent(&bp->pdev->dev, length, &mapping,
+                                    GFP_KERNEL);
+       if (!dma_buf) {
+               netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+                          (unsigned)length);
+               return -ENOMEM;
+       }
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+       req.host_dest_addr = cpu_to_le64(mapping);
+       req.dir_idx = cpu_to_le16(index);
+       req.offset = cpu_to_le32(offset);
+       req.len = cpu_to_le32(length);
+
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               memcpy(data, dma_buf, length);
+       dma_free_coherent(&bp->pdev->dev, length, dma_buf, mapping);
+       return rc;
+}
+
+/* Locate an NVM directory entry by (type, ordinal, ext).  On success
+ * the optional @index, @item_length and @data_length outputs are
+ * filled in.
+ */
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+                               u16 ext, u16 *index, u32 *item_length,
+                               u32 *data_length)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_nvm_find_dir_entry_input req = {0};
+       struct hwrm_nvm_find_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
+       req.enables = 0;
+       req.dir_idx = 0;
+       req.dir_type = cpu_to_le16(type);
+       req.dir_ordinal = cpu_to_le16(ordinal);
+       req.dir_ext = cpu_to_le16(ext);
+       req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
+       /* Silent send: a lookup miss is an expected outcome for callers */
+       rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               return rc;
+
+       if (index)
+               *index = le16_to_cpu(resp->dir_idx);
+       if (item_length)
+               *item_length = le32_to_cpu(resp->dir_item_length);
+       if (data_length)
+               *data_length = le32_to_cpu(resp->dir_data_length);
+       return 0;
+}
+
+/* Extract field @desired_field from the last record of a package log.
+ *
+ * The log is a sequence of '\n'-terminated records of '\t'-separated
+ * fields.  Parsing is destructive: separators are overwritten with NULs
+ * so the returned pointer (into @data) is a NUL-terminated string.
+ * Returns NULL when @datalen is 0 or the field is missing from the
+ * final record.
+ */
+static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
+{
+       char    *retval = NULL;
+       char    *p;
+       char    *value;
+       int     field = 0;
+
+       if (datalen < 1)
+               return NULL;
+       /* null-terminate the log data (removing last '\n'): */
+       data[datalen - 1] = 0;
+       for (p = data; *p != 0; p++) {
+               field = 0;
+               retval = NULL;
+               /* Walk one record, NUL-terminating each field in place */
+               while (*p != 0 && *p != '\n') {
+                       value = p;
+                       while (*p != 0 && *p != '\t' && *p != '\n')
+                               p++;
+                       if (field == desired_field)
+                               retval = value;
+                       if (*p != '\t')
+                               break;
+                       *p = 0;
+                       field++;
+                       p++;
+               }
+               /* Only the last record's field survives the outer loop */
+               if (*p == 0)
+                       break;
+               *p = 0;
+       }
+       return retval;
+}
+
+/* Fetch the package version string from the PKG_LOG NVM item, using
+ * @buf (of @buflen bytes) as scratch space.  Returns a pointer into
+ * @buf, or NULL when the item is absent or unreadable.
+ */
+static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
+{
+       u32 pkglog_len;
+       u16 pkglog_idx = 0;
+
+       if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
+                                BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+                                &pkglog_idx, NULL, &pkglog_len) != 0)
+               return NULL;
+
+       memset(buf, 0, buflen);
+       if (bnxt_get_nvram_item(dev, pkglog_idx, 0, pkglog_len, buf) != 0)
+               return NULL;
+
+       return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
+               pkglog_len);
+}
+
+/* ethtool get_eeprom hook: multiplexed NVM/firmware query interface.
+ *
+ * eeprom->offset encoding:
+ *   0                      - return the NVM directory
+ *                            (bnxt_get_nvram_directory())
+ *   (index << 24) | off, index != 0
+ *                          - read NVM item (index - 1) at offset "off"
+ *   index == 0, off 1/2/3  - special queries: per-processor firmware
+ *                            self-reset status, cached firmware version
+ *                            response, or NVM device info; data[0]
+ *                            receives the payload size.
+ * Returns 0 or a negative errno.
+ */
+static int bnxt_get_eeprom(struct net_device *dev,
+                          struct ethtool_eeprom *eeprom,
+                          u8 *data)
+{
+       int i;
+       int rc;
+       u32 index;
+       u32 offset;
+       u32 size;
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_fw_qstatus_input fw_status_req = {0};
+       struct hwrm_fw_qstatus_output *fw_status_resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_nvm_get_dev_info_input dev_info_req = {0};
+
+       if (eeprom->len < 1)
+               return -EINVAL;
+
+       if (eeprom->offset == 0) /* special offset value to get directory */
+               return bnxt_get_nvram_directory(dev, eeprom->len, data);
+
+       /* Split the offset into item index (high byte) and item offset */
+       index = eeprom->offset >> 24;
+       offset = eeprom->offset & 0xffffff;
+
+       if (index != 0)
+               return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len,
+                                          data);
+
+       switch (offset) {
+       case 1: /* Query firmware reset status */
+               if (eeprom->len < 5)
+                       return -EINVAL;
+               size = 4; /* procs: BOOT, MGMT, NETCTRL, and ROCE */
+               *(data++) = size;
+               mutex_lock(&bp->hwrm_cmd_lock);
+               for (i = 0; i < size; i++) {
+                       bnxt_hwrm_cmd_hdr_init(bp, &fw_status_req,
+                                              HWRM_FW_QSTATUS, -1, -1);
+                       fw_status_req.embedded_proc_type = i;
+                       rc = _hwrm_send_message(bp, &fw_status_req,
+                                              sizeof(fw_status_req),
+                                              HWRM_CMD_TIMEOUT);
+                       if (rc == 0)
+                               *(data++) = fw_status_resp->selfrst_status;
+                       else
+                               break;
+               }
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               return rc;
+       case 2: /* Query firmware version information */
+               size = sizeof(bp->ver_resp);
+               *(data++) = size;
+               memcpy(data, &bp->ver_resp, min(size, eeprom->len - 1));
+               return 0;
+       case 3: /* Query NVM device information */
+               bnxt_hwrm_cmd_hdr_init(bp, &dev_info_req,
+                                      HWRM_NVM_GET_DEV_INFO, -1, -1);
+               mutex_lock(&bp->hwrm_cmd_lock);
+               rc = _hwrm_send_message(bp, &dev_info_req, sizeof(dev_info_req),
+                                      HWRM_CMD_TIMEOUT);
+               if (rc == 0) {
+                       size = sizeof(struct hwrm_nvm_get_dev_info_output);
+                       *(data++) = size;
+                       memcpy(data, bp->hwrm_cmd_resp_addr, min(size,
+                              eeprom->len - 1));
+               }
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               return rc;
+       }
+       return -EINVAL;
+}
+
+/* Ask firmware to erase NVM directory entry @index (0-based). */
+static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
+{
+       struct hwrm_nvm_erase_dir_entry_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
+       req.dir_idx = cpu_to_le16(index);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* ethtool set_eeprom hook: NVM write/erase via the eeprom interface.
+ *
+ * eeprom->magic encodes the operation: a type of 0xffff in its high 16
+ * bits selects a directory operation (currently only erase, dir_op
+ * 0x0e, with the 1-based entry index in the low byte); any other type
+ * creates or rewrites that NVM item, with ext in magic's low 16 bits
+ * and ordinal/attr packed into eeprom->offset.  Executable types are
+ * rejected (use ethtool -f instead).  Not permitted on a VF.
+ */
+static int bnxt_set_eeprom(struct net_device *dev,
+                          struct ethtool_eeprom *eeprom,
+                          u8 *data)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u8 index, dir_op;
+       u16 type, ext, ordinal, attr;
+
+       if (!BNXT_PF(bp)) {
+               netdev_err(dev, "NVM write not supported from a virtual function\n");
+               return -EINVAL;
+       }
+
+       type = eeprom->magic >> 16;
+
+       if (type == 0xffff) { /* special value for directory operations */
+               index = eeprom->magic & 0xff;
+               dir_op = eeprom->magic >> 8;
+               if (index == 0)
+                       return -EINVAL;
+               switch (dir_op) {
+               case 0x0e: /* erase */
+                       /* offset must be the bitwise inverse of magic */
+                       if (eeprom->offset != ~eeprom->magic)
+                               return -EINVAL;
+                       return bnxt_erase_nvram_directory(dev, index - 1);
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       /* Create or re-write an NVM item: */
+       if (bnxt_dir_type_is_executable(type))
+               return -EOPNOTSUPP;
+       ext = eeprom->magic & 0xffff;
+       ordinal = eeprom->offset >> 16;
+       attr = eeprom->offset & 0xffff;
+
+       return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+                               eeprom->len);
+}
+
+#endif
+
+#if defined(ETHTOOL_GEEE) && !defined(GET_ETHTOOL_OP_EXT)
+/* ethtool set_eee hook: validate and cache the requested EEE settings,
+ * pushing them to firmware while the interface is running.
+ *
+ * Requires a single-function PF with EEE capability.  Enabling EEE
+ * requires speed autoneg; the LPI timer must fall inside the
+ * firmware-advertised [lpi_tmr_lo, lpi_tmr_hi] window when one exists,
+ * and the advertised EEE speeds must be a subset of the autoneg speeds.
+ * Returns 0 or a negative errno.
+ */
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct ethtool_eee *eee = &bp->eee;
+       struct bnxt_link_info *link_info = &bp->link_info;
+       u32 advertising =
+                _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+       int rc = 0;
+
+       if (!BNXT_SINGLE_PF(bp))
+               return -EOPNOTSUPP;
+
+       if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+               return -EOPNOTSUPP;
+
+       /* Disabling EEE skips all validation below */
+       if (!edata->eee_enabled)
+               goto eee_ok;
+
+       if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+               netdev_warn(dev, "EEE requires autoneg\n");
+               return -EINVAL;
+       }
+       if (edata->tx_lpi_enabled) {
+               if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
+                                      edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
+                       netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
+                                   bp->lpi_tmr_lo, bp->lpi_tmr_hi);
+                       return -EINVAL;
+               } else if (!bp->lpi_tmr_hi) {
+                       /* no range from firmware: keep the cached timer */
+                       edata->tx_lpi_timer = eee->tx_lpi_timer;
+               }
+       }
+       if (!edata->advertised) {
+               /* default: advertise everything supported and autoneg'd */
+               edata->advertised = advertising & eee->supported;
+       } else if (edata->advertised & ~advertising) {
+               netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
+                           edata->advertised, advertising);
+               return -EINVAL;
+       }
+
+       eee->advertised = edata->advertised;
+       eee->tx_lpi_enabled = edata->tx_lpi_enabled;
+       eee->tx_lpi_timer = edata->tx_lpi_timer;
+eee_ok:
+       eee->eee_enabled = edata->eee_enabled;
+
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_link_setting(bp, false, true);
+
+       return rc;
+}
+
+/* ethtool get_eee hook: report the driver's cached EEE state. */
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+               return -EOPNOTSUPP;
+
+       *edata = bp->eee;
+       if (!bp->eee.eee_enabled) {
+               /* Report nothing advertised while disabled, but keep
+                * tx_lpi_timer so the old value is reused on re-enable.
+                */
+               edata->advertised = 0;
+               edata->tx_lpi_enabled = 0;
+       }
+
+       if (!bp->eee.eee_active)
+               edata->lp_advertised = 0;
+
+       return 0;
+}
+#endif
+
+#if defined(ETHTOOL_GMODULEEEPROM) && !defined(GET_ETHTOOL_OP_EXT)
+/* Read @data_length bytes from a module EEPROM page over I2C via
+ * HWRM_PORT_PHY_I2C_READ, in chunks of at most
+ * BNXT_MAX_PHY_I2C_RESP_SIZE bytes, each copied out of the shared HWRM
+ * response buffer under hwrm_cmd_lock.  @i2c_addr selects the device
+ * (A0/A2 page), @page_number/@start_addr the location.  Returns 0 or a
+ * negative errno; stops at the first failed chunk.
+ */
+static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
+                                           u16 page_number, u16 start_addr,
+                                           u16 data_length, u8 *buf)
+{
+       struct hwrm_port_phy_i2c_read_input req = {0};
+       struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+       int rc, byte_offset = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+       req.i2c_slave_addr = i2c_addr;
+       req.page_number = cpu_to_le16(page_number);
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       do {
+               u16 xfer_size;
+
+               xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
+               data_length -= xfer_size;
+               req.page_offset = cpu_to_le16(start_addr + byte_offset);
+               req.data_length = xfer_size;
+               /* PAGE_OFFSET is only flagged for a non-zero offset */
+               req.enables = cpu_to_le32(start_addr + byte_offset ?
+                                PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+               mutex_lock(&bp->hwrm_cmd_lock);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (!rc)
+                       memcpy(buf + byte_offset, output->data, xfer_size);
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               byte_offset += xfer_size;
+       } while (!rc && data_length > 0);
+
+       return rc;
+}
+
+/* ethtool get_module_info hook: identify the plugged SFP/QSFP module.
+ *
+ * Reads the SFF-8472 compliance byte from the module's A0 page and maps
+ * the module identifier to the matching ethtool EEPROM format and
+ * length.  Bails out early if the PHY reports the module as absent or
+ * powered down, or if firmware is too old (< spec 1.2.2).  Returns 0 or
+ * a negative errno.
+ */
+static int bnxt_get_module_info(struct net_device *dev,
+                               struct ethtool_modinfo *modinfo)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_port_phy_i2c_read_input req = {0};
+       struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       /* No point in going further if phy status indicates
+        * module is not inserted or if it is powered down or
+        * if it is of type 10GBase-T
+        */
+       if (bp->link_info.module_status >
+               PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+               return -EOPNOTSUPP;
+
+       /* This feature is not supported in older firmware versions */
+       if (bp->hwrm_spec_code < 0x10202)
+               return -EOPNOTSUPP;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+       req.i2c_slave_addr = I2C_DEV_ADDR_A0;
+       req.page_number = 0;
+       req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
+       req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               u32 module_id = le32_to_cpu(output->data[0]);
+
+               /* Map the SFF module identifier to an ethtool format */
+               switch (module_id) {
+               case SFF_MODULE_ID_SFP:
+                       modinfo->type = ETH_MODULE_SFF_8472;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+                       break;
+               case SFF_MODULE_ID_QSFP:
+               case SFF_MODULE_ID_QSFP_PLUS:
+                       modinfo->type = ETH_MODULE_SFF_8436;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+                       break;
+               case SFF_MODULE_ID_QSFP28:
+                       modinfo->type = ETH_MODULE_SFF_8636;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+                       break;
+               default:
+                       rc = -EOPNOTSUPP;
+                       break;
+               }
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Read @eeprom->len bytes of module EEPROM starting at @eeprom->offset
+ * into @data.  The linear ethtool offset space maps the first
+ * ETH_MODULE_SFF_8436_LEN bytes to I2C address A0 and the remainder to
+ * address A2 (the SFF-8472 diagnostics page), so a request may span
+ * both devices.  Returns 0 on success or a negative errno.
+ */
+static int bnxt_get_module_eeprom(struct net_device *dev,
+                                 struct ethtool_eeprom *eeprom,
+                                 u8 *data)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       u16  start = eeprom->offset, length = eeprom->len;
+       int rc = 0;
+
+       memset(data, 0, eeprom->len);
+
+       /* Read A0 portion of the EEPROM */
+       if (start < ETH_MODULE_SFF_8436_LEN) {
+               if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
+                       length = ETH_MODULE_SFF_8436_LEN - start;
+               rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+                                                     start, length, data);
+               if (rc)
+                       return rc;
+
+               /* Advance the destination by the number of bytes just
+                * read.  (Advancing by the absolute offset, as the old
+                * code did with "data += start", overruns the buffer
+                * whenever the requested offset is non-zero.)
+                */
+               start += length;
+               data += length;
+               length = eeprom->len - length;
+       }
+
+       /* Read A2 portion of the EEPROM; propagate its status instead
+        * of silently returning success on failure.
+        */
+       if (length) {
+               start -= ETH_MODULE_SFF_8436_LEN;
+               rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+                                                     start, length, data);
+       }
+       return rc;
+}
+#endif
+
+/* ethtool -r: restart autonegotiation.  Only meaningful on a
+ * single-function PF with autoneg enabled; a no-op success when the
+ * interface is down.
+ */
+static int bnxt_nway_reset(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       if (!BNXT_SINGLE_PF(bp))
+               return -EOPNOTSUPP;
+
+       if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+               return -EINVAL;
+
+       if (!netif_running(dev))
+               return 0;
+
+       return bnxt_hwrm_set_link_setting(bp, true, false);
+}
+
+#if defined(HAVE_SET_PHYS_ID) && !defined(GET_ETHTOOL_OP_EXT)
+/* ethtool -p: blink the port LEDs so the physical port can be located.
+ * ETHTOOL_ID_ACTIVE starts alternate blinking; ETHTOOL_ID_INACTIVE
+ * restores the default LED behavior.
+ */
+static int bnxt_set_phys_id(struct net_device *dev,
+                           enum ethtool_phys_id_state state)
+{
+       struct hwrm_port_led_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_pf_info *pf = &bp->pf;
+       struct bnxt_led_cfg *led_cfg;
+       u8 led_state;
+       __le16 duration;
+       int i, rc;
+
+       /* Needs LED info discovered from firmware; PF only */
+       if (!bp->num_leds || BNXT_VF(bp))
+               return -EOPNOTSUPP;
+
+       if (state == ETHTOOL_ID_ACTIVE) {
+               led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
+               duration = cpu_to_le16(500);
+       } else if (state == ETHTOOL_ID_INACTIVE) {
+               /* NOTE(review): LED1_STATE_DEFAULT is applied to every
+                * LED; presumably the LEDx_STATE_DEFAULT constants all
+                * share one value -- confirm against bnxt_hsi.h.
+                */
+               led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
+               duration = cpu_to_le16(0);
+       } else {
+               return -EINVAL;
+       }
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
+       req.port_id = cpu_to_le16(pf->port_id);
+       req.num_leds = bp->num_leds;
+       /* The per-LED request fields repeat with a fixed stride;
+        * struct bnxt_led_cfg is overlaid on them to fill each slot.
+        */
+       led_cfg = (struct bnxt_led_cfg *) &req.led0_id;
+       for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+               req.enables |= BNXT_LED_DFLT_ENABLES(i);
+               led_cfg->led_id = bp->leds[i].led_id;
+               led_cfg->led_state = led_state;
+               led_cfg->led_blink_on = duration;
+               led_cfg->led_blink_off = duration;
+               led_cfg->led_group_id = bp->leds[i].led_group_id;
+       }
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               rc = -EIO;
+       return rc;
+}
+#endif
+
+/* Ask firmware (HWRM_SELFTEST_IRQ) to generate a test interrupt on the
+ * given completion ring.  Returns the HWRM status.
+ */
+static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
+{
+       struct hwrm_selftest_irq_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Fire a firmware test interrupt on every completion ring, stopping at
+ * the first failure.  Returns 0 when all rings pass.
+ */
+static int bnxt_test_irq(struct bnxt *bp)
+{
+       int rc = 0;
+       int ring;
+
+       for (ring = 0; ring < bp->cp_nr_rings && !rc; ring++)
+               rc = bnxt_hwrm_selftest_irq(bp,
+                                           bp->grp_info[ring].cp_fw_ring_id);
+       return rc;
+}
+
+/* Turn internal MAC loopback on or off via HWRM_PORT_MAC_CFG. */
+static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
+{
+       struct hwrm_port_mac_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+       req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
+       req.lpbk = enable ? PORT_MAC_CFG_REQ_LPBK_LOCAL :
+                           PORT_MAC_CFG_REQ_LPBK_NONE;
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Loopback needs a fixed link speed, so when autoneg is on, force one
+ * via an intermediate HWRM_PORT_PHY_CFG: the current speed if the
+ * carrier is up, otherwise the first advertised speed scanning from
+ * 10Gb to 50Gb, defaulting to 1Gb.  The force/reset flags and speed
+ * are cleared from @req again so the caller's subsequent send does not
+ * repeat them.
+ */
+static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
+                                   struct hwrm_port_phy_cfg_input *req)
+{
+       struct bnxt_link_info *link_info = &bp->link_info;
+       u16 advertising = link_info->advertising;
+       u16 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
+       int rc;
+
+       if (!link_info->autoneg)
+               return 0;
+
+       if (netif_carrier_ok(bp->dev))
+               fw_speed = link_info->link_speed;
+       else if (advertising & BNXT_LINK_SPEED_MSK_10GB)
+               fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
+       else if (advertising & BNXT_LINK_SPEED_MSK_25GB)
+               fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
+       else if (advertising & BNXT_LINK_SPEED_MSK_40GB)
+               fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+       else if (advertising & BNXT_LINK_SPEED_MSK_50GB)
+               fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+
+       req->force_link_speed = cpu_to_le16(fw_speed);
+       req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
+                                 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+       rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
+       req->flags = 0;
+       req->force_link_speed = cpu_to_le16(0);
+       return rc;
+}
+
+/* Turn PHY-level loopback on or off via HWRM_PORT_PHY_CFG. */
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+{
+       struct hwrm_port_phy_cfg_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+       req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
+       if (enable) {
+               /* A forced speed is required while autoneg is on */
+               bnxt_disable_an_for_lpbk(bp, &req);
+               req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+       }
+       req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Validate a looped-back test frame.  Locates the RX buffer referenced
+ * by the RX completion at @raw_cons and checks that its length and
+ * payload match the pattern built by bnxt_run_loopback().
+ * Returns 0 on match, -EIO on any mismatch.
+ */
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+                           u32 raw_cons, int pkt_size)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       struct bnxt_sw_rx_bd *rx_buf;
+       struct rx_cmp *rxcmp;
+       u16 cp_cons, cons;
+       u8 *data;
+       u32 len;
+       int i;
+
+       /* The completion's opaque field carries the RX buffer index */
+       cp_cons = RING_CMP(raw_cons);
+       rxcmp = (struct rx_cmp *)
+               &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+       cons = rxcmp->rx_cmp_opaque;
+       rx_buf = &rxr->rx_buf_ring[cons];
+       data = rx_buf->data_ptr;
+       len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+       if (len != pkt_size)
+               return -EIO;
+       /* Skip the 6-byte broadcast DA, then check SA == our MAC */
+       i = ETH_ALEN;
+       if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
+               return -EIO;
+       i += ETH_ALEN;
+       /* Remaining payload must be the i & 0xff byte pattern */
+       for (  ; i < pkt_size; i++) {
+               if (data[i] != (u8)(i & 0xff))
+                       return -EIO;
+       }
+       return 0;
+}
+
+/* Busy-poll completion ring 0 for the loopback RX completion (up to
+ * 200 polls, 5us apart) and hand it to bnxt_rx_loopback() for
+ * verification.  Returns its result, or -EIO if no RX completion
+ * arrives in time.
+ */
+static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+{
+       struct bnxt_napi *bnapi = bp->bnapi[0];
+       struct bnxt_cp_ring_info *cpr;
+       struct tx_cmp *txcmp;
+       int rc = -EIO;
+       u32 raw_cons;
+       u32 cons;
+       int i;
+
+       cpr = &bnapi->cp_ring;
+       raw_cons = cpr->cp_raw_cons;
+       for (i = 0; i < 200; i++) {
+               cons = RING_CMP(raw_cons);
+               txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+               if (!TX_CMP_VALID(txcmp, raw_cons)) {
+                       udelay(5);
+                       continue;
+               }
+
+               /* The valid test of the entry must be done first before
+                * reading any further.
+                */
+               dma_rmb();
+               if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+                       rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
+                       /* Advance twice -- presumably the RX completion
+                        * occupies two ring entries; confirm against the
+                        * ring layout before changing.
+                        */
+                       raw_cons = NEXT_RAW_CMP(raw_cons);
+                       raw_cons = NEXT_RAW_CMP(raw_cons);
+                       break;
+               }
+               raw_cons = NEXT_RAW_CMP(raw_cons);
+       }
+       /* Record how far we consumed so normal processing resumes here */
+       cpr->cp_raw_cons = raw_cons;
+       return rc;
+}
+
+/* Transmit one self-addressed test frame on TX ring 0 and verify it is
+ * received back (MAC or PHY loopback must already be enabled by the
+ * caller).  The frame is a broadcast DA, our MAC as SA, then an
+ * incrementing byte pattern, sized to stay within the rx-copy
+ * threshold so the receive path lands it in a linear buffer.
+ * Returns 0 when the frame comes back intact.
+ */
+static int bnxt_run_loopback(struct bnxt *bp)
+{
+       struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+       int pkt_size, i = 0;
+       struct sk_buff *skb;
+       dma_addr_t map;
+       u8 *data;
+       int rc;
+
+       pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+       skb = netdev_alloc_skb(bp->dev, pkt_size);
+       if (!skb)
+               return -ENOMEM;
+       data = skb_put(skb, pkt_size);
+       /* Build the pattern checked by bnxt_rx_loopback() */
+       eth_broadcast_addr(data);
+       i += ETH_ALEN;
+       ether_addr_copy(&data[i], bp->dev->dev_addr);
+       i += ETH_ALEN;
+       for ( ; i < pkt_size; i++)
+               data[i] = (u8)(i & 0xff);
+
+       map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+                            PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&bp->pdev->dev, map)) {
+               dev_kfree_skb(skb);
+               return -EIO;
+       }
+       /* Reuse the XDP transmit helper to queue a single BD */
+       bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
+
+       /* Sync BD data before updating doorbell */
+       wmb();
+
+       /* NOTE(review): the doorbell is written twice on purpose here,
+        * presumably a hardware workaround -- confirm before removing.
+        */
+       writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+       writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+       rc = bnxt_poll_loopback(bp, pkt_size);
+
+       dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+       dev_kfree_skb(skb);
+       return rc;
+}
+
+/* Execute the firmware self-tests selected by @test_mask via
+ * HWRM_SELFTEST_EXEC and return the per-test pass bits through
+ * @test_results.  The response's test_success field is cleared under
+ * the hwrm lock before sending so a failed or timed-out command cannot
+ * leave stale pass bits behind; keep that ordering intact.
+ */
+static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
+{
+       struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_selftest_exec_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       resp->test_success = 0;
+       req.flags = test_mask;
+       /* Firmware advertises its own per-test timeout */
+       rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
+       *test_results = resp->test_success;
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+#define BNXT_DRV_TESTS                 3
+#define BNXT_MACLPBK_TEST_IDX          (bp->num_tests - BNXT_DRV_TESTS)
+#define BNXT_PHYLPBK_TEST_IDX          (BNXT_MACLPBK_TEST_IDX + 1)
+#define BNXT_IRQ_TEST_IDX              (BNXT_MACLPBK_TEST_IDX + 2)
+
+/* ethtool -t entry point.  Runs firmware self-tests, the driver's
+ * MAC/PHY loopback tests (offline mode only), and the IRQ test.
+ * @buf gets one u64 per test: 0 = pass, 1 = fail.  Offline tests close
+ * the NIC, run in a half-open state, and restore it afterwards.
+ */
+static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+                          u64 *buf)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       bool offline = false;
+       u8 test_results = 0;
+       u8 test_mask = 0;
+       int rc, i;
+
+       if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+               return;
+       memset(buf, 0, sizeof(u64) * bp->num_tests);
+       if (!netif_running(dev)) {
+               etest->flags |= ETH_TEST_FL_FAILED;
+               return;
+       }
+
+       /* Offline tests disrupt traffic and cannot run with SR-IOV VFs */
+       if (etest->flags & ETH_TEST_FL_OFFLINE) {
+               if (bp->pf.active_vfs) {
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+                       return;
+               }
+               offline = true;
+       }
+
+       /* Select online firmware tests always; offline ones only when
+        * the user requested offline mode.
+        */
+       for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+               u8 bit_val = 1 << i;
+
+               if (!(bp->test_info->offline_mask & bit_val))
+                       test_mask |= bit_val;
+               else if (offline)
+                       test_mask |= bit_val;
+       }
+       if (!offline) {
+               bnxt_run_fw_tests(bp, test_mask, &test_results);
+       } else {
+               rc = bnxt_close_nic(bp, false, false);
+               if (rc)
+                       return;
+               bnxt_run_fw_tests(bp, test_mask, &test_results);
+
+               /* MAC loopback: assume failure until the frame returns */
+               buf[BNXT_MACLPBK_TEST_IDX] = 1;
+               bnxt_hwrm_mac_loopback(bp, true);
+               msleep(250);
+               rc = bnxt_half_open_nic(bp);
+               if (rc) {
+                       bnxt_hwrm_mac_loopback(bp, false);
+                       etest->flags |= ETH_TEST_FL_FAILED;
+                       return;
+               }
+               if (bnxt_run_loopback(bp))
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               else
+                       buf[BNXT_MACLPBK_TEST_IDX] = 0;
+
+               /* PHY loopback: longer settle time for link re-config */
+               bnxt_hwrm_mac_loopback(bp, false);
+               bnxt_hwrm_phy_loopback(bp, true);
+               msleep(1000);
+               if (bnxt_run_loopback(bp)) {
+                       buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+               bnxt_hwrm_phy_loopback(bp, false);
+               bnxt_half_close_nic(bp);
+               bnxt_open_nic(bp, false, true);
+       }
+       if (bnxt_test_irq(bp)) {
+               buf[BNXT_IRQ_TEST_IDX] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+       /* Translate firmware pass bits into per-test results */
+       for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+               u8 bit_val = 1 << i;
+
+               if ((test_mask & bit_val) && !(test_results & bit_val)) {
+                       buf[i] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+       }
+}
+
+#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT)
+/* Report timestamping capabilities: software timestamps always, plus
+ * hardware timestamps and the PTP clock index when PTP is configured.
+ */
+static int bnxt_get_ts_info(struct net_device *dev,
+                           struct ethtool_ts_info *info)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+       info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                               SOF_TIMESTAMPING_RX_SOFTWARE |
+                               SOF_TIMESTAMPING_SOFTWARE;
+       info->phc_index = -1;
+
+       if (!ptp)
+               return 0;
+
+       info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+                                SOF_TIMESTAMPING_RX_HARDWARE |
+                                SOF_TIMESTAMPING_RAW_HARDWARE;
+       if (ptp->ptp_clock)
+               info->phc_index = ptp_clock_index(ptp->ptp_clock);
+
+       info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+       info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+       return 0;
+}
+#endif
+
+/* Query the firmware self-test list (HWRM_SELFTEST_QLIST), allocate
+ * bp->test_info, and build the ethtool test-name strings.  The three
+ * driver-implemented tests (MAC loopback, PHY loopback, IRQ) are
+ * appended after the firmware tests.  Silently a no-op on firmware
+ * older than spec 1.7.4 or on multi-function/VF configurations.
+ */
+void bnxt_ethtool_init(struct bnxt *bp)
+{
+       struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_selftest_qlist_input req = {0};
+       struct bnxt_test_info *test_info;
+       int i, rc;
+
+       if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+               return;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto ethtool_init_exit;
+
+       test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+       if (!test_info)
+               goto ethtool_init_exit;
+
+       bp->test_info = test_info;
+       bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
+       if (bp->num_tests > BNXT_MAX_TEST)
+               bp->num_tests = BNXT_MAX_TEST;
+
+       test_info->offline_mask = resp->offline_tests;
+       test_info->timeout = le16_to_cpu(resp->test_timeout);
+       if (!test_info->timeout)
+               test_info->timeout = HWRM_CMD_TIMEOUT;
+       for (i = 0; i < bp->num_tests; i++) {
+               char *str = test_info->string[i];
+               /* Firmware returns fixed 32-byte name slots */
+               char *fw_str = resp->test0_name + i * 32;
+
+               if (i == BNXT_MACLPBK_TEST_IDX) {
+                       strcpy(str, "Mac loopback test (offline)");
+               } else if (i == BNXT_PHYLPBK_TEST_IDX) {
+                       strcpy(str, "Phy loopback test (offline)");
+               } else if (i == BNXT_IRQ_TEST_IDX) {
+                       strcpy(str, "Interrupt_test (offline)");
+               } else {
+                       /* strncat(dst, src, n) may write n + 1 bytes
+                        * (n chars plus the NUL), so reserve one byte
+                        * for the terminator to avoid a one-byte
+                        * overflow of the ETH_GSTRING_LEN buffer.
+                        */
+                       strlcpy(str, fw_str, ETH_GSTRING_LEN);
+                       strncat(str, " test",
+                               ETH_GSTRING_LEN - strlen(str) - 1);
+                       if (test_info->offline_mask & (1 << i))
+                               strncat(str, " (offline)",
+                                       ETH_GSTRING_LEN - strlen(str) - 1);
+                       else
+                               strncat(str, " (online)",
+                                       ETH_GSTRING_LEN - strlen(str) - 1);
+               }
+       }
+
+ethtool_init_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+/* Release the self-test metadata allocated by bnxt_ethtool_init(). */
+void bnxt_ethtool_free(struct bnxt *bp)
+{
+       kfree(bp->test_info);
+       bp->test_info = NULL;
+}
+
+/* ethtool operations table.  Optional entries are compiled in only
+ * when bnxt_compat.h detects the corresponding ethtool interface in
+ * the running kernel (HAVE_* / GET_ETHTOOL_OP_EXT guards).
+ */
+const struct ethtool_ops bnxt_ethtool_ops = {
+#ifdef HAVE_ETHTOOL_GLINKSETTINGS_25G
+       .get_link_ksettings     = bnxt_get_link_ksettings,
+       .set_link_ksettings     = bnxt_set_link_ksettings,
+#else
+       .get_settings           = bnxt_get_settings,
+       .set_settings           = bnxt_set_settings,
+#endif
+       .get_pauseparam         = bnxt_get_pauseparam,
+       .set_pauseparam         = bnxt_set_pauseparam,
+       .get_drvinfo            = bnxt_get_drvinfo,
+       .get_wol                = bnxt_get_wol,
+       .set_wol                = bnxt_set_wol,
+       .get_coalesce           = bnxt_get_coalesce,
+       .set_coalesce           = bnxt_set_coalesce,
+       .get_msglevel           = bnxt_get_msglevel,
+       .set_msglevel           = bnxt_set_msglevel,
+       .get_sset_count         = bnxt_get_sset_count,
+       .get_strings            = bnxt_get_strings,
+       .get_ethtool_stats      = bnxt_get_ethtool_stats,
+       .set_ringparam          = bnxt_set_ringparam,
+       .get_ringparam          = bnxt_get_ringparam,
+#if defined(ETHTOOL_GCHANNELS) && !defined(GET_ETHTOOL_OP_EXT)
+       .get_channels           = bnxt_get_channels,
+       .set_channels           = bnxt_set_channels,
+#endif
+#ifdef HAVE_RXNFC
+       .get_rxnfc              = bnxt_get_rxnfc,
+       .set_rxnfc              = bnxt_set_rxnfc,
+#endif
+#if defined(HAVE_RXFH_INDIR_SIZE) && !defined(GET_ETHTOOL_OP_EXT)
+       .get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
+#endif
+#if defined(HAVE_GET_RXFH_KEY_SIZE) && !defined(GET_ETHTOOL_OP_EXT)
+       .get_rxfh_key_size      = bnxt_get_rxfh_key_size,
+       .get_rxfh               = bnxt_get_rxfh,
+#endif
+#ifdef CONFIG_BNXT_FLASHDEV
+       .flash_device           = bnxt_flash_device,
+       .get_eeprom_len         = bnxt_get_eeprom_len,
+       .get_eeprom             = bnxt_get_eeprom,
+       .set_eeprom             = bnxt_set_eeprom,
+#endif
+       .get_link               = bnxt_get_link,
+#if defined(ETHTOOL_GEEE) && !defined(GET_ETHTOOL_OP_EXT)
+       .get_eee                = bnxt_get_eee,
+       .set_eee                = bnxt_set_eee,
+#endif
+#if defined(ETHTOOL_GMODULEEEPROM) && !defined(GET_ETHTOOL_OP_EXT)
+       .get_module_info        = bnxt_get_module_info,
+       .get_module_eeprom      = bnxt_get_module_eeprom,
+#endif
+       .nway_reset             = bnxt_nway_reset,
+#if defined(HAVE_SET_PHYS_ID) && !defined(GET_ETHTOOL_OP_EXT)
+       .set_phys_id            = bnxt_set_phys_id,
+#endif
+       .self_test              = bnxt_self_test,
+#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT)
+       .get_ts_info            = bnxt_get_ts_info,
+#endif
+};
diff --git a/ubuntu/bnxt/bnxt_ethtool.h b/ubuntu/bnxt/bnxt_ethtool.h
new file mode 100644 (file)
index 0000000..f1bc90b
--- /dev/null
@@ -0,0 +1,45 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+/* Per-LED slot layout.  bnxt_set_phys_id() overlays this struct on the
+ * repeating led0_id..led0_group_id fields of hwrm_port_led_cfg_input,
+ * so field order and sizes must match the HWRM request layout exactly.
+ */
+struct bnxt_led_cfg {
+       u8 led_id;              /* physical LED identifier */
+       u8 led_state;           /* PORT_LED_CFG_REQ_LED0_STATE_* value */
+       u8 led_color;
+       u8 unused;
+       __le16 led_blink_on;    /* blink on duration */
+       __le16 led_blink_off;   /* blink off duration */
+       u8 led_group_id;
+       u8 rsvd;
+};
+
+#define BNXT_LED_DFLT_ENA                              \
+       (PORT_LED_CFG_REQ_ENABLES_LED0_ID |             \
+        PORT_LED_CFG_REQ_ENABLES_LED0_STATE |          \
+        PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |       \
+        PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF |      \
+        PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID)
+
+#define BNXT_LED_DFLT_ENA_SHIFT        6
+
+#define BNXT_LED_DFLT_ENABLES(x)                       \
+       cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
+u32 bnxt_fw_to_ethtool_speed(u16);
+u16 bnxt_get_fw_auto_link_speeds(u32);
+void bnxt_ethtool_init(struct bnxt *bp);
+void bnxt_ethtool_free(struct bnxt *bp);
+
+#endif
diff --git a/ubuntu/bnxt/bnxt_fw_hdr.h b/ubuntu/bnxt/bnxt_fw_hdr.h
new file mode 100644 (file)
index 0000000..b94d804
--- /dev/null
@@ -0,0 +1,120 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE     0x1a4d4342     /* "BCM"+0x1a */
+#define BNXT_UCODE_TRAILER_SIGNATURE   0x726c7254      /* "Trlr" */
+
+/* Device families recognized in firmware image headers
+ * (see struct bnxt_fw_header.device).
+ */
+enum SUPPORTED_FAMILY {
+       DEVICE_5702_3_4_FAMILY,         /* 0  - Denali, Vinson, K2 */
+       DEVICE_5705_FAMILY,             /* 1  - Bachelor */
+       DEVICE_SHASTA_FAMILY,           /* 2  - 5751 */
+       DEVICE_5706_FAMILY,             /* 3  - Teton */
+       DEVICE_5714_FAMILY,             /* 4  - Hamilton */
+       DEVICE_STANFORD_FAMILY,         /* 5  - 5755 */
+       DEVICE_STANFORD_ME_FAMILY,      /* 6  - 5756 */
+       DEVICE_SOLEDAD_FAMILY,          /* 7  - 5761[E] */
+       DEVICE_CILAI_FAMILY,            /* 8  - 57780/60/90/91 */
+       DEVICE_ASPEN_FAMILY,            /* 9  - 57781/85/61/65/91/95 */
+       DEVICE_ASPEN_PLUS_FAMILY,       /* 10 - 57786 */
+       DEVICE_LOGAN_FAMILY,            /* 11 - Any device in the Logan family
+                                        */
+       DEVICE_LOGAN_5762,              /* 12 - Logan Enterprise (aka Columbia)
+                                        */
+       DEVICE_LOGAN_57767,             /* 13 - Logan Client */
+       DEVICE_LOGAN_57787,             /* 14 - Logan Consumer */
+       DEVICE_LOGAN_5725,              /* 15 - Logan Server (TruManage-enabled)
+                                        */
+       DEVICE_SAWTOOTH_FAMILY,         /* 16 - 5717/18 */
+       DEVICE_COTOPAXI_FAMILY,         /* 17 - 5719 */
+       DEVICE_SNAGGLETOOTH_FAMILY,     /* 18 - 5720 */
+       DEVICE_CUMULUS_FAMILY,          /* 19 - Cumulus/Whitney */
+       MAX_DEVICE_FAMILY
+};
+
+/* Firmware/code image types recognized in firmware image headers
+ * (see struct bnxt_fw_header.code_type).
+ */
+enum SUPPORTED_CODE {
+       CODE_ASF1,              /* 0  - ASF VERSION 1.03 <deprecated> */
+       CODE_ASF2,              /* 1  - ASF VERSION 2.00 <deprecated> */
+       CODE_PASSTHRU,          /* 2  - PassThru         <deprecated> */
+       CODE_PT_SEC,            /* 3  - PassThru with security <deprecated> */
+       CODE_UMP,               /* 4  - UMP                     <deprecated> */
+       CODE_BOOT,              /* 5  - Bootcode */
+       CODE_DASH,              /* 6  - TruManage (DASH + ASF + PMCI)
+                                *      Management firmwares
+                                */
+       CODE_MCTP_PASSTHRU,     /* 7  - NCSI / MCTP Pass-through firmware */
+       CODE_PM_OFFLOAD,        /* 8  - Power-Management Proxy Offload firmwares
+                                */
+       CODE_MDNS_SD_OFFLOAD,   /* 9  - Multicast DNS Service Discovery Proxies
+                                *      Offload firmware
+                                */
+       CODE_DISC_OFFLOAD,      /* 10 - Discovery Offload firmware */
+       CODE_MUSTANG,           /* 11 - I2C Error reporting APE firmwares
+                                *      <deprecated>
+                                */
+       CODE_ARP_BATCH,         /* 12 - ARP Batch firmware */
+       CODE_SMASH,             /* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+                                *      Management firmware
+                                */
+       CODE_APE_DIAG,          /* 14 - APE Test Diag firmware */
+       CODE_APE_PATCH,         /* 15 - APE Patch firmware */
+       CODE_TANG_PATCH,        /* 16 - TANG Patch firmware */
+       CODE_KONG_FW,           /* 17 - KONG firmware */
+       CODE_KONG_PATCH,        /* 18 - KONG Patch firmware */
+       CODE_BONO_FW,           /* 19 - BONO firmware */
+       CODE_BONO_PATCH,        /* 20 - BONO Patch firmware */
+       CODE_CHIMP_PATCH,       /* 21 - ChiMP Patch firmware */
+
+       MAX_CODE_TYPE,
+};
+
+/* Physical media types recognized in firmware image headers
+ * (see struct bnxt_fw_header.media).
+ */
+enum SUPPORTED_MEDIA {
+       MEDIA_COPPER,           /* 0 */
+       MEDIA_FIBER,            /* 1 */
+       MEDIA_NONE,             /* 2 */
+       MEDIA_COPPER_FIBER,     /* 3 */
+       MAX_MEDIA_TYPE,
+};
+
+/* Header at the start of a bnxt firmware binary image. */
+struct bnxt_fw_header {
+       __le32 signature;       /* contains the constant value of
+                                * BNXT_FIRMWARE_BIN_SIGNATURE
+                                */
+       u8 flags;               /* reserved for ChiMP use */
+       u8 code_type;           /* enum SUPPORTED_CODE */
+       u8 device;              /* enum SUPPORTED_FAMILY */
+       u8 media;               /* enum SUPPORTED_MEDIA */
+       u8 version[16];         /* the null terminated version string to
+                                * indicate the version of the
+                                * file, this will be copied from the binary
+                                * file version string
+                                */
+       u8 build;
+       u8 revision;
+       u8 minor_ver;
+       u8 major_ver;
+};
+
+/* Microcode and pre-boot software/firmware trailer: */
+struct bnxt_ucode_trailer {
+       u8 rsa_sig[256];        /* image signature (RSA, per field name) */
+       __le16 flags;
+       u8 version_format;
+       u8 version_length;
+       u8 version[16];
+       __le16 dir_type;
+       __le16 trailer_length;
+       __le32 sig;             /* BNXT_UCODE_TRAILER_SIGNATURE */
+       __le32 chksum;          /* CRC-32 */
+};
+
+#endif
diff --git a/ubuntu/bnxt/bnxt_hsi.h b/ubuntu/bnxt/bnxt_hsi.h
new file mode 100644 (file)
index 0000000..4c0e881
--- /dev/null
@@ -0,0 +1,8155 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* HSI and HWRM Specification 1.8.0 */
+#define HWRM_VERSION_MAJOR     1
+#define HWRM_VERSION_MINOR     8
+#define HWRM_VERSION_UPDATE    0
+
+#define HWRM_VERSION_RSVD      0 /* non-zero means beta version */
+
+#define HWRM_VERSION_STR       "1.8.0.0"
+/*
+ * Following is the signature for HWRM message field that indicates not
+ * applicable (All F's). Need to cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE      ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN    (128)  /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN    (248)  /* hwrm_selftest_qlist */
+#define HW_HASH_INDEX_SIZE      0x80    /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE       40
+#define HWRM_RESP_VALID_KEY      1 /* valid key for HWRM response */
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+       __le16 type;            /* bits 5:0 = completion type */
+       #define EJECT_CMPL_TYPE_MASK                                0x3fUL
+       #define EJECT_CMPL_TYPE_SFT                                 0
+       #define EJECT_CMPL_TYPE_STAT_EJECT                         0x1aUL
+       __le16 len;
+       __le32 opaque;
+       __le32 v;               /* bit 0 = EJECT_CMPL_V (valid bit) */
+       #define EJECT_CMPL_V                                        0x1UL
+       __le32 unused_2;        /* pads record to the documented 16 bytes */
+};
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+       __le16 type;            /* bits 5:0 = completion type */
+       #define CMPL_TYPE_MASK                                      0x3fUL
+       #define CMPL_TYPE_SFT                                       0
+       #define CMPL_TYPE_HWRM_DONE                                0x20UL
+       __le16 sequence_id;     /* presumably the seq_id of the completed
+                                * request — confirm against HWRM spec
+                                */
+       __le32 unused_1;
+       __le32 v;               /* bit 0 = CMPL_V (valid bit) */
+       #define CMPL_V                                              0x1UL
+       __le32 unused_3;        /* pads record to the documented 16 bytes */
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+       __le16 req_len_type;    /* bits 5:0 = type, bits 15:6 = request len */
+       #define FWD_REQ_CMPL_TYPE_MASK                              0x3fUL
+       #define FWD_REQ_CMPL_TYPE_SFT                               0
+       #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ                     0x22UL
+       #define FWD_REQ_CMPL_REQ_LEN_MASK                           0xffc0UL
+       #define FWD_REQ_CMPL_REQ_LEN_SFT                            6
+       __le16 source_id;
+       __le32 unused_0;
+       __le32 req_buf_addr_v[2];       /* 64-bit request buffer address;
+                                        * bit 0 doubles as the valid bit
+                                        */
+       #define FWD_REQ_CMPL_V                                      0x1UL
+       #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK                      0xfffffffeUL
+       #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT                       1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+       __le16 type;            /* bits 5:0 = completion type */
+       #define FWD_RESP_CMPL_TYPE_MASK                     0x3fUL
+       #define FWD_RESP_CMPL_TYPE_SFT                              0
+       #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP                   0x24UL
+       __le16 source_id;
+       __le16 resp_len;
+       __le16 unused_1;
+       __le32 resp_buf_addr_v[2];      /* 64-bit response buffer address;
+                                        * bit 0 doubles as the valid bit
+                                        */
+       #define FWD_RESP_CMPL_V                             0x1UL
+       #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK                    0xfffffffeUL
+       #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT             1
+};
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+       __le16 type;            /* bits 5:0 = completion type */
+       #define ASYNC_EVENT_CMPL_TYPE_MASK                          0x3fUL
+       #define ASYNC_EVENT_CMPL_TYPE_SFT                           0
+       #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT             0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE      0x0UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE          0x1UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE       0x2UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE       0x3UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED   0x4UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE   0x6UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE     0x7UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD         0x10UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD           0x11UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT     0x12UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD           0x20UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD             0x21UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR                   0x30UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE      0x31UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE    0x33UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR               0xffUL
+       __le32 event_data2;     /* event-specific; layout per event_id — see
+                                * the per-event structs below
+                                */
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_V                                  0x1UL
+       #define ASYNC_EVENT_CMPL_OPAQUE_MASK                        0xfeUL
+       #define ASYNC_EVENT_CMPL_OPAQUE_SFT                         1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* event-specific; layout per event_id */
+};
+
+/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+struct hwrm_async_event_cmpl_link_status_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK      0x3fUL
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT       0
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V               0x1UL
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK    0xfeUL
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT     1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bit 0 = link up/down; bits 3:1 = port;
+                                * bits 19:4 = port_id
+                                */
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST    ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+       #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+};
+
+/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK          0x3fUL
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT           0
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V                  0x1UL
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK       0xfeUL
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT         1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = new MTU */
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK       0x3fUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT         0
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V                0x1UL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK     0xfeUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT      1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bit 0 = forced; bits 15:1 = new speed in
+                                * 100 Mbps units; bits 31:16 = port_id
+                                */
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST    ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK       0x3fUL
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT         0
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+       __le32 event_data2;     /* flags: which DCB parameter(s) changed */
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V                0x1UL
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK     0xfeUL
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT      1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* port_id plus recommended RoCE/L2 priorities
+                                * (0xff = none)
+                                */
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST    ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
+       #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST    ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
+};
+
+/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK   0x3fUL
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT    0
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V            0x1UL
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT  1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = port_id;
+                                * bits 23:16 = enforcement policy
+                                */
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+       #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST    ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V      0x1UL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = port_id */
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK   0x3fUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT    0
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V            0x1UL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT  1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = port_id; bits 16/17 = flags */
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+       #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* HWRM Asynchronous Event Completion Record for port PHY configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_port_phy_cfg_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK     0x3fUL
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT      0
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V              0x1UL
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK   0xfeUL
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT    1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = port_id;
+                                * bits 16..18 = FEC/EEE/pause change flags
+                                */
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE 0x10000UL
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE 0x20000UL
+       #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE 0x40000UL
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK         0x3fUL
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT          0
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V                 0x1UL
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK      0xfeUL
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT       1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = function ID */
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK           0x3fUL
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT            0
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V                   0x1UL
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK         0xfeUL
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT          1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = function ID */
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
+struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK     0x3fUL
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT      0
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V              0x1UL
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK   0xfeUL
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT    1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = function ID */
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK           0x3fUL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT            0
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V                   0x1UL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK         0xfeUL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT          1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = function ID; bits 18:16 = port */
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK     0x3fUL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT              0
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V             0x1UL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK           0xfeUL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT            1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = function ID; bits 18:16 = port */
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+       #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK                   0x3fUL
+       #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT                    0
+       #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT     0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR    0x30UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_VF_FLR_V                           0x1UL
+       #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK                 0xfeUL
+       #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT                  1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = VF ID */
+       #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK     0xffffUL
+       #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT      0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK      0x3fUL
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT       0
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V               0x1UL
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK    0xfeUL
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT     1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bits 15:0 = VF ID */
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
+       #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
+       #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V         0x1UL
+       #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* bit 0 = communication established */
+       #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT     0
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+       __le32 event_data2;
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V                    0x1UL
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK          0xfeUL
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT           1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;     /* flags: which VF setting(s) changed */
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+       __le16 type;
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK               0x3fUL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT                0
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+       __le16 event_id;
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR   0xffUL
+       __le32 event_data2;     /* bits 7:0 = error severity */
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST    ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+       u8 opaque_v;            /* bit 0 = V (valid); bits 7:1 = opaque */
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_V                       0x1UL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK     0xfeUL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT              1
+       u8 timestamp_lo;        /* low 8 bits of event timestamp */
+       __le16 timestamp_hi;    /* high 16 bits of event timestamp */
+       __le32 event_data1;
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP  0x1UL
+};
+
+/* hwrm_ver_get */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 hwrm_intf_maj;       /* HWRM interface version the driver was built
+                                * against — presumably HWRM_VERSION_MAJOR;
+                                * confirm against caller
+                                */
+       u8 hwrm_intf_min;
+       u8 hwrm_intf_upd;
+       u8 unused_0[5];
+};
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 hwrm_intf_maj;       /* HWRM interface version supported by fw */
+       u8 hwrm_intf_min;
+       u8 hwrm_intf_upd;
+       u8 hwrm_intf_rsvd;
+       u8 hwrm_fw_maj;         /* HWRM firmware version */
+       u8 hwrm_fw_min;
+       u8 hwrm_fw_bld;
+       u8 hwrm_fw_rsvd;
+       u8 mgmt_fw_maj;         /* management firmware version */
+       u8 mgmt_fw_min;
+       u8 mgmt_fw_bld;
+       u8 mgmt_fw_rsvd;
+       u8 netctrl_fw_maj;      /* network control firmware version */
+       u8 netctrl_fw_min;
+       u8 netctrl_fw_bld;
+       u8 netctrl_fw_rsvd;
+       __le32 dev_caps_cfg;    /* device capability/config flags */
+       #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED  0x1UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED  0x2UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED      0x4UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED       0x8UL
+       u8 roce_fw_maj;         /* RoCE firmware version */
+       u8 roce_fw_min;
+       u8 roce_fw_bld;
+       u8 roce_fw_rsvd;
+       char hwrm_fw_name[16];
+       char mgmt_fw_name[16];
+       char netctrl_fw_name[16];
+       __le32 reserved2[4];
+       char roce_fw_name[16];
+       __le16 chip_num;
+       u8 chip_rev;
+       u8 chip_metal;
+       u8 chip_bond_id;
+       u8 chip_platform_type;
+       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC               0x0UL
+       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA               0x1UL
+       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM          0x2UL
+       __le16 max_req_win_len;
+       __le16 max_resp_len;
+       __le16 def_req_timeout;
+       u8 init_pending;        /* bit 0 set = device not ready yet */
+       #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY               0x1UL
+       u8 unused_0;
+       u8 unused_1;
+       u8 valid;               /* written last by fw; presumably set to
+                                * HWRM_RESP_VALID_KEY when response is
+                                * complete — confirm against HWRM spec
+                                */
+};
+
+/* hwrm_func_reset */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;         /* which optional fields below are valid */
+       #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID                  0x1UL
+       __le16 vf_id;           /* only meaningful when VF_ID_VALID is set */
+       u8 func_reset_level;    /* scope of the reset (see defines) */
+       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL           0x0UL
+       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME    0x1UL
+       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN     0x2UL
+       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF    0x3UL
+       u8 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* nonzero once response is fully written */
+};
+
+/* hwrm_func_getfid */
+/* Input (24 bytes) */
+struct hwrm_func_getfid_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;         /* which optional fields below are valid */
+       #define FUNC_GETFID_REQ_ENABLES_PCI_ID                      0x1UL
+       __le16 pci_id;          /* only meaningful when ENABLES_PCI_ID is set */
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_getfid_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 fid;             /* function ID resolved for the request */
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;               /* nonzero once response is fully written */
+};
+
+/* hwrm_func_vf_alloc */
+/* Input (24 bytes) */
+struct hwrm_func_vf_alloc_input {
+       /* Common HWRM request header. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       /* ENABLES_* bits indicate which optional fields below were supplied. */
+       __le32 enables;
+       #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID               0x1UL
+       __le16 first_vf_id;
+       __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_alloc_output {
+       /* Common HWRM response header; first_vf_id is the returned payload. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 first_vf_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_vf_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_free_input {
+       /* Common HWRM request header. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       /* ENABLES_* bits indicate which optional fields below were supplied. */
+       __le32 enables;
+       #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID                0x1UL
+       __le16 first_vf_id;
+       __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_free_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_vf_cfg */
+/* Input (32 bytes) */
+struct hwrm_func_vf_cfg_input {
+       /* Common HWRM request header. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       /* ENABLES_* bits select which of mtu/guest_vlan/async_event_cr/
+        * dflt_mac_addr below are to be applied.
+        */
+       __le32 enables;
+       #define FUNC_VF_CFG_REQ_ENABLES_MTU                         0x1UL
+       #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN                  0x2UL
+       #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR              0x4UL
+       #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR               0x8UL
+       __le16 mtu;
+       __le16 guest_vlan;
+       __le16 async_event_cr;
+       u8 dflt_mac_addr[6];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_cfg_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_qcaps */
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+       /* Common HWRM request header, plus the function id to query. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       __le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+       /* Common HWRM response header. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 fid;
+       __le16 port_id;
+       /* Capability bits — see FLAGS_*_SUPPORTED defines below. */
+       __le32 flags;
+       #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED           0x1UL
+       #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING      0x2UL
+       #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED                 0x4UL
+       #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED     0x8UL
+       #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED     0x10UL
+       #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED       0x20UL
+       #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED     0x40UL
+       #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED          0x80UL
+       #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED           0x100UL
+       #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED      0x200UL
+       #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED           0x400UL
+       #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED   0x800UL
+       u8 mac_address[6];
+       /* Maximum resource counts available to this function. */
+       __le16 max_rsscos_ctx;
+       __le16 max_cmpl_rings;
+       __le16 max_tx_rings;
+       __le16 max_rx_rings;
+       __le16 max_l2_ctxs;
+       __le16 max_vnics;
+       __le16 first_vf_id;
+       __le16 max_vfs;
+       __le16 max_stat_ctx;
+       __le32 max_encap_records;
+       __le32 max_decap_records;
+       __le32 max_tx_em_flows;
+       __le32 max_tx_wm_flows;
+       __le32 max_rx_em_flows;
+       __le32 max_rx_wm_flows;
+       __le32 max_mcast_filters;
+       __le32 max_flow_id;
+       __le32 max_hw_ring_grps;
+       __le16 max_sp_tx_rings;
+       u8 unused_0;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_qcfg */
+/* Input (24 bytes) */
+struct hwrm_func_qcfg_input {
+       /* Common HWRM request header, plus the function id to query. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       __le16 unused_0[3];
+};
+
+/* Output (72 bytes) */
+struct hwrm_func_qcfg_output {
+       /* Common HWRM response header. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 fid;
+       __le16 port_id;
+       __le16 vlan;
+       __le16 flags;
+       #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED      0x1UL
+       #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED            0x2UL
+       #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED          0x4UL
+       #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED      0x8UL
+       #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED          0x10UL
+       #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST             0x20UL
+       u8 mac_address[6];
+       __le16 pci_id;
+       /* Currently allocated resource counts for this function. */
+       __le16 alloc_rsscos_ctx;
+       __le16 alloc_cmpl_rings;
+       __le16 alloc_tx_rings;
+       __le16 alloc_rx_rings;
+       __le16 alloc_l2_ctx;
+       __le16 alloc_vnics;
+       __le16 mtu;
+       __le16 mru;
+       __le16 stat_ctx_id;
+       u8 port_partition_type;
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF             0x0UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS    0x1UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0         0x2UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5         0x3UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0         0x4UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN         0xffUL
+       u8 port_pf_cnt;
+       #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL                 0x0UL
+       __le16 dflt_vnic_id;
+       u8 unused_0;
+       u8 unused_1;
+       /* Bandwidth encoding: bits 0-27 value, bit 28 scale (bits/bytes),
+        * bits 29-31 unit — see the MASK/SFT defines below.
+        */
+       __le32 min_bw;
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK                 0xfffffffUL
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT                  0
+       #define FUNC_QCFG_RESP_MIN_BW_SCALE                         0x10000000UL
+       #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS                   (0x0UL << 28)
+       #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES                  (0x1UL << 28)
+       #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST    FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT     29
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA           (0x0UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO           (0x2UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE           (0x4UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA           (0x6UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+       /* Same encoding as min_bw. */
+       __le32 max_bw;
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK                 0xfffffffUL
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT                  0
+       #define FUNC_QCFG_RESP_MAX_BW_SCALE                         0x10000000UL
+       #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS                   (0x0UL << 28)
+       #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES                  (0x1UL << 28)
+       #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST    FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT     29
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA           (0x0UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO           (0x2UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE           (0x4UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA           (0x6UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 evb_mode;
+       #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB                     0x0UL
+       #define FUNC_QCFG_RESP_EVB_MODE_VEB                        0x1UL
+       #define FUNC_QCFG_RESP_EVB_MODE_VEPA                       0x2UL
+       u8 unused_2;
+       __le16 alloc_vfs;
+       __le32 alloc_mcast_filters;
+       __le32 alloc_hw_ring_grps;
+       __le16 alloc_sp_tx_rings;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_vlan_qcfg */
+/* Input (24 bytes) */
+struct hwrm_func_vlan_qcfg_input {
+       /* Common HWRM request header, plus the function id to query. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       __le16 unused_0[3];
+};
+
+/* Output (40 bytes) */
+struct hwrm_func_vlan_qcfg_output {
+       /* Common HWRM response header. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* NOTE(review): unlike every other *_output struct in this file, 'valid'
+        * is NOT the last byte of the response here — double-check this layout
+        * against the HWRM spec before relying on the trailing fields.
+        */
+       u8 valid;
+       /* Service (outer) and customer (inner) VLAN tag settings; TPIDs are
+        * big-endian as they appear on the wire.
+        */
+       __le16 stag_vid;
+       u8 stag_pcp;
+       u8 unused_4;
+       __be16 stag_tpid;
+       __le16 ctag_vid;
+       u8 ctag_pcp;
+       u8 unused_5;
+       __be16 ctag_tpid;
+       __le32 rsvd2;
+       __le32 rsvd3;
+       __le32 unused_6;
+};
+
+/* hwrm_func_vlan_cfg */
+/* Input (48 bytes) */
+struct hwrm_func_vlan_cfg_input {
+       /* Common HWRM request header, plus the function id to configure. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       u8 unused_0;
+       u8 unused_1;
+       /* ENABLES_* bits select which of the VID/PCP/TPID fields below apply. */
+       __le32 enables;
+       #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID                  0x1UL
+       #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID                  0x2UL
+       #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP                  0x4UL
+       #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP                  0x8UL
+       #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID                 0x10UL
+       #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID                 0x20UL
+       /* TPIDs are big-endian as they appear on the wire. */
+       __le16 stag_vid;
+       u8 stag_pcp;
+       u8 unused_2;
+       __be16 stag_tpid;
+       __le16 ctag_vid;
+       u8 ctag_pcp;
+       u8 unused_3;
+       __be16 ctag_tpid;
+       __le32 rsvd1;
+       __le32 rsvd2;
+       __le32 unused_4;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vlan_cfg_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_cfg */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+       /* Common HWRM request header, plus the function id to configure. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 flags;
+       #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE      0x1UL
+       #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE       0x2UL
+       #define FUNC_CFG_REQ_FLAGS_RSVD_MASK                        0x1fcUL
+       #define FUNC_CFG_REQ_FLAGS_RSVD_SFT                         2
+       #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE          0x200UL
+       #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE         0x400UL
+       #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST                 0x800UL
+       #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC           0x1000UL
+       /* ENABLES_* bits select which of the fields below are to be applied. */
+       __le32 enables;
+       #define FUNC_CFG_REQ_ENABLES_MTU                            0x1UL
+       #define FUNC_CFG_REQ_ENABLES_MRU                            0x2UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS                0x4UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS                 0x8UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS                   0x10UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS                   0x20UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS                    0x40UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_VNICS                      0x80UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS                  0x100UL
+       #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR                  0x200UL
+       #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN                      0x400UL
+       #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR                   0x800UL
+       #define FUNC_CFG_REQ_ENABLES_MIN_BW                         0x1000UL
+       #define FUNC_CFG_REQ_ENABLES_MAX_BW                         0x2000UL
+       #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR                 0x4000UL
+       #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE            0x8000UL
+       #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS              0x10000UL
+       #define FUNC_CFG_REQ_ENABLES_EVB_MODE                       0x20000UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS              0x40000UL
+       #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS               0x80000UL
+       __le16 mtu;
+       __le16 mru;
+       /* Requested resource counts for this function. */
+       __le16 num_rsscos_ctxs;
+       __le16 num_cmpl_rings;
+       __le16 num_tx_rings;
+       __le16 num_rx_rings;
+       __le16 num_l2_ctxs;
+       __le16 num_vnics;
+       __le16 num_stat_ctxs;
+       __le16 num_hw_ring_grps;
+       u8 dflt_mac_addr[6];
+       __le16 dflt_vlan;
+       __be32 dflt_ip_addr[4];
+       /* Bandwidth encoding: bits 0-27 value, bit 28 scale (bits/bytes),
+        * bits 29-31 unit — see the MASK/SFT defines below.
+        */
+       __le32 min_bw;
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK                   0xfffffffUL
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT                    0
+       #define FUNC_CFG_REQ_MIN_BW_SCALE                           0x10000000UL
+       #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS                     (0x0UL << 28)
+       #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES            (0x1UL << 28)
+       #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST    FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK              0xe0000000UL
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT               29
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA             (0x0UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO             (0x2UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE             (0x4UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA             (0x6UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID          (0x7UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+       /* Same encoding as min_bw. */
+       __le32 max_bw;
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK                   0xfffffffUL
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT                    0
+       #define FUNC_CFG_REQ_MAX_BW_SCALE                           0x10000000UL
+       #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS                     (0x0UL << 28)
+       #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES            (0x1UL << 28)
+       #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST    FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK              0xe0000000UL
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT               29
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA             (0x0UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO             (0x2UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE             (0x4UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA             (0x6UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID          (0x7UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+       __le16 async_event_cr;
+       u8 vlan_antispoof_mode;
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK           0x0UL
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN    0x1UL
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
+       u8 allowed_vlan_pris;
+       u8 evb_mode;
+       #define FUNC_CFG_REQ_EVB_MODE_NO_EVB                       0x0UL
+       #define FUNC_CFG_REQ_EVB_MODE_VEB                          0x1UL
+       #define FUNC_CFG_REQ_EVB_MODE_VEPA                         0x2UL
+       u8 unused_2;
+       __le16 num_mcast_filters;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_qstats */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+       /* Common HWRM request header, plus the function id to query. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       __le16 unused_0[3];
+};
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+       /* Common HWRM response header followed by per-function TX/RX counters. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 tx_ucast_pkts;
+       __le64 tx_mcast_pkts;
+       __le64 tx_bcast_pkts;
+       __le64 tx_discard_pkts;
+       __le64 tx_drop_pkts;
+       __le64 tx_ucast_bytes;
+       __le64 tx_mcast_bytes;
+       __le64 tx_bcast_bytes;
+       __le64 rx_ucast_pkts;
+       __le64 rx_mcast_pkts;
+       __le64 rx_bcast_pkts;
+       __le64 rx_discard_pkts;
+       __le64 rx_drop_pkts;
+       __le64 rx_ucast_bytes;
+       __le64 rx_mcast_bytes;
+       __le64 rx_bcast_bytes;
+       /* TPA/aggregation counters. */
+       __le64 rx_agg_pkts;
+       __le64 rx_agg_bytes;
+       __le64 rx_agg_events;
+       __le64 rx_agg_aborts;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+       /* Common HWRM request header, plus the function id whose stats to clear. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_vf_resc_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_resc_free_input {
+       /* Common HWRM request header, plus the target VF id. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 vf_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_resc_free_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+       /* Common HWRM request header. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 vf_id;
+       u8 unused_0;
+       u8 unused_1;
+       /* Capacity of the caller-supplied table at vnic_id_tbl_addr (DMA
+        * address); firmware reports how many entries it filled via
+        * vnic_id_cnt in the response.
+        */
+       __le32 max_vnic_id_cnt;
+       __le64 vnic_id_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+       /* Common HWRM response header; vnic_id_cnt is the returned payload. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 vnic_id_cnt;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_drv_rgtr */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+       /* Common HWRM request header. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE                0x1UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE               0x2UL
+       /* ENABLES_* bits select which of the fields below are supplied. */
+       __le32 enables;
+       #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE                   0x1UL
+       #define FUNC_DRV_RGTR_REQ_ENABLES_VER                       0x2UL
+       #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP                 0x4UL
+       #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD                0x8UL
+       #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD           0x10UL
+       __le16 os_type;
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN                  0x0UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER            0x1UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS            0xeUL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS                  0x12UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS                  0x1dUL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX            0x24UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD                  0x2aUL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI                     0x68UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864                   0x73UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2                0x74UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI                     0x8000UL
+       /* Registering driver's version. */
+       u8 ver_maj;
+       u8 ver_min;
+       u8 ver_upd;
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 timestamp;
+       __le32 unused_2;
+       /* 256-bit (8 x __le32) bitmaps; presumably indexed by request/event
+        * id to select what firmware forwards to this driver — confirm
+        * against the HWRM spec.
+        */
+       __le32 vf_req_fwd[8];
+       __le32 async_event_fwd[8];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_rgtr_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+       /* Common HWRM request header. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN     0x1UL
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_buf_rgtr */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+       /* Common HWRM request header. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       /* ENABLES_* bits indicate which optional fields below were supplied. */
+       __le32 enables;
+       #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID             0x1UL
+       #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR              0x2UL
+       __le16 vf_id;
+       __le16 req_buf_num_pages;
+       /* Page size is encoded as log2(bytes): 0x4 = 16B ... 0x1e = 1GB. */
+       __le16 req_buf_page_size;
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B    0x4UL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K             0xcUL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K             0xdUL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K    0x10UL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M             0x15UL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M             0x16UL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G             0x1eUL
+       __le16 req_buf_len;
+       __le16 resp_buf_len;
+       u8 unused_0;
+       u8 unused_1;
+       /* Up to 10 request-buffer page addresses (count in req_buf_num_pages). */
+       __le64 req_buf_page_addr0;
+       __le64 req_buf_page_addr1;
+       __le64 req_buf_page_addr2;
+       __le64 req_buf_page_addr3;
+       __le64 req_buf_page_addr4;
+       __le64 req_buf_page_addr5;
+       __le64 req_buf_page_addr6;
+       __le64 req_buf_page_addr7;
+       __le64 req_buf_page_addr8;
+       __le64 req_buf_page_addr9;
+       __le64 error_buf_addr;
+       __le64 resp_buf_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_buf_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_buf_unrgtr_input {
+       /* Common HWRM request header. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       /* ENABLES_* bits indicate which optional fields below were supplied. */
+       __le32 enables;
+       #define FUNC_BUF_UNRGTR_REQ_ENABLES_VF_ID                   0x1UL
+       __le16 vf_id;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_unrgtr_output {
+       /* Common HWRM response header; no payload fields. */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_func_drv_qver */
+/* Input (24 bytes) */
+struct hwrm_func_drv_qver_input {
+       /* Common HWRM request header, plus the function id to query. */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 reserved;
+       __le16 fid;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_qver_output {
+       /* Common HWRM response header; returns the registered driver's OS type
+        * and version (same OS_TYPE encoding as hwrm_func_drv_rgtr).
+        */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 os_type;
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN                 0x0UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER                   0x1UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS                   0xeUL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS                 0x12UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS                 0x1dUL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX                   0x24UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD                 0x2aUL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI            0x68UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864                  0x73UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2               0x74UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI            0x8000UL
+       u8 ver_maj;
+       u8 ver_min;
+       u8 ver_upd;
+       u8 unused_0;
+       u8 unused_1;
+       /* Response-complete flag byte (HWRM convention — confirm in spec). */
+       u8 valid;
+};
+
+/* hwrm_port_phy_cfg */
+/* Input (56 bytes) */
+struct hwrm_port_phy_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY                    0x1UL
+       #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED                   0x2UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FORCE                        0x4UL
+       #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG              0x8UL
+       #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE                   0x10UL
+       #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE                  0x20UL
+       #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE            0x40UL
+       #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE           0x80UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE           0x100UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE          0x200UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE          0x400UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE         0x800UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE          0x1000UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE         0x2000UL
+       #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN               0x4000UL
+       __le32 enables;
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE                  0x1UL
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX                0x2UL
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE                 0x4UL
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED            0x8UL
+       #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK      0x10UL
+       #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED                  0x20UL
+       #define PORT_PHY_CFG_REQ_ENABLES_LPBK                       0x40UL
+       #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS                0x80UL
+       #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE                0x100UL
+       #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK       0x200UL
+       #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER               0x400UL
+       __le16 port_id;
+       __le16 force_link_speed;
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB    0x1UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB              0xaUL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB              0x14UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB    0x19UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB             0x64UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB             0xc8UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB             0xfaUL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB             0x190UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB             0x1f4UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB    0x3e8UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB             0xffffUL
+       u8 auto_mode;
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE            0x0UL
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS              0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED               0x2UL
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW    0x3UL
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK              0x4UL
+       u8 auto_duplex;
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF                  0x0UL
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL                  0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH                  0x2UL
+       u8 auto_pause;
+       #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX                      0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX                      0x2UL
+       #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE           0x4UL
+       u8 unused_0;
+       __le16 auto_link_speed;
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB             0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB               0xaUL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB               0x14UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB             0x19UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB              0x64UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB              0xc8UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB              0xfaUL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB              0x190UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB              0x1f4UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB             0x3e8UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB              0xffffUL
+       __le16 auto_link_speed_mask;
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD      0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB         0x2UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD         0x4UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB           0x8UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB           0x10UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB         0x20UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB          0x40UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB          0x80UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB          0x100UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB          0x200UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB          0x400UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB         0x800UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD       0x1000UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB          0x2000UL
+       u8 wirespeed;
+       #define PORT_PHY_CFG_REQ_WIRESPEED_OFF                     0x0UL
+       #define PORT_PHY_CFG_REQ_WIRESPEED_ON                      0x1UL
+       u8 lpbk;
+       #define PORT_PHY_CFG_REQ_LPBK_NONE                         0x0UL
+       #define PORT_PHY_CFG_REQ_LPBK_LOCAL                        0x1UL
+       #define PORT_PHY_CFG_REQ_LPBK_REMOTE                       0x2UL
+       u8 force_pause;
+       #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX             0x1UL
+       #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX             0x2UL
+       u8 unused_1;
+       __le32 preemphasis;
+       __le16 eee_link_speed_mask;
+       #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1          0x1UL
+       #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB          0x2UL
+       #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2          0x4UL
+       #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB            0x8UL
+       #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3          0x10UL
+       #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4          0x20UL
+       #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB           0x40UL
+       u8 unused_2;
+       u8 unused_3;
+       __le32 tx_lpi_timer;
+       __le32 unused_4;
+       #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK                  0xffffffUL
+       #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT                   0
+};
+
+/* Output (16 bytes) */
+/* Response to hwrm_port_phy_cfg: standard HWRM response header only, no
+ * command-specific payload.  The same error_code/req_type/seq_id/resp_len
+ * header leads every response struct in this file.
+ * NOTE(review): 'valid' is the last byte of every response struct here;
+ * presumably firmware writes it last to signal completion -- confirm
+ * against the HWRM specification.
+ */
+struct hwrm_port_phy_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_phy_qcfg */
+/* Input (24 bytes) */
+/* Request to query the current PHY configuration/state of 'port_id'.
+ * Begins with the standard HWRM request header
+ * (req_type/cmpl_ring/seq_id/target_id/resp_addr) used by every request
+ * struct in this file.
+ */
+struct hwrm_port_phy_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (96 bytes) */
+/* Response carrying the full PHY state of a port: link status and speed,
+ * pause/duplex, forced and auto-negotiated speed settings, transceiver
+ * module/media information, EEE configuration, link-partner advertisement
+ * and FEC status.  Speed values encode the rate in units of 100 Mb/s
+ * (e.g. 0xa = 1 Gb, 0x64 = 10 Gb), with 0xffff as the special 10 Mb code,
+ * matching the *_LINK_SPEED_* defines below.
+ */
+struct hwrm_port_phy_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 link;
+       #define PORT_PHY_QCFG_RESP_LINK_NO_LINK            0x0UL
+       #define PORT_PHY_QCFG_RESP_LINK_SIGNAL                     0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_LINK                       0x2UL
+       u8 unused_0;
+       __le16 link_speed;
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB                0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB                  0xaUL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB                  0x14UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB                0x19UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB                 0x64UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB                 0xc8UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB                 0xfaUL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB                 0x190UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB                 0x1f4UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB                0x3e8UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB                 0xffffUL
+       u8 duplex_cfg;
+       #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF                 0x0UL
+       #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL                 0x1UL
+       u8 pause;
+       #define PORT_PHY_QCFG_RESP_PAUSE_TX                         0x1UL
+       #define PORT_PHY_QCFG_RESP_PAUSE_RX                         0x2UL
+       __le16 support_speeds;
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD           0x1UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB     0x2UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD     0x4UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB               0x8UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB               0x10UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB     0x20UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB              0x40UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB              0x80UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB              0x100UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB              0x200UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB              0x400UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB     0x800UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD            0x1000UL
+       #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB              0x2000UL
+       __le16 force_link_speed;
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB          0x1UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB    0xaUL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB    0x14UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB          0x19UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB           0x64UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB           0xc8UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB           0xfaUL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB           0x190UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB           0x1f4UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB          0x3e8UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB           0xffffUL
+       u8 auto_mode;
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE                  0x0UL
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS    0x1UL
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED             0x2UL
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW          0x3UL
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK    0x4UL
+       u8 auto_pause;
+       #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX                    0x1UL
+       #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX                    0x2UL
+       #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE         0x4UL
+       __le16 auto_link_speed;
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB           0x1UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB             0xaUL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB             0x14UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB           0x19UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB    0x64UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB    0xc8UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB    0xfaUL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB    0x190UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB    0x1f4UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB           0x3e8UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB    0xffffUL
+       __le16 auto_link_speed_mask;
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD    0x1UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB      0x2UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD      0x4UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB         0x8UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB         0x10UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB      0x20UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB       0x40UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB       0x80UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB       0x100UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB       0x200UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB       0x400UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB      0x800UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD     0x1000UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB       0x2000UL
+       u8 wirespeed;
+       #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF                   0x0UL
+       #define PORT_PHY_QCFG_RESP_WIRESPEED_ON            0x1UL
+       u8 lpbk;
+       #define PORT_PHY_QCFG_RESP_LPBK_NONE                       0x0UL
+       #define PORT_PHY_QCFG_RESP_LPBK_LOCAL                      0x1UL
+       #define PORT_PHY_QCFG_RESP_LPBK_REMOTE                     0x2UL
+       u8 force_pause;
+       #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX                   0x1UL
+       #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX                   0x2UL
+       u8 module_status;
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE              0x0UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX         0x1UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG       0x2UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN           0x3UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED      0x4UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE    0xffUL
+       __le32 preemphasis;
+       /* PHY firmware/hardware identification: major/minor/build revision
+        * and the physical layer type in use.
+        */
+       u8 phy_maj;
+       u8 phy_min;
+       u8 phy_bld;
+       u8 phy_type;
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN                0x0UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR                 0x1UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4                0x2UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR                 0x3UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR                 0x4UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2                0x5UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX                 0x6UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR                 0x7UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET                  0x8UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE                 0x9UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY    0xaUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L       0xbUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S       0xcUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N       0xdUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR             0xeUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4           0xfUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4           0x10UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4           0x11UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4           0x12UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10          0x13UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4    0x14UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4    0x15UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4    0x16UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4    0x17UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE      0x18UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET               0x19UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX              0x1aUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX              0x1bUL
+       u8 media_type;
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN              0x0UL
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP                   0x1UL
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC                  0x2UL
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE                0x3UL
+       u8 xcvr_pkg_type;
+       #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL    0x1UL
+       #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL    0x2UL
+       /* Packed byte: low 5 bits are the PHY address, high 3 bits are EEE
+        * configuration flags (see the MASK/SFT defines below).
+        */
+       u8 eee_config_phy_addr;
+       #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK                    0x1fUL
+       #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT             0
+       #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED           0x20UL
+       #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE            0x40UL
+       #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI            0x80UL
+       #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK                  0xe0UL
+       #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT                   5
+       u8 parallel_detect;
+       #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT                  0x1UL
+       #define PORT_PHY_QCFG_RESP_RESERVED_MASK                    0xfeUL
+       #define PORT_PHY_QCFG_RESP_RESERVED_SFT             1
+       /* Capabilities advertised by the link partner during autoneg:
+        * speeds, autoneg mode, pause, and EEE speeds.
+        */
+       __le16 link_partner_adv_speeds;
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB   0x2UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD   0x4UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB     0x8UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB     0x10UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB   0x20UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB    0x40UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB    0x80UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB    0x100UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB    0x200UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB    0x400UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB   0x800UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD  0x1000UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB    0x2000UL
+       u8 link_partner_adv_auto_mode;
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+       u8 link_partner_adv_pause;
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX       0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX       0x2UL
+       __le16 adv_eee_link_speed_mask;
+       #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1   0x1UL
+       #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB   0x2UL
+       #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2   0x4UL
+       #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB     0x8UL
+       #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3   0x10UL
+       #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4   0x20UL
+       #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB    0x40UL
+       __le16 link_partner_adv_eee_link_speed_mask;
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+       /* Packed dword: low 24 bits TX LPI timer, high 8 bits transceiver
+        * identifier type (SFP/QSFP/... codes per the defines below).
+        */
+       __le32 xcvr_identifier_type_tx_lpi_timer;
+       #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK                0xffffffUL
+       #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT                 0
+       #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK       0xff000000UL
+       #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT         24
+       #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN   (0x0UL << 24)
+       #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP       (0x3UL << 24)
+       #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP      (0xcUL << 24)
+       #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS  (0xdUL << 24)
+       #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28    (0x11UL << 24)
+       /* Forward error correction: which FEC modes are supported and which
+        * are currently enabled (autoneg/Clause 74/Clause 91 bits).
+        */
+       __le16 fec_cfg;
+       #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED      0x1UL
+       #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED   0x2UL
+       #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED     0x4UL
+       #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED  0x8UL
+       #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED    0x10UL
+       #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED  0x20UL
+       #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED    0x40UL
+       u8 duplex_state;
+       #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF               0x0UL
+       #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL               0x1UL
+       u8 unused_1;
+       char phy_vendor_name[16];
+       char phy_vendor_partnumber[16];
+       __le32 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 unused_5;
+       u8 valid;
+};
+
+/* hwrm_port_mac_cfg */
+/* Input (40 bytes) */
+/* Request to configure the port MAC: loopback mode, inter-packet gap,
+ * VLAN-priority/tunnel-priority/DSCP to CoS mappings, PTP RX/TX timestamp
+ * capture, and out-of-band wake-on-LAN.  'enables' selects which of the
+ * optional fields below are being set; 'flags' carry the enable/disable
+ * toggles themselves.
+ */
+struct hwrm_port_mac_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK                   0x1UL
+       #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE          0x2UL
+       #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE       0x4UL
+       #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE           0x8UL
+       #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE    0x10UL
+       #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE   0x20UL
+       #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE    0x40UL
+       #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE   0x80UL
+       #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE               0x100UL
+       #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE              0x200UL
+       #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE         0x400UL
+       #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE      0x800UL
+       #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE          0x1000UL
+       __le32 enables;
+       #define PORT_MAC_CFG_REQ_ENABLES_IPG                        0x1UL
+       #define PORT_MAC_CFG_REQ_ENABLES_LPBK                       0x2UL
+       #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI      0x4UL
+       #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1                  0x8UL
+       #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI    0x10UL
+       #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI           0x20UL
+       #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+       #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+       #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG              0x100UL
+       __le16 port_id;
+       u8 ipg;
+       u8 lpbk;
+       #define PORT_MAC_CFG_REQ_LPBK_NONE                         0x0UL
+       #define PORT_MAC_CFG_REQ_LPBK_LOCAL                        0x1UL
+       #define PORT_MAC_CFG_REQ_LPBK_REMOTE                       0x2UL
+       u8 vlan_pri2cos_map_pri;
+       u8 reserved1;
+       u8 tunnel_pri2cos_map_pri;
+       u8 dscp2pri_map_pri;
+       __le16 rx_ts_capture_ptp_msg_type;
+       __le16 tx_ts_capture_ptp_msg_type;
+       /* Packed byte selecting which VLAN tag / tunnel VLAN tag priority is
+        * used for CoS classification plus the default CoS value (bits 5-7).
+        */
+       u8 cos_field_cfg;
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1                0x1UL
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK   0x6UL
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT    1
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST    PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT  3
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST    PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK    0xe0UL
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT     5
+       u8 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Response to hwrm_port_mac_cfg: echoes the resulting MRU/MTU, IPG and
+ * loopback mode actually programmed by firmware.
+ */
+struct hwrm_port_mac_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 mru;
+       __le16 mtu;
+       u8 ipg;
+       u8 lpbk;
+       #define PORT_MAC_CFG_RESP_LPBK_NONE                        0x0UL
+       #define PORT_MAC_CFG_RESP_LPBK_LOCAL                       0x1UL
+       #define PORT_MAC_CFG_RESP_LPBK_REMOTE                      0x2UL
+       u8 unused_0;
+       u8 valid;
+};
+
+/* hwrm_port_mac_qcfg */
+/* Input (24 bytes) */
+/* Request to query the current MAC configuration of 'port_id'. */
+struct hwrm_port_mac_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (24 bytes) */
+/* Response reporting the current MAC settings: MRU/MTU, IPG, loopback,
+ * priority-to-CoS mappings, PTP timestamp capture state and the CoS field
+ * selection byte (same packed layout as in hwrm_port_mac_cfg_input).
+ */
+struct hwrm_port_mac_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 mru;
+       __le16 mtu;
+       u8 ipg;
+       u8 lpbk;
+       #define PORT_MAC_QCFG_RESP_LPBK_NONE                       0x0UL
+       #define PORT_MAC_QCFG_RESP_LPBK_LOCAL                      0x1UL
+       #define PORT_MAC_QCFG_RESP_LPBK_REMOTE                     0x2UL
+       u8 vlan_pri2cos_map_pri;
+       u8 flags;
+       #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE       0x1UL
+       #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE     0x2UL
+       #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE         0x4UL
+       #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE     0x8UL
+       #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE  0x10UL
+       #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE  0x20UL
+       u8 tunnel_pri2cos_map_pri;
+       u8 dscp2pri_map_pri;
+       __le16 rx_ts_capture_ptp_msg_type;
+       __le16 tx_ts_capture_ptp_msg_type;
+       u8 cos_field_cfg;
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD               0x1UL
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT  1
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST    PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST    PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK  0xe0UL
+       #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT   5
+       u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg */
+/* Input (24 bytes) */
+/* Request to query PTP timestamp-access configuration for 'port_id'. */
+struct hwrm_port_mac_ptp_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+/* Response describing how PTP timestamps are accessed: either directly
+ * (DIRECT_ACCESS, via the register offsets below) or through HWRM commands
+ * (HWRM_ACCESS).  The rx_ts_*/tx_ts_* fields are register offsets for
+ * reading RX/TX timestamp components (lower/upper words, sequence id,
+ * source/domain ids, FIFO state and granularity).
+ */
+struct hwrm_port_mac_ptp_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 flags;
+       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS          0x1UL
+       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS            0x2UL
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 rx_ts_reg_off_lower;
+       __le32 rx_ts_reg_off_upper;
+       __le32 rx_ts_reg_off_seq_id;
+       __le32 rx_ts_reg_off_src_id_0;
+       __le32 rx_ts_reg_off_src_id_1;
+       __le32 rx_ts_reg_off_src_id_2;
+       __le32 rx_ts_reg_off_domain_id;
+       __le32 rx_ts_reg_off_fifo;
+       __le32 rx_ts_reg_off_fifo_adv;
+       __le32 rx_ts_reg_off_granularity;
+       __le32 tx_ts_reg_off_lower;
+       __le32 tx_ts_reg_off_upper;
+       __le32 tx_ts_reg_off_seq_id;
+       __le32 tx_ts_reg_off_fifo;
+       __le32 tx_ts_reg_off_granularity;
+       __le32 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 unused_5;
+       u8 valid;
+};
+
+/* hwrm_port_qstats */
+/* Input (40 bytes) */
+/* Request for port statistics.  tx_stat_host_addr/rx_stat_host_addr are
+ * host buffer addresses into which firmware deposits the TX and RX
+ * statistics blocks; the response reports the sizes actually written.
+ */
+struct hwrm_port_qstats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2[3];
+       u8 unused_3;
+       __le64 tx_stat_host_addr;
+       __le64 rx_stat_host_addr;
+};
+
+/* Output (16 bytes) */
+/* Response to hwrm_port_qstats: byte sizes of the TX and RX statistics
+ * blocks written to the host addresses supplied in the request.
+ */
+struct hwrm_port_qstats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 tx_stat_size;
+       __le16 rx_stat_size;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats */
+/* Input (16 bytes) */
+/* Request for loopback statistics; carries only the standard HWRM
+ * request header, no command-specific fields.
+ */
+struct hwrm_port_lpbk_qstats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (96 bytes) */
+/* Response with loopback counters: unicast/multicast/broadcast frame and
+ * byte counts plus TX/RX discard and error totals.
+ */
+struct hwrm_port_lpbk_qstats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 lpbk_ucast_frames;
+       __le64 lpbk_mcast_frames;
+       __le64 lpbk_bcast_frames;
+       __le64 lpbk_ucast_bytes;
+       __le64 lpbk_mcast_bytes;
+       __le64 lpbk_bcast_bytes;
+       __le64 tx_stat_discard;
+       __le64 tx_stat_error;
+       __le64 rx_stat_discard;
+       __le64 rx_stat_error;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_clr_stats */
+/* Input (24 bytes) */
+/* Request to clear (zero) the statistics counters of 'port_id'. */
+struct hwrm_port_clr_stats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Response to hwrm_port_clr_stats: standard HWRM response header only. */
+struct hwrm_port_clr_stats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats */
+/* Input (16 bytes) */
+/* Request to clear the loopback statistics counters; standard HWRM
+ * request header only.
+ */
+struct hwrm_port_lpbk_clr_stats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+/* Response to hwrm_port_lpbk_clr_stats: standard HWRM response header
+ * only.
+ */
+struct hwrm_port_lpbk_clr_stats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_ts_query */
+/* Input (24 bytes) */
+/* Request for a captured PTP timestamp on 'port_id'.  Bit 0 of 'flags'
+ * selects the path: 0 = TX timestamp, 1 = RX timestamp.
+ */
+struct hwrm_port_ts_query_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define PORT_TS_QUERY_REQ_FLAGS_PATH                        0x1UL
+       #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX            0x0UL
+       #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX            0x1UL
+       #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST    PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+       __le16 port_id;
+       __le16 unused_0;
+};
+
+/* Output (24 bytes) */
+/* Response carrying the captured PTP message timestamp and the PTP
+ * sequence id it belongs to.
+ */
+struct hwrm_port_ts_query_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 ptp_msg_ts;
+       __le16 ptp_msg_seqid;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_port_phy_qcaps */
+/* Input (24 bytes) */
+/* Request to query the PHY capabilities of 'port_id' (supported speeds
+ * in forced/auto/EEE modes, EEE support, LPI timer bounds).
+ */
+struct hwrm_port_phy_qcaps_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (24 bytes) */
+struct hwrm_port_phy_qcaps_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 flags;
+       #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED     0x1UL
+       #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK                0xfeUL
+       #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT                 1
+       u8 unused_0;
+       __le16 supported_speeds_force_mode;
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+       __le16 supported_speeds_auto_mode;
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+       __le16 supported_speeds_eee_mode;
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB  0x8UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+       #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+       __le32 tx_lpi_timer_low;
+       #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK           0xffffffUL
+       #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT            0
+       #define PORT_PHY_QCAPS_RESP_RSVD2_MASK                      0xff000000UL
+       #define PORT_PHY_QCAPS_RESP_RSVD2_SFT                       24
+       __le32 valid_tx_lpi_timer_high;
+       #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK          0xffffffUL
+       #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT           0
+       #define PORT_PHY_QCAPS_RESP_VALID_MASK                      0xff000000UL
+       #define PORT_PHY_QCAPS_RESP_VALID_SFT                       24
+};
+
+/* hwrm_port_phy_i2c_write */
+/* Input (48 bytes) */
+/*
+ * Request: write up to 64 bytes (data_length bytes of data[16] dwords) to an
+ * I2C slave behind the port's PHY, addressed by page_number/page_offset.
+ * PAGE_OFFSET is optional — its presence is signalled via `enables`.
+ */
+struct hwrm_port_phy_i2c_write_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       __le32 enables;
+       #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET          0x1UL
+       __le16 port_id;
+       u8 i2c_slave_addr;
+       u8 unused_0;
+       __le16 page_number;
+       __le16 page_offset;
+       u8 data_length;
+       u8 unused_1;
+       __le16 unused_2;
+       __le16 unused_3;
+       __le16 unused_4;
+       __le32 data[16];
+};
+
+/* Output (16 bytes) */
+/* Response: standard HWRM completion header only; `valid` is the last byte. */
+struct hwrm_port_phy_i2c_write_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_phy_i2c_read */
+/* Input (40 bytes) */
+/*
+ * Request: read data_length bytes from an I2C slave behind the port's PHY.
+ * Mirrors hwrm_port_phy_i2c_write_input minus the data payload; PAGE_OFFSET
+ * is again optional via `enables`.
+ */
+struct hwrm_port_phy_i2c_read_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       __le32 enables;
+       #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET           0x1UL
+       __le16 port_id;
+       u8 i2c_slave_addr;
+       u8 unused_0;
+       __le16 page_number;
+       __le16 page_offset;
+       u8 data_length;
+       u8 unused_1[7];
+};
+
+/* Output (80 bytes) */
+/* Response: the read bytes in data[16] dwords, then padding and `valid`. */
+struct hwrm_port_phy_i2c_read_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 data[16];
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_led_cfg */
+/* Input (64 bytes) */
+/*
+ * Request: configure up to four port LEDs (led0..led3).  Each LED has an
+ * identical 8-byte record — id, state, color, blink on/off durations and a
+ * group id — and each record field is individually optional, gated by the
+ * corresponding ENABLES_LEDn_* bit in `enables`.
+ */
+struct hwrm_port_led_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_ID                    0x1UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE                 0x2UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR                 0x4UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON              0x8UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF     0x10UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID              0x20UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_ID                    0x40UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE                 0x80UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR                 0x100UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON              0x200UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF     0x400UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID              0x800UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_ID                    0x1000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE                 0x2000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR                 0x4000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON              0x8000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF     0x10000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID              0x20000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_ID                    0x40000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE                 0x80000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR                 0x100000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON              0x200000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF     0x400000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID              0x800000UL
+       __le16 port_id;
+       u8 num_leds;
+       u8 rsvd;
+       /* Per-LED record 0 of 4 (the three below repeat this layout). */
+       u8 led0_id;
+       u8 led0_state;
+       #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED0_STATE_OFF            0x1UL
+       #define PORT_LED_CFG_REQ_LED0_STATE_ON                     0x2UL
+       #define PORT_LED_CFG_REQ_LED0_STATE_BLINK                  0x3UL
+       #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT               0x4UL
+       u8 led0_color;
+       #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER                  0x1UL
+       #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN                  0x2UL
+       #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER             0x3UL
+       u8 unused_0;
+       __le16 led0_blink_on;
+       __le16 led0_blink_off;
+       u8 led0_group_id;
+       u8 rsvd0;
+       u8 led1_id;
+       u8 led1_state;
+       #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED1_STATE_OFF            0x1UL
+       #define PORT_LED_CFG_REQ_LED1_STATE_ON                     0x2UL
+       #define PORT_LED_CFG_REQ_LED1_STATE_BLINK                  0x3UL
+       #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT               0x4UL
+       u8 led1_color;
+       #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER                  0x1UL
+       #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN                  0x2UL
+       #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER             0x3UL
+       u8 unused_1;
+       __le16 led1_blink_on;
+       __le16 led1_blink_off;
+       u8 led1_group_id;
+       u8 rsvd1;
+       u8 led2_id;
+       u8 led2_state;
+       #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED2_STATE_OFF            0x1UL
+       #define PORT_LED_CFG_REQ_LED2_STATE_ON                     0x2UL
+       #define PORT_LED_CFG_REQ_LED2_STATE_BLINK                  0x3UL
+       #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT               0x4UL
+       u8 led2_color;
+       #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER                  0x1UL
+       #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN                  0x2UL
+       #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER             0x3UL
+       u8 unused_2;
+       __le16 led2_blink_on;
+       __le16 led2_blink_off;
+       u8 led2_group_id;
+       u8 rsvd2;
+       u8 led3_id;
+       u8 led3_state;
+       #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED3_STATE_OFF            0x1UL
+       #define PORT_LED_CFG_REQ_LED3_STATE_ON                     0x2UL
+       #define PORT_LED_CFG_REQ_LED3_STATE_BLINK                  0x3UL
+       #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT               0x4UL
+       u8 led3_color;
+       #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER                  0x1UL
+       #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN                  0x2UL
+       #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER             0x3UL
+       u8 unused_3;
+       __le16 led3_blink_on;
+       __le16 led3_blink_off;
+       u8 led3_group_id;
+       u8 rsvd3;
+};
+
+/* Output (16 bytes) */
+/* Response: standard HWRM completion header only; `valid` is the last byte. */
+struct hwrm_port_led_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_led_qcfg */
+/* Input (24 bytes) */
+/* Request: query the current configuration of a port's LEDs. */
+struct hwrm_port_led_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (56 bytes) */
+/*
+ * Response: num_leds followed by four identical per-LED records (id, type,
+ * state, color, blink on/off durations, group id).  The TYPE/STATE/COLOR
+ * value sets mirror those used by hwrm_port_led_cfg_input.
+ */
+struct hwrm_port_led_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 num_leds;
+       u8 led0_id;
+       u8 led0_type;
+       #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED                 0x0UL
+       #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY              0x1UL
+       #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID               0xffUL
+       u8 led0_state;
+       #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT              0x0UL
+       #define PORT_LED_QCFG_RESP_LED0_STATE_OFF                  0x1UL
+       #define PORT_LED_QCFG_RESP_LED0_STATE_ON                   0x2UL
+       #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK                0x3UL
+       #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT             0x4UL
+       u8 led0_color;
+       #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT              0x0UL
+       #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER                0x1UL
+       #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN                0x2UL
+       #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER           0x3UL
+       u8 unused_0;
+       __le16 led0_blink_on;
+       __le16 led0_blink_off;
+       u8 led0_group_id;
+       u8 led1_id;
+       u8 led1_type;
+       #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED                 0x0UL
+       #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY              0x1UL
+       #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID               0xffUL
+       u8 led1_state;
+       #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT              0x0UL
+       #define PORT_LED_QCFG_RESP_LED1_STATE_OFF                  0x1UL
+       #define PORT_LED_QCFG_RESP_LED1_STATE_ON                   0x2UL
+       #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK                0x3UL
+       #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT             0x4UL
+       u8 led1_color;
+       #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT              0x0UL
+       #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER                0x1UL
+       #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN                0x2UL
+       #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER           0x3UL
+       u8 unused_1;
+       __le16 led1_blink_on;
+       __le16 led1_blink_off;
+       u8 led1_group_id;
+       u8 led2_id;
+       u8 led2_type;
+       #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED                 0x0UL
+       #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY              0x1UL
+       #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID               0xffUL
+       u8 led2_state;
+       #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT              0x0UL
+       #define PORT_LED_QCFG_RESP_LED2_STATE_OFF                  0x1UL
+       #define PORT_LED_QCFG_RESP_LED2_STATE_ON                   0x2UL
+       #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK                0x3UL
+       #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT             0x4UL
+       u8 led2_color;
+       #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT              0x0UL
+       #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER                0x1UL
+       #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN                0x2UL
+       #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER           0x3UL
+       u8 unused_2;
+       __le16 led2_blink_on;
+       __le16 led2_blink_off;
+       u8 led2_group_id;
+       u8 led3_id;
+       u8 led3_type;
+       #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED                 0x0UL
+       #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY              0x1UL
+       #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID               0xffUL
+       u8 led3_state;
+       #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT              0x0UL
+       #define PORT_LED_QCFG_RESP_LED3_STATE_OFF                  0x1UL
+       #define PORT_LED_QCFG_RESP_LED3_STATE_ON                   0x2UL
+       #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK                0x3UL
+       #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT             0x4UL
+       u8 led3_color;
+       #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT              0x0UL
+       #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER                0x1UL
+       #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN                0x2UL
+       #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER           0x3UL
+       u8 unused_3;
+       __le16 led3_blink_on;
+       __le16 led3_blink_off;
+       u8 led3_group_id;
+       u8 unused_4;
+       __le16 unused_5;
+       u8 unused_6;
+       u8 unused_7;
+       u8 unused_8;
+       u8 valid;
+};
+
+/* hwrm_port_led_qcaps */
+/* Input (24 bytes) */
+/* Request: query the LED capabilities of a port. */
+struct hwrm_port_led_qcaps_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+/*
+ * Response: num_leds plus four identical per-LED capability records.  Each
+ * record carries the LED id/type/group and two bitmaps: state_caps (which
+ * of OFF/ON/BLINK/BLINK_ALT the LED supports, plus an ENABLED bit) and
+ * color_caps (amber/green support).
+ */
+struct hwrm_port_led_qcaps_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 num_leds;
+       u8 unused_0[3];
+       u8 led0_id;
+       u8 led0_type;
+       #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED                0x0UL
+       #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY             0x1UL
+       #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID              0xffUL
+       u8 led0_group_id;
+       u8 unused_1;
+       __le16 led0_state_caps;
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED         0x1UL
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED  0x2UL
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED   0x4UL
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+       __le16 led0_color_caps;
+       #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD            0x1UL
+       #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+       #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+       u8 led1_id;
+       u8 led1_type;
+       #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED                0x0UL
+       #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY             0x1UL
+       #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID              0xffUL
+       u8 led1_group_id;
+       u8 unused_2;
+       __le16 led1_state_caps;
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED         0x1UL
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED  0x2UL
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED   0x4UL
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+       __le16 led1_color_caps;
+       #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD            0x1UL
+       #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+       #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+       u8 led2_id;
+       u8 led2_type;
+       #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED                0x0UL
+       #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY             0x1UL
+       #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID              0xffUL
+       u8 led2_group_id;
+       u8 unused_3;
+       __le16 led2_state_caps;
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED         0x1UL
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED  0x2UL
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED   0x4UL
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+       __le16 led2_color_caps;
+       #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD            0x1UL
+       #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+       #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+       u8 led3_id;
+       u8 led3_type;
+       #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED                0x0UL
+       #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY             0x1UL
+       #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID              0xffUL
+       u8 led3_group_id;
+       u8 unused_4;
+       __le16 led3_state_caps;
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED         0x1UL
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED  0x2UL
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED   0x4UL
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+       __le16 led3_color_caps;
+       #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD            0x1UL
+       #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+       #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+       u8 unused_5;
+       u8 unused_6;
+       u8 unused_7;
+       u8 valid;
+};
+
+/* hwrm_queue_qportcfg */
+/* Input (24 bytes) */
+/*
+ * Request: query per-port queue configuration.  Bit 0 of `flags` selects
+ * the direction (TX = 0, RX = 1); PATH_LAST marks the highest defined
+ * value of that field.
+ */
+struct hwrm_queue_qportcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH                       0x1UL
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX                   0x0UL
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX                   0x1UL
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST    QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+       __le16 port_id;
+       __le16 unused_0;
+};
+
+/* Output (32 bytes) */
+/*
+ * Response: configuration-permission flags plus eight (queue_id,
+ * service_profile) pairs; each profile is LOSSY, LOSSLESS or UNKNOWN.
+ */
+struct hwrm_queue_qportcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 max_configurable_queues;
+       u8 max_configurable_lossless_queues;
+       u8 queue_cfg_allowed;
+       u8 queue_cfg_info;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG         0x1UL
+       u8 queue_pfcenable_cfg_allowed;
+       u8 queue_pri2cos_cfg_allowed;
+       u8 queue_cos2bw_cfg_allowed;
+       u8 queue_id0;
+       u8 queue_id0_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+       u8 queue_id1;
+       u8 queue_id1_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+       u8 queue_id2;
+       u8 queue_id2_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+       u8 queue_id3;
+       u8 queue_id3_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+       u8 queue_id4;
+       u8 queue_id4_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+       u8 queue_id5;
+       u8 queue_id5_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+       u8 queue_id6;
+       u8 queue_id6_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+       u8 queue_id7;
+       u8 queue_id7_service_profile;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+       u8 valid;
+};
+
+/* hwrm_queue_qcfg */
+/* Input (24 bytes) */
+/* Request: query one queue's configuration; flags bit 0 selects TX/RX path. */
+struct hwrm_queue_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_QCFG_REQ_FLAGS_PATH                           0x1UL
+       #define QUEUE_QCFG_REQ_FLAGS_PATH_TX                       0x0UL
+       #define QUEUE_QCFG_REQ_FLAGS_PATH_RX                       0x1UL
+       #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST    QUEUE_QCFG_REQ_FLAGS_PATH_RX
+       __le32 queue_id;
+};
+
+/* Output (16 bytes) */
+/* Response: queue length, service profile and asymmetric-config info bit. */
+struct hwrm_queue_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 queue_len;
+       u8 service_profile;
+       #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY              0x0UL
+       #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS           0x1UL
+       #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN    0xffUL
+       u8 queue_cfg_info;
+       #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG     0x1UL
+       u8 unused_0;
+       u8 valid;
+};
+
+/* hwrm_queue_cfg */
+/* Input (40 bytes) */
+/*
+ * Request: configure one queue.  Unlike the qcfg request, `flags` here uses
+ * a 2-bit PATH field (TX/RX/BIDIR); dflt_len and service_profile are each
+ * optional, gated by the corresponding bit in `enables`.
+ */
+struct hwrm_queue_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_CFG_REQ_FLAGS_PATH_MASK                       0x3UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_SFT                        0
+       #define QUEUE_CFG_REQ_FLAGS_PATH_TX                        0x0UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_RX                        0x1UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR                     0x2UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_LAST    QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+       __le32 enables;
+       #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN                      0x1UL
+       #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE               0x2UL
+       __le32 queue_id;
+       __le32 dflt_len;
+       u8 service_profile;
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY                0x0UL
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS             0x1UL
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN              0xffUL
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+/* Response: standard HWRM completion header only; `valid` is the last byte. */
+struct hwrm_queue_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg */
+/* Input (24 bytes) */
+/* Request: query per-priority PFC enablement for a port. */
+struct hwrm_queue_pfcenable_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Response: one PFC-enabled bit per priority 0-7 in `flags`. */
+struct hwrm_queue_pfcenable_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 flags;
+       #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED   0x1UL
+       #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED   0x2UL
+       #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED   0x4UL
+       #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED   0x8UL
+       #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED   0x10UL
+       #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED   0x20UL
+       #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED   0x40UL
+       #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED   0x80UL
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg */
+/* Input (24 bytes) */
+/*
+ * Request: set per-priority PFC enablement for a port — same one-bit-per-
+ * priority layout as the qcfg response above.
+ */
+struct hwrm_queue_pfcenable_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED     0x1UL
+       #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED     0x2UL
+       #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED     0x4UL
+       #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED     0x8UL
+       #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED     0x10UL
+       #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED     0x20UL
+       #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED     0x40UL
+       #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED     0x80UL
+       __le16 port_id;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Response: standard HWRM completion header only; `valid` is the last byte. */
+struct hwrm_queue_pfcenable_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg */
+/* Input (24 bytes) */
+/*
+ * Request: query the priority-to-CoS-queue mapping.  flags bit 0 selects
+ * TX/RX path; bit 1 (IVLAN) is a separate flag.  Note port_id is a u8 here,
+ * unlike the __le16 port_id used by most other requests in this file.
+ */
+struct hwrm_queue_pri2cos_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH                   0x1UL
+       #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX               (0x0UL << 0)
+       #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX               (0x1UL << 0)
+       #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+       #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN                  0x2UL
+       u8 port_id;
+       u8 unused_0[3];
+};
+
+/* Output (24 bytes) */
+/* Response: the CoS queue id assigned to each of priorities 0-7. */
+struct hwrm_queue_pri2cos_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 pri0_cos_queue_id;
+       u8 pri1_cos_queue_id;
+       u8 pri2_cos_queue_id;
+       u8 pri3_cos_queue_id;
+       u8 pri4_cos_queue_id;
+       u8 pri5_cos_queue_id;
+       u8 pri6_cos_queue_id;
+       u8 pri7_cos_queue_id;
+       u8 queue_cfg_info;
+       #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG    0x1UL
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg */
+/* Input (40 bytes) */
+/*
+ * Request: set the priority-to-CoS-queue mapping.  `flags` carries a 2-bit
+ * PATH field (TX/RX/BIDIR) plus IVLAN at bit 2; each prN_cos_queue_id is
+ * individually optional via the matching bit in `enables`.
+ */
+struct hwrm_queue_pri2cos_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK               0x3UL
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT                0
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX                (0x0UL << 0)
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX                (0x1UL << 0)
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR             (0x2UL << 0)
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN                   0x4UL
+       __le32 enables;
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID    0x1UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID    0x2UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID    0x4UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID    0x8UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID    0x10UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID    0x20UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID    0x40UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID    0x80UL
+       u8 port_id;
+       u8 pri0_cos_queue_id;
+       u8 pri1_cos_queue_id;
+       u8 pri2_cos_queue_id;
+       u8 pri3_cos_queue_id;
+       u8 pri4_cos_queue_id;
+       u8 pri5_cos_queue_id;
+       u8 pri6_cos_queue_id;
+       u8 pri7_cos_queue_id;
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+/* Response: standard HWRM completion header only; `valid` is the last byte. */
+struct hwrm_queue_pri2cos_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg */
+/* Input (24 bytes) */
+/* Request: query the CoS-queue-to-bandwidth configuration of a port. */
+struct hwrm_queue_cos2bw_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (112 bytes) */
+/* Response: per-CoS-queue bandwidth settings for queue ids 0..7.  Each
+ * queue repeats the same field group: id, min_bw, max_bw, tsa_assign,
+ * pri_lvl, bw_weight.
+ *
+ * min_bw / max_bw 32-bit word encoding (per the _MASK/_SFT defines):
+ *   bits  0..27  bandwidth value
+ *   bit   28     scale: 0 = bits, 1 = bytes
+ *   bits 29..31  value unit: mega / kilo / base / giga /
+ *                percent(1/100) / invalid
+ */
+struct hwrm_queue_cos2bw_qcfg_output {
+       __le16 error_code;      /* 0 on success, else HWRM error code */
+       __le16 req_type;        /* echoes the request's req_type */
+       __le16 seq_id;          /* echoes the request's seq_id */
+       __le16 resp_len;        /* total response length in bytes */
+       u8 queue_id0;           /* CoS queue id this first group describes */
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 queue_id0_min_bw;        /* encoded as described above */
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id0_max_bw;        /* same encoding as min_bw */
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+       /* TSA = transmission selection algorithm.  SP / ETS presumably map
+        * to strict priority / enhanced transmission selection (IEEE
+        * 802.1Qaz terms) — NOTE(review): inferred from the names, confirm
+        * against the HWRM spec.
+        */
+       u8 queue_id0_tsa_assign;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP    0x0UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS   0x1UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id0_pri_lvl;
+       u8 queue_id0_bw_weight;
+       /* Queue groups 1..7 repeat the queue 0 layout. */
+       u8 queue_id1;
+       __le32 queue_id1_min_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id1_max_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id1_tsa_assign;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP    0x0UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS   0x1UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id1_pri_lvl;
+       u8 queue_id1_bw_weight;
+       u8 queue_id2;
+       __le32 queue_id2_min_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id2_max_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id2_tsa_assign;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP    0x0UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS   0x1UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id2_pri_lvl;
+       u8 queue_id2_bw_weight;
+       u8 queue_id3;
+       __le32 queue_id3_min_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id3_max_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id3_tsa_assign;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP    0x0UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS   0x1UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id3_pri_lvl;
+       u8 queue_id3_bw_weight;
+       u8 queue_id4;
+       __le32 queue_id4_min_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id4_max_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id4_tsa_assign;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP    0x0UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS   0x1UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id4_pri_lvl;
+       u8 queue_id4_bw_weight;
+       u8 queue_id5;
+       __le32 queue_id5_min_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id5_max_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id5_tsa_assign;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP    0x0UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS   0x1UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id5_pri_lvl;
+       u8 queue_id5_bw_weight;
+       u8 queue_id6;
+       __le32 queue_id6_min_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id6_max_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id6_tsa_assign;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP    0x0UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS   0x1UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id6_pri_lvl;
+       u8 queue_id6_bw_weight;
+       u8 queue_id7;
+       __le32 queue_id7_min_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id7_max_bw;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id7_tsa_assign;
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP    0x0UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS   0x1UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id7_pri_lvl;
+       u8 queue_id7_bw_weight;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 unused_5;
+       u8 valid;               /* response-complete marker — NOTE(review):
+                                * per HWRM convention firmware writes this
+                                * byte last; confirm against the spec. */
+};
+
+/* hwrm_queue_cos2bw_cfg */
+/* Input (128 bytes) */
+struct hwrm_queue_cos2bw_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       __le32 enables;
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID   0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID   0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID   0x4UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID   0x8UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID   0x10UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID   0x20UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID   0x40UL
+       #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID   0x80UL
+       __le16 port_id;
+       u8 queue_id0;
+       u8 unused_0;
+       __le32 queue_id0_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id0_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id0_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id0_pri_lvl;
+       u8 queue_id0_bw_weight;
+       u8 queue_id1;
+       __le32 queue_id1_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id1_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id1_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id1_pri_lvl;
+       u8 queue_id1_bw_weight;
+       u8 queue_id2;
+       __le32 queue_id2_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id2_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id2_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id2_pri_lvl;
+       u8 queue_id2_bw_weight;
+       u8 queue_id3;
+       __le32 queue_id3_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id3_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id3_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id3_pri_lvl;
+       u8 queue_id3_bw_weight;
+       u8 queue_id4;
+       __le32 queue_id4_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id4_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id4_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id4_pri_lvl;
+       u8 queue_id4_bw_weight;
+       u8 queue_id5;
+       __le32 queue_id5_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id5_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id5_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id5_pri_lvl;
+       u8 queue_id5_bw_weight;
+       u8 queue_id6;
+       __le32 queue_id6_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id6_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id6_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id6_pri_lvl;
+       u8 queue_id6_bw_weight;
+       u8 queue_id7;
+       __le32 queue_id7_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+       __le32 queue_id7_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 queue_id7_tsa_assign;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+       u8 queue_id7_pri_lvl;
+       u8 queue_id7_bw_weight;
+       u8 unused_1[5];
+};
+
+/* Output (16 bytes) */
+/*
+ * Reply to HWRM_QUEUE_COS2BW_CFG: standard HWRM response header plus
+ * padding; carries no payload beyond the completion status.
+ */
+struct hwrm_queue_cos2bw_cfg_output {
+       /* Standard HWRM response header (same layout in every *_output here) */
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       /* NOTE(review): per HWRM convention the firmware writes 'valid' last
+        * so the host can poll it to detect a complete DMA'd response —
+        * confirm against the HWRM spec; not visible from this header alone. */
+       u8 valid;
+};
+
+/* hwrm_queue_dscp_qcaps */
+/* Input (24 bytes) */
+/* Request: query the DSCP-mapping capabilities of one port. */
+struct hwrm_queue_dscp_qcaps_input {
+       /* Standard HWRM request header (same layout in every *_input here) */
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 port_id;             /* port whose DSCP capabilities are queried */
+       u8 unused_0[7];         /* pad to 8-byte multiple */
+};
+
+/* Output (16 bytes) */
+/* Reply: DSCP capability limits for the queried port. */
+struct hwrm_queue_dscp_qcaps_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 num_dscp_bits;       /* number of DSCP bits the device supports */
+       u8 unused_0;
+       __le16 max_entries;     /* max DSCP->priority map entries supported */
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_queue_dscp2pri_qcfg */
+/* Input (32 bytes) */
+/*
+ * Request: read back the current DSCP-to-priority mapping for a port.
+ * Firmware DMAs the map entries into the host buffer at dest_data_addr.
+ */
+struct hwrm_queue_dscp2pri_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 dest_data_addr;          /* host DMA address for the returned map */
+       u8 port_id;
+       u8 unused_0;
+       __le16 dest_data_buffer_size;   /* size in bytes of that host buffer */
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+/* Reply: how many map entries were written, plus the port default priority. */
+struct hwrm_queue_dscp2pri_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 entry_cnt;       /* number of DSCP->priority entries returned */
+       u8 default_pri;         /* priority used for unmapped DSCP values */
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_queue_dscp2pri_cfg */
+/* Input (40 bytes) */
+/*
+ * Request: program the DSCP-to-priority mapping for a port. The map
+ * entries are read by firmware from the host buffer at src_data_addr.
+ */
+struct hwrm_queue_dscp2pri_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 src_data_addr;   /* host DMA address holding the map entries */
+       __le32 flags;
+       #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI    0x1UL
+       __le32 enables;
+       /* bit set => the default_pri field below is meaningful */
+       #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI          0x1UL
+       u8 port_id;
+       u8 default_pri;         /* priority for DSCP values not in the map */
+       __le16 entry_cnt;       /* number of entries at src_data_addr */
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Reply to HWRM_QUEUE_DSCP2PRI_CFG: status only, no payload. */
+struct hwrm_queue_dscp2pri_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_alloc */
+/* Input (24 bytes) */
+/* Request: allocate a VNIC (virtual NIC) resource in firmware. */
+struct hwrm_vnic_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       /* request that this VNIC be the function's default VNIC */
+       #define VNIC_ALLOC_REQ_FLAGS_DEFAULT                        0x1UL
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Reply: handle of the newly allocated VNIC. */
+struct hwrm_vnic_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 vnic_id;         /* firmware handle used by later vnic_* commands */
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_free */
+/* Input (24 bytes) */
+/* Request: release a VNIC previously obtained via hwrm_vnic_alloc. */
+struct hwrm_vnic_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 vnic_id;         /* handle returned by hwrm_vnic_alloc_output */
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Reply to HWRM_VNIC_FREE: status only, no payload. */
+struct hwrm_vnic_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_cfg */
+/* Input (40 bytes) */
+/*
+ * Request: configure an allocated VNIC — default ring group, RSS/CoS/LB
+ * rule handles and MRU. Each 'enables' bit marks the corresponding field
+ * below as meaningful; 'flags' toggles VNIC operating modes.
+ */
+struct hwrm_vnic_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define VNIC_CFG_REQ_FLAGS_DEFAULT                          0x1UL
+       #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE                  0x2UL
+       #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE                    0x4UL
+       #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE              0x8UL
+       #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE              0x10UL
+       #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE                 0x20UL
+       __le32 enables;
+       /* one enable bit per configurable field below, in field order */
+       #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP                  0x1UL
+       #define VNIC_CFG_REQ_ENABLES_RSS_RULE                       0x2UL
+       #define VNIC_CFG_REQ_ENABLES_COS_RULE                       0x4UL
+       #define VNIC_CFG_REQ_ENABLES_LB_RULE                        0x8UL
+       #define VNIC_CFG_REQ_ENABLES_MRU                            0x10UL
+       __le16 vnic_id;         /* VNIC handle to configure */
+       __le16 dflt_ring_grp;   /* default ring group handle */
+       __le16 rss_rule;        /* RSS context handle */
+       __le16 cos_rule;        /* CoS rule handle */
+       __le16 lb_rule;         /* loopback rule handle */
+       __le16 mru;             /* maximum receive unit for this VNIC */
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Reply to HWRM_VNIC_CFG: status only, no payload. */
+struct hwrm_vnic_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_qcfg */
+/* Input (32 bytes) */
+/*
+ * Request: read back a VNIC's configuration. When the VF_ID_VALID enable
+ * bit is set, vf_id selects a virtual function's VNIC to query.
+ */
+struct hwrm_vnic_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define VNIC_QCFG_REQ_ENABLES_VF_ID_VALID                   0x1UL
+       __le32 vnic_id;
+       __le16 vf_id;           /* only meaningful when VF_ID_VALID is set */
+       __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+/*
+ * Reply: current VNIC configuration. Fields and flag bits mirror those
+ * programmed via hwrm_vnic_cfg_input.
+ */
+struct hwrm_vnic_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 dflt_ring_grp;   /* default ring group handle */
+       __le16 rss_rule;        /* RSS context handle */
+       __le16 cos_rule;        /* CoS rule handle */
+       __le16 lb_rule;         /* loopback rule handle */
+       __le16 mru;             /* maximum receive unit */
+       u8 unused_0;
+       u8 unused_1;
+       __le32 flags;
+       /* same mode bits as VNIC_CFG_REQ_FLAGS_* */
+       #define VNIC_QCFG_RESP_FLAGS_DEFAULT                        0x1UL
+       #define VNIC_QCFG_RESP_FLAGS_VLAN_STRIP_MODE                0x2UL
+       #define VNIC_QCFG_RESP_FLAGS_BD_STALL_MODE                  0x4UL
+       #define VNIC_QCFG_RESP_FLAGS_ROCE_DUAL_VNIC_MODE            0x8UL
+       #define VNIC_QCFG_RESP_FLAGS_ROCE_ONLY_VNIC_MODE            0x10UL
+       #define VNIC_QCFG_RESP_FLAGS_RSS_DFLT_CR_MODE               0x20UL
+       __le32 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 unused_5;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_qcaps */
+/* Input (24 bytes) */
+/* Request: query VNIC capabilities; no enable bits are defined here. */
+struct hwrm_vnic_qcaps_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+/*
+ * Reply: supported VNIC capabilities. The *_CAP flag bits parallel the
+ * VNIC_CFG_REQ_FLAGS_* mode bits they advertise support for.
+ */
+struct hwrm_vnic_qcaps_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 mru;             /* maximum receive unit supported */
+       u8 unused_0;
+       u8 unused_1;
+       __le32 flags;
+       #define VNIC_QCAPS_RESP_FLAGS_UNUSED                        0x1UL
+       #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP                0x2UL
+       #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP                  0x4UL
+       #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP            0x8UL
+       #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP            0x10UL
+       #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP               0x20UL
+       __le32 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 unused_5;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_tpa_cfg */
+/* Input (40 bytes) */
+/*
+ * Request: configure TPA (transparent packet aggregation / LRO-GRO) on a
+ * VNIC. 'flags' selects aggregation modes; each 'enables' bit marks one
+ * of the limit fields below (segments, aggregations, timer, min length)
+ * as meaningful. The MAX_AGG_* values are log2-style encodings, not
+ * literal counts (e.g. SEGS_8 == 0x3).
+ */
+struct hwrm_vnic_tpa_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define VNIC_TPA_CFG_REQ_FLAGS_TPA                          0x1UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA                    0x2UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE               0x4UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_GRO                          0x8UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN                 0x10UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ       0x20UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK               0x40UL
+       #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK                0x80UL
+       __le32 enables;
+       #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS               0x1UL
+       #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS                   0x2UL
+       #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER              0x4UL
+       #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN                0x8UL
+       __le16 vnic_id;
+       __le16 max_agg_segs;    /* encoded max TPA segments per aggregation */
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1            0x0UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2            0x1UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4            0x2UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8            0x3UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX                  0x1fUL
+       __le16 max_aggs;        /* encoded max concurrent aggregations */
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_1                        0x0UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_2                        0x1UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_4                        0x2UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_8                        0x3UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_16                       0x4UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX                      0x7UL
+       u8 unused_0;
+       u8 unused_1;
+       __le32 max_agg_timer;   /* max time an aggregation may stay open */
+       __le32 min_agg_len;     /* minimum packet length eligible for TPA */
+};
+
+/* Output (16 bytes) */
+/* Reply to HWRM_VNIC_TPA_CFG: status only, no payload. */
+struct hwrm_vnic_tpa_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_tpa_qcfg */
+/* Input (24 bytes) */
+/* Request: read back the TPA configuration of one VNIC. */
+struct hwrm_vnic_tpa_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 vnic_id;         /* VNIC whose TPA settings are queried */
+       __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+/*
+ * Reply: current TPA settings. Flag bits and the encoded MAX_AGG_*
+ * values mirror hwrm_vnic_tpa_cfg_input.
+ */
+struct hwrm_vnic_tpa_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 flags;
+       #define VNIC_TPA_QCFG_RESP_FLAGS_TPA                        0x1UL
+       #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA                  0x2UL
+       #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE     0x4UL
+       #define VNIC_TPA_QCFG_RESP_FLAGS_GRO                        0x8UL
+       #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN               0x10UL
+       #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ     0x20UL
+       #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK     0x40UL
+       #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK              0x80UL
+       __le16 max_agg_segs;    /* encoded, see *_MAX_AGG_SEGS_* below */
+       #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1                  0x0UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2                  0x1UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4                  0x2UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8                  0x3UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX                0x1fUL
+       __le16 max_aggs;        /* encoded, see *_MAX_AGGS_* below */
+       #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1                      0x0UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2                      0x1UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4                      0x2UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8                      0x3UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16                     0x4UL
+       #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX            0x7UL
+       __le32 max_agg_timer;
+       __le32 min_agg_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_rss_cfg */
+/* Input (48 bytes) */
+/*
+ * Request: configure RSS for a context. hash_type selects which packet
+ * tuples feed the hash; the indirection (ring group) table and hash key
+ * are supplied indirectly via host DMA addresses.
+ */
+struct hwrm_vnic_rss_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 hash_type;
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4             0x1UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4                 0x2UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4                 0x4UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6             0x8UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6                 0x10UL
+       #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6                 0x20UL
+       __le32 unused_0;
+       __le64 ring_grp_tbl_addr;       /* host DMA address of indirection table */
+       __le64 hash_key_tbl_addr;       /* host DMA address of the RSS hash key */
+       __le16 rss_ctx_idx;             /* RSS context to program */
+       __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+/* Reply to HWRM_VNIC_RSS_CFG: status only, no payload. */
+struct hwrm_vnic_rss_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;               /* response-complete marker (HWRM convention) */
+};
+
+/* hwrm_vnic_rss_qcfg */
+/* Input (24 bytes) */
+/* Request: read back the RSS configuration of one RSS context. */
+struct hwrm_vnic_rss_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 rss_ctx_idx;     /* RSS context to query */
+       __le16 unused_0[3];
+};
+
+/* Output (64 bytes) */
+struct hwrm_vnic_rss_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 hash_type;
+       #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4                   0x1UL
+       #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4               0x2UL
+       #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4               0x4UL
+       #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6                   0x8UL
+       #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6               0x10UL
+       #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6               0x20UL
+       __le32 unused_0;
+       __le32 hash_key[10];
+       __le32 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg */
+/* HWRM_VNIC_PLCMODES_CFG: configures RX buffer placement modes for a
+ * VNIC - regular/jumbo placement and header-data-split (HDS) for
+ * IPv4/IPv6/FCoE/RoCE, with jumbo_thresh/hds_offset/hds_threshold
+ * gated by the corresponding bits in `enables`.  Firmware wire
+ * format - do not modify the layout.
+ */
+/* Input (40 bytes) */
+struct hwrm_vnic_plcmodes_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT      0x1UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT         0x2UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4                0x4UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6                0x8UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE                0x10UL
+       #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE                0x20UL
+       __le32 enables;
+       #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID   0x1UL
+       #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID     0x2UL
+       #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID  0x4UL
+       __le32 vnic_id;
+       __le16 jumbo_thresh;
+       __le16 hds_offset;
+       __le16 hds_threshold;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_plcmodes_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_qcfg */
+/* Query counterpart of HWRM_VNIC_PLCMODES_CFG: returns the placement
+ * flags and thresholds currently programmed for vnic_id.  Firmware
+ * wire format - do not modify the layout.
+ */
+/* Input (24 bytes) */
+struct hwrm_vnic_plcmodes_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 vnic_id;
+       __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_vnic_plcmodes_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 flags;
+       #define VNIC_PLCMODES_QCFG_RESP_FLAGS_REGULAR_PLACEMENT    0x1UL
+       #define VNIC_PLCMODES_QCFG_RESP_FLAGS_JUMBO_PLACEMENT      0x2UL
+       #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV4              0x4UL
+       #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV6              0x8UL
+       #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_FCOE              0x10UL
+       #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_ROCE              0x20UL
+       #define VNIC_PLCMODES_QCFG_RESP_FLAGS_DFLT_VNIC     0x40UL
+       __le16 jumbo_thresh;
+       __le16 hds_offset;
+       __le16 hds_threshold;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* HWRM_VNIC_RSS_COS_LB_CTX_ALLOC: allocates an RSS/CoS/load-balance
+ * context in firmware; the response returns its handle in
+ * rss_cos_lb_ctx_id.  Firmware wire format - do not modify.
+ */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 rss_cos_lb_ctx_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* HWRM_VNIC_RSS_COS_LB_CTX_FREE: releases a context previously
+ * obtained from the _alloc command above, identified by
+ * rss_cos_lb_ctx_id.
+ */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 rss_cos_lb_ctx_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_alloc */
+/* HWRM_RING_ALLOC: allocates a hardware ring (L2/RoCE completion, TX
+ * or RX, per ring_type) backed by the page table at page_tbl_addr.
+ * Optional attributes (arbitration config, stats context, max
+ * bandwidth) are only honored when the matching bit is set in
+ * `enables`.  The response returns the hardware ring_id and
+ * logical_ring_id.  Firmware wire format - do not modify the layout.
+ */
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define RING_ALLOC_REQ_ENABLES_RESERVED1                    0x1UL
+       #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG                 0x2UL
+       #define RING_ALLOC_REQ_ENABLES_RESERVED3                    0x4UL
+       #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID            0x8UL
+       #define RING_ALLOC_REQ_ENABLES_RESERVED4                    0x10UL
+       #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID                 0x20UL
+       u8 ring_type;
+       #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL                   0x0UL
+       #define RING_ALLOC_REQ_RING_TYPE_TX                        0x1UL
+       #define RING_ALLOC_REQ_RING_TYPE_RX                        0x2UL
+       #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL                 0x3UL
+       u8 unused_0;
+       __le16 unused_1;
+       __le64 page_tbl_addr;
+       /* fbo: presumably "first byte offset" into the first page - confirm
+        * against the HWRM spec.
+        */
+       __le32 fbo;
+       u8 page_size;
+       u8 page_tbl_depth;
+       u8 unused_2;
+       u8 unused_3;
+       __le32 length;
+       __le16 logical_id;
+       __le16 cmpl_ring_id;
+       __le16 queue_id;
+       u8 unused_4;
+       u8 unused_5;
+       __le32 reserved1;
+       /* Arbitration policy (strict-priority or WFQ) in bits 0-3, policy
+        * parameter in bits 8-15; only used when ENABLES_RING_ARB_CFG is set.
+        */
+       __le16 ring_arb_cfg;
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK         0xfUL
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT          0
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP          (0x1UL << 0)
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ         (0x2UL << 0)
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST    RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+       #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK               0xf0UL
+       #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT                4
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK  0xff00UL
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT   8
+       u8 unused_6;
+       u8 unused_7;
+       __le32 reserved3;
+       __le32 stat_ctx_id;
+       __le32 reserved4;
+       /* Bandwidth cap: value in bits 0-27, bits/bytes scale in bit 28,
+        * unit in bits 29-31; only used when ENABLES_MAX_BW_VALID is set.
+        */
+       __le32 max_bw;
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK                 0xfffffffUL
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT                  0
+       #define RING_ALLOC_REQ_MAX_BW_SCALE                         0x10000000UL
+       #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS                   (0x0UL << 28)
+       #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES                  (0x1UL << 28)
+       #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST    RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT     29
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA           (0x0UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO           (0x2UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE           (0x4UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA           (0x6UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST    RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+       u8 int_mode;
+       #define RING_ALLOC_REQ_INT_MODE_LEGACY                     0x0UL
+       #define RING_ALLOC_REQ_INT_MODE_RSVD                       0x1UL
+       #define RING_ALLOC_REQ_INT_MODE_MSIX                       0x2UL
+       #define RING_ALLOC_REQ_INT_MODE_POLL                       0x3UL
+       u8 unused_8[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 ring_id;
+       __le16 logical_ring_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_ring_free */
+/* HWRM_RING_FREE: releases a ring previously allocated with
+ * HWRM_RING_ALLOC, identified by ring_type + ring_id.  Firmware wire
+ * format - do not modify the layout.
+ */
+/* Input (24 bytes) */
+struct hwrm_ring_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 ring_type;
+       #define RING_FREE_REQ_RING_TYPE_L2_CMPL            0x0UL
+       #define RING_FREE_REQ_RING_TYPE_TX                         0x1UL
+       #define RING_FREE_REQ_RING_TYPE_RX                         0x2UL
+       #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL                  0x3UL
+       u8 unused_0;
+       __le16 ring_id;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params */
+/* HWRM_RING_CMPL_RING_QAGGINT_PARAMS: queries the interrupt
+ * aggregation (coalescing) parameters of a completion ring - DMA
+ * aggregation counts/timers and min/max interrupt latency timers.
+ * Firmware wire format - do not modify the layout.
+ */
+/* Input (24 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 ring_id;
+       __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 flags;
+       #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+       #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+       __le16 num_cmpl_dma_aggr;
+       __le16 num_cmpl_dma_aggr_during_int;
+       __le16 cmpl_aggr_dma_tmr;
+       __le16 cmpl_aggr_dma_tmr_during_int;
+       __le16 int_lat_tmr_min;
+       __le16 int_lat_tmr_max;
+       __le16 num_cmpl_aggr_int;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+/* HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS: sets the same interrupt
+ * aggregation parameters reported by the query command above; the
+ * request fields mirror the query response one-for-one.
+ */
+/* Input (40 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 ring_id;
+       __le16 flags;
+       #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+       #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+       __le16 num_cmpl_dma_aggr;
+       __le16 num_cmpl_dma_aggr_during_int;
+       __le16 cmpl_aggr_dma_tmr;
+       __le16 cmpl_aggr_dma_tmr_during_int;
+       __le16 int_lat_tmr_min;
+       __le16 int_lat_tmr_max;
+       __le16 num_cmpl_aggr_int;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_reset */
+/* HWRM_RING_RESET: resets a ring identified by ring_type + ring_id.
+ * Firmware wire format - do not modify the layout.
+ */
+/* Input (24 bytes) */
+struct hwrm_ring_reset_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 ring_type;
+       #define RING_RESET_REQ_RING_TYPE_L2_CMPL                   0x0UL
+       #define RING_RESET_REQ_RING_TYPE_TX                        0x1UL
+       #define RING_RESET_REQ_RING_TYPE_RX                        0x2UL
+       #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL                 0x3UL
+       u8 unused_0;
+       __le16 ring_id;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_reset_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_ring_grp_alloc */
+/* HWRM_RING_GRP_ALLOC: allocates a ring group from four ring/context
+ * ids; the response returns its handle in ring_group_id.
+ * NOTE(review): the terse cr/rr/ar/sc field names presumably stand
+ * for completion ring, rx ring, aggregation ring and stats context -
+ * confirm against the HWRM spec before relying on this.
+ * Firmware wire format - do not modify the layout.
+ */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 cr;
+       __le16 rr;
+       __le16 ar;
+       __le16 sc;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 ring_group_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_ring_grp_free */
+/* HWRM_RING_GRP_FREE: releases a ring group previously obtained from
+ * HWRM_RING_GRP_ALLOC, identified by ring_group_id.
+ */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 ring_group_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc */
+/* HWRM_CFA_L2_FILTER_ALLOC: allocates an L2 (MAC/VLAN) filter in the
+ * CFA (flow) engine.  Outer and tunnel (t_*) MAC/VLAN match fields,
+ * source/destination ids, tunnel type and mirror VNIC are each only
+ * meaningful when the matching bit is set in `enables`.  The response
+ * returns a 64-bit l2_filter_id plus a flow_id.  Firmware wire
+ * format - do not modify the layout.
+ */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH                  0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX              (0x0UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX              (0x1UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST    CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK              0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP                  0x4UL
+       #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST     0x8UL
+       __le32 enables;
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR     0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK       0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN            0x4UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK      0x8UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN            0x10UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK      0x20UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR           0x40UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK     0x80UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN          0x100UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK    0x200UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN          0x400UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK    0x800UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE            0x1000UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID              0x2000UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE         0x4000UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID              0x8000UL
+       #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x10000UL
+       u8 l2_addr[6];
+       u8 unused_0;
+       u8 unused_1;
+       u8 l2_addr_mask[6];
+       __le16 l2_ovlan;
+       __le16 l2_ovlan_mask;
+       __le16 l2_ivlan;
+       __le16 l2_ivlan_mask;
+       u8 unused_2;
+       u8 unused_3;
+       u8 t_l2_addr[6];
+       u8 unused_4;
+       u8 unused_5;
+       u8 t_l2_addr_mask[6];
+       __le16 t_l2_ovlan;
+       __le16 t_l2_ovlan_mask;
+       __le16 t_l2_ivlan;
+       __le16 t_l2_ivlan_mask;
+       u8 src_type;
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT             0x0UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF                0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF                0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC              0x3UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG              0x4UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE               0x5UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO              0x6UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG              0x7UL
+       u8 unused_6;
+       __le32 src_id;
+       u8 tunnel_type;
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL     0x0UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN          0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE          0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE          0x3UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP           0x4UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE         0x5UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS           0x6UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT    0x7UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE          0x8UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL     0xffUL
+       u8 unused_7;
+       __le16 dst_id;
+       __le16 mirror_vnic_id;
+       /* Placement hint relative to l2_filter_id_hint when inserting into
+        * the filter table.
+        */
+       u8 pri_hint;
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER         0x0UL
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER     0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER     0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX               0x3UL
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN               0x4UL
+       u8 unused_8;
+       __le32 unused_9;
+       __le64 l2_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 l2_filter_id;
+       __le32 flow_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_free */
+/* HWRM_CFA_L2_FILTER_FREE: releases an L2 filter previously obtained
+ * from HWRM_CFA_L2_FILTER_ALLOC, identified by l2_filter_id.
+ */
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 l2_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg */
+/* HWRM_CFA_L2_FILTER_CFG: re-targets an existing L2 filter
+ * (l2_filter_id) - new destination id and/or new mirror VNIC, each
+ * gated by the corresponding bit in `enables`.  Firmware wire
+ * format - do not modify the layout.
+ */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_filter_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH                    0x1UL
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX                (0x0UL << 0)
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX                (0x1UL << 0)
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST    CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+       #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP                    0x2UL
+       __le32 enables;
+       #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID                0x1UL
+       #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID   0x2UL
+       __le64 l2_filter_id;
+       __le32 dst_id;
+       __le32 new_mirror_vnic_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* HWRM_CFA_L2_SET_RX_MASK: sets the RX filtering mask for a VNIC
+ * (multicast/all-multicast/broadcast/promiscuous/VLAN modes) and
+ * passes DMA tables of multicast addresses (mc_tbl_addr,
+ * num_mc_entries) and VLAN tags (vlan_tag_tbl_addr, num_vlan_tags).
+ * Firmware wire format - do not modify the layout.
+ */
+/* Input (56 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 vnic_id;
+       __le32 mask;
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED                0x1UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST                   0x2UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST               0x4UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST                   0x8UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS     0x10UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST               0x20UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY                0x40UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN            0x80UL
+       #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN         0x100UL
+       __le64 mc_tbl_addr;
+       __le32 num_mc_entries;
+       __le32 unused_0;
+       __le64 vlan_tag_tbl_addr;
+       __le32 num_vlan_tags;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_vlan_antispoof_cfg */
+/* HWRM_CFA_VLAN_ANTISPOOF_CFG: programs a VLAN anti-spoof table for
+ * function `fid` from a DMA table of num_vlan_entries tag/mask pairs
+ * at vlan_tag_mask_tbl_addr.  Firmware wire format - do not modify
+ * the layout.
+ */
+/* Input (32 bytes) */
+struct hwrm_cfa_vlan_antispoof_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 num_vlan_entries;
+       __le64 vlan_tag_mask_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_vlan_antispoof_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_vlan_antispoof_qcfg */
+/* Query counterpart of HWRM_CFA_VLAN_ANTISPOOF_CFG: firmware writes
+ * up to max_vlan_entries of the current anti-spoof table into the
+ * caller's buffer at vlan_tag_mask_tbl_addr and reports the actual
+ * count in the response's num_vlan_entries.
+ */
+/* Input (32 bytes) */
+struct hwrm_cfa_vlan_antispoof_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 fid;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 max_vlan_entries;
+       __le64 vlan_tag_mask_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_vlan_antispoof_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 num_vlan_entries;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc */
+/* HWRM_CFA_TUNNEL_FILTER_ALLOC: allocates a tunnel flow filter keyed
+ * on an existing L2 filter plus inner MAC/VLAN, inner and outer L3
+ * addresses, tunnel type and VNI; each key is gated by its bit in
+ * `enables`.  The response returns a 64-bit tunnel_filter_id plus a
+ * flow_id.  Firmware wire format - do not modify the layout.
+ */
+/* Input (88 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK          0x1UL
+       __le32 enables;
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR         0x2UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN       0x4UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR         0x8UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE   0x10UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR      0x40UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x80UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI     0x100UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x200UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+       __le64 l2_filter_id;
+       u8 l2_addr[6];
+       __le16 l2_ivlan;
+       __le32 l3_addr[4];
+       __le32 t_l3_addr[4];
+       u8 l3_addr_type;
+       u8 t_l3_addr_type;
+       u8 tunnel_type;
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     0x3UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      0x4UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    0x5UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      0x6UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       0x7UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+       u8 unused_0;
+       __le32 vni;
+       __le32 dst_vnic_id;
+       __le32 mirror_vnic_id;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 tunnel_filter_id;
+       __le32 flow_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free */
+/* HWRM_CFA_TUNNEL_FILTER_FREE: releases a tunnel filter previously
+ * obtained from HWRM_CFA_TUNNEL_FILTER_ALLOC.
+ */
+/* Input (24 bytes) */
+struct hwrm_cfa_tunnel_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 tunnel_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_tunnel_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_encap_record_alloc */
+/* HWRM_CFA_ENCAP_RECORD_ALLOC: allocates an encapsulation record of
+ * encap_type (VXLAN/NVGRE/GRE/IPIP/GENEVE/MPLS/VLAN) from the raw
+ * 80-byte header template in encap_data[20]; the response returns its
+ * handle in encap_record_id.  Firmware wire format - do not modify
+ * the layout.
+ */
+/* Input (32 bytes) */
+struct hwrm_cfa_encap_record_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK           0x1UL
+       u8 encap_type;
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN       0x1UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE       0x2UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE       0x3UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP         0x4UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE      0x5UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS         0x6UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN         0x7UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE       0x8UL
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 encap_data[20];
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 encap_record_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free */
+/* HWRM_CFA_ENCAP_RECORD_FREE: releases an encapsulation record
+ * previously obtained from HWRM_CFA_ENCAP_RECORD_ALLOC.
+ */
+/* Input (24 bytes) */
+struct hwrm_cfa_encap_record_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 encap_record_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK          0x1UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP              0x2UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER     0x4UL
+       __le32 enables;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE      0x2UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x4UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR    0x8UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE    0x10UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR     0x20UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR     0x80UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL    0x200UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT       0x400UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK  0x800UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT       0x1000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK  0x2000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT       0x4000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID          0x10000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR    0x40000UL
+       __le64 l2_filter_id;
+       u8 src_macaddr[6];
+       __be16 ethertype;
+       u8 ip_addr_type;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN  0x0UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4     0x4UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6     0x6UL
+       u8 ip_protocol;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN   0x0UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP       0x6UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP       0x11UL
+       __le16 dst_id;
+       __le16 mirror_vnic_id;
+       u8 tunnel_type;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     0x3UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      0x4UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    0x5UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      0x6UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       0x7UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+       u8 pri_hint;
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER    0x0UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE         0x1UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW         0x2UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST      0x3UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST       0x4UL
+       __be32 src_ipaddr[4];
+       __be32 src_ipaddr_mask[4];
+       __be32 dst_ipaddr[4];
+       __be32 dst_ipaddr_mask[4];
+       __be16 src_port;
+       __be16 src_port_mask;
+       __be16 dst_port;
+       __be16 dst_port_mask;
+       __le64 ntuple_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+/* Response for HWRM_CFA_NTUPLE_FILTER_ALLOC: returns the 64-bit handle
+ * of the newly allocated n-tuple filter plus a flow id.
+ */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 ntuple_filter_id;
+       __le32 flow_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       /* NOTE(review): last byte of the response; presumably written by
+        * firmware to signal the response is complete - confirm vs. HWRM spec.
+        */
+       u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_free */
+/* Input (24 bytes) */
+/* Request carrying the ntuple_filter_id (from the alloc response) of the
+ * n-tuple filter to release.  Wire layout is firmware ABI - do not reorder.
+ */
+struct hwrm_cfa_ntuple_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 ntuple_filter_id;
+};
+
+/* Output (16 bytes) */
+/* Status-only response for the free command. */
+struct hwrm_cfa_ntuple_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/* Input (48 bytes) */
+/* Re-targets an existing n-tuple filter: each ENABLES_* bit apparently
+ * gates the new_* field of the same name - confirm against the HWRM spec.
+ */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID       0x1UL
+       #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+       #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+       __le32 unused_0;
+       __le64 ntuple_filter_id;
+       __le32 new_dst_id;
+       __le32 new_mirror_vnic_id;
+       /* 0xffff disassociates any meter instance (see INVALID below). */
+       __le16 new_meter_instance_id;
+       #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+       __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+/* Status-only response for the cfg command. */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_em_flow_alloc */
+/* Input (112 bytes) */
+/* Allocates an exact-match (EM) flow entry.  FLAGS_PATH selects TX/RX;
+ * each ENABLES_* bit apparently gates the match field of the same name -
+ * confirm against the HWRM spec.  Fixed 112-byte firmware wire layout:
+ * do not reorder or repack fields.
+ */
+struct hwrm_cfa_em_flow_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH                    0x1UL
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_TX                (0x0UL << 0)
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX                (0x1UL << 0)
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_LAST    CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_BYTE_CTR                0x2UL
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PKT_CTR                 0x4UL
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DECAP                   0x8UL
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_ENCAP                   0x10UL
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DROP                    0x20UL
+       #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_METER                   0x40UL
+       __le32 enables;
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_L2_FILTER_ID          0x1UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_TYPE           0x2UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_ID     0x4UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_MACADDR           0x8UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_MACADDR           0x10UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_OVLAN_VID     0x20UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IVLAN_VID     0x40UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ETHERTYPE     0x80UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_IPADDR            0x100UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_IPADDR            0x200UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IPADDR_TYPE           0x400UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IP_PROTOCOL           0x800UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_PORT              0x1000UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_PORT              0x2000UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_ID                0x4000UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID       0x8000UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ENCAP_RECORD_ID      0x10000UL
+       #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_METER_INSTANCE_ID    0x20000UL
+       __le64 l2_filter_id;
+       u8 tunnel_type;
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL       0x0UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN    0x1UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE    0x2UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE    0x3UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP             0x4UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE           0x5UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS             0x6UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT              0x7UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE    0x8UL
+       #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL       0xffUL
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 tunnel_id;
+       u8 src_macaddr[6];
+       /* NOTE(review): meter_instance_id sits between the two MAC arrays;
+        * unusual but this is the firmware-defined layout - do not "fix".
+        */
+       __le16 meter_instance_id;
+       #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID   0xffffUL
+       u8 dst_macaddr[6];
+       __le16 ovlan_vid;
+       __le16 ivlan_vid;
+       __be16 ethertype;
+       u8 ip_addr_type;
+       #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN         0x0UL
+       #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV4    0x4UL
+       #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6    0x6UL
+       u8 ip_protocol;
+       #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UNKNOWN          0x0UL
+       #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_TCP              0x6UL
+       #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP              0x11UL
+       u8 unused_2;
+       u8 unused_3;
+       __be32 src_ipaddr[4];
+       __be32 dst_ipaddr[4];
+       __be16 src_port;
+       __be16 dst_port;
+       __le16 dst_id;
+       __le16 mirror_vnic_id;
+       __le32 encap_record_id;
+       __le32 unused_4;
+};
+
+/* Output (24 bytes) */
+/* Response: 64-bit EM filter handle plus a flow id. */
+struct hwrm_cfa_em_flow_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 em_filter_id;
+       __le32 flow_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_cfa_em_flow_free */
+/* Input (24 bytes) */
+/* Request carrying the em_filter_id (from the alloc response) of the
+ * exact-match flow to release.
+ */
+struct hwrm_cfa_em_flow_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 em_filter_id;
+};
+
+/* Output (16 bytes) */
+/* Status-only response for the free command. */
+struct hwrm_cfa_em_flow_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_em_flow_cfg */
+/* Input (48 bytes) */
+/* Re-targets an existing EM flow; mirrors hwrm_cfa_ntuple_filter_cfg but
+ * keyed by em_filter_id.  ENABLES_* bits apparently gate the new_* fields
+ * of the same name - confirm against the HWRM spec.
+ */
+struct hwrm_cfa_em_flow_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_DST_ID              0x1UL
+       #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID     0x2UL
+       #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID  0x4UL
+       __le32 unused_0;
+       __le64 em_filter_id;
+       __le32 new_dst_id;
+       __le32 new_mirror_vnic_id;
+       __le16 new_meter_instance_id;
+       #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+       __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+/* Status-only response for the cfg command. */
+struct hwrm_cfa_em_flow_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_meter_profile_alloc */
+/* Input (40 bytes) */
+/* Allocates a meter profile.  Each of the four rate/burst words packs a
+ * 28-bit value (bits 27:0), a bits/bytes scale flag (bit 28) and a 3-bit
+ * unit selector (bits 31:29) - see the *_MASK/_SFT defines below.
+ */
+struct hwrm_cfa_meter_profile_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 flags;
+       #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH              0x1UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_TX          0x0UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX          0x1UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_LAST    CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX
+       /* Metering algorithm selector: RFC 2697 / 2698 / 4115. */
+       u8 meter_type;
+       #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2697    0x0UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2698    0x1UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115    0x2UL
+       __le16 reserved1;
+       __le32 reserved2;
+       __le32 commit_rate;
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_SFT 0
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE      0x10000000UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_LAST    CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST    CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID
+       __le32 commit_burst;
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_SFT 0
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE     0x10000000UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_LAST    CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST    CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID
+       __le32 excess_peak_rate;
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_LAST    CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST    CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
+       __le32 excess_peak_burst;
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_LAST    CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST    CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
+};
+
+/* Output (16 bytes) */
+/* Response: 16-bit meter profile handle; 0xffff marks an invalid id. */
+struct hwrm_cfa_meter_profile_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 meter_profile_id;
+       #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID 0xffffUL
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_cfa_meter_profile_free */
+/* Input (24 bytes) */
+/* Request carrying the meter_profile_id (from the alloc response) of the
+ * meter profile to release; FLAGS_PATH selects the TX or RX path.
+ */
+struct hwrm_cfa_meter_profile_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 flags;
+       #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH               0x1UL
+       #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_TX           0x0UL
+       #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX           0x1UL
+       #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_LAST    CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX
+       u8 unused_0;
+       __le16 meter_profile_id;
+       #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID 0xffffUL
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+/* Status-only response for the free command. */
+struct hwrm_cfa_meter_profile_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_meter_profile_cfg */
+/* Input (40 bytes) */
+/* Reconfigures an existing meter profile; the rate/burst words use the
+ * same 28-bit value + scale bit (28) + unit field (31:29) packing as
+ * hwrm_cfa_meter_profile_alloc_input.
+ */
+struct hwrm_cfa_meter_profile_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 flags;
+       #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH                0x1UL
+       #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_TX    0x0UL
+       #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX    0x1UL
+       #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_LAST    CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX
+       u8 meter_type;
+       #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2697      0x0UL
+       #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2698      0x1UL
+       #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115      0x2UL
+       __le16 meter_profile_id;
+       #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID 0xffffUL
+       __le32 reserved;
+       __le32 commit_rate;
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_SFT 0
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE         0x10000000UL
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BITS  (0x0UL << 28)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_LAST    CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST    CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_INVALID
+       __le32 commit_burst;
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_SFT 0
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE       0x10000000UL
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_LAST    CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST    CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID
+       __le32 excess_peak_rate;
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE   0x10000000UL
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_LAST    CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST    CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
+       __le32 excess_peak_burst;
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE  0x10000000UL
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_LAST    CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST    CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
+};
+
+/* Output (16 bytes) */
+/* Status-only response for the cfg command. */
+struct hwrm_cfa_meter_profile_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_meter_instance_alloc */
+/* Input (24 bytes) */
+/* Allocates a meter instance bound to an existing meter_profile_id;
+ * FLAGS_PATH selects the TX or RX path.
+ */
+struct hwrm_cfa_meter_instance_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 flags;
+       #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH     0x1UL
+       #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_TX         0x0UL
+       #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX         0x1UL
+       #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_LAST    CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX
+       u8 unused_0;
+       __le16 meter_profile_id;
+       #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID 0xffffUL
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+/* Response: 16-bit meter instance handle; 0xffff marks an invalid id. */
+struct hwrm_cfa_meter_instance_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 meter_instance_id;
+       #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID 0xffffUL
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_cfa_meter_instance_free */
+/* Input (24 bytes) */
+/* Request carrying the meter_instance_id (from the alloc response) of the
+ * meter instance to release; FLAGS_PATH selects the TX or RX path.
+ */
+struct hwrm_cfa_meter_instance_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 flags;
+       #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH              0x1UL
+       #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_TX          0x0UL
+       #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX          0x1UL
+       #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_LAST    CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX
+       u8 unused_0;
+       __le16 meter_instance_id;
+       #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID 0xffffUL
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+/* Status-only response for the free command. */
+struct hwrm_cfa_meter_instance_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc */
+/* Input (104 bytes) */
+/* Allocates a tunnel-decapsulation filter.  Matches on inner/outer VLANs
+ * (ovlan/ivlan vs. t_ovlan/t_ivlan), MACs, IPs and L4 ports; each
+ * ENABLES_* bit apparently gates the match field of the same name -
+ * confirm against the HWRM spec.  Fixed 104-byte firmware wire layout.
+ */
+struct hwrm_cfa_decap_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL         0x1UL
+       __le32 enables;
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE     0x1UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID       0x2UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR     0x4UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR     0x8UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID       0x10UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID       0x20UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID     0x40UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID     0x80UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE       0x100UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR      0x200UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR      0x400UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE     0x800UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL     0x1000UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT         0x2000UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT         0x4000UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID           0x8000UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID  0x10000UL
+       __le32 tunnel_id;
+       u8 tunnel_type;
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL  0x0UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN      0x1UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE      0x2UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE      0x3UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP       0x4UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE     0x5UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS       0x6UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT         0x7UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE      0x8UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL  0xffUL
+       u8 unused_0;
+       __le16 unused_1;
+       u8 src_macaddr[6];
+       u8 unused_2;
+       u8 unused_3;
+       u8 dst_macaddr[6];
+       __le16 ovlan_vid;
+       __le16 ivlan_vid;
+       __le16 t_ovlan_vid;
+       __le16 t_ivlan_vid;
+       __be16 ethertype;
+       u8 ip_addr_type;
+       #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN   0x0UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4      0x4UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6      0x6UL
+       u8 ip_protocol;
+       #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN    0x0UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP         0x6UL
+       #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP         0x11UL
+       u8 unused_4;
+       u8 unused_5;
+       u8 unused_6[3];
+       u8 unused_7;
+       __be32 src_ipaddr[4];
+       __be32 dst_ipaddr[4];
+       __be16 src_port;
+       __be16 dst_port;
+       __le16 dst_id;
+       __le16 l2_ctxt_ref_id;
+};
+
+/* Output (16 bytes) */
+/* Response: 32-bit decap filter handle. */
+struct hwrm_cfa_decap_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 decap_filter_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free */
+/* Input (24 bytes) */
+/* Request carrying the decap_filter_id (from the alloc response) of the
+ * decapsulation filter to release.
+ */
+struct hwrm_cfa_decap_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 decap_filter_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Status-only response for the free command. */
+struct hwrm_cfa_decap_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc */
+/* Input (128 bytes) */
+/* Allocates a generic CFA flow: match keys (MACs, VLAN TCIs, IPs with
+ * mask lengths, L4 ports with masks) plus ACTION_FLAGS_* selected actions
+ * such as forward, drop, meter, NAT and L2 header rewrite.  Fixed
+ * 128-byte firmware wire layout - do not reorder fields.
+ */
+struct hwrm_cfa_flow_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 flags;
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL             0x1UL
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK              0x6UL
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT               1
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE             (0x0UL << 1)
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE              (0x1UL << 1)
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO              (0x2UL << 1)
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST    CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK              0x38UL
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT               3
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2               (0x0UL << 3)
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4             (0x1UL << 3)
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6             (0x2UL << 3)
+       #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST    CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+       __le16 src_fid;
+       __le32 tunnel_handle;
+       __le16 action_flags;
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD                 0x1UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE     0x2UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP                0x4UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER               0x8UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL              0x10UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC     0x20UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST            0x40UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS   0x80UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE  0x100UL
+       #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT      0x200UL
+       __le16 dst_fid;
+       __be16 l2_rewrite_vlan_tpid;
+       __be16 l2_rewrite_vlan_tci;
+       __le16 act_meter_id;
+       __le16 ref_flow_handle;
+       __be16 ethertype;
+       __be16 outer_vlan_tci;
+       /* MAC addresses carried as three big-endian 16-bit words. */
+       __be16 dmac[3];
+       __be16 inner_vlan_tci;
+       __be16 smac[3];
+       u8 ip_dst_mask_len;
+       u8 ip_src_mask_len;
+       __be32 ip_dst[4];
+       __be32 ip_src[4];
+       __be16 l4_src_port;
+       __be16 l4_src_port_mask;
+       __be16 l4_dst_port;
+       __be16 l4_dst_port_mask;
+       __be32 nat_ip_address[4];
+       __be16 l2_rewrite_dmac[3];
+       __be16 nat_port;
+       __be16 l2_rewrite_smac[3];
+       u8 ip_proto;
+       u8 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Response: 16-bit flow handle used by the free/info/cfg commands. */
+struct hwrm_cfa_flow_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 flow_handle;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_cfa_flow_free */
+/* Input (24 bytes) */
+/* Request: frees the CFA flow identified by flow_handle (obtained from
+ * hwrm_cfa_flow_alloc). First five fields are the standard HWRM request
+ * header (req_type/cmpl_ring/seq_id/target_id/resp_addr).
+ */
+struct hwrm_cfa_flow_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 flow_handle;
+       __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+/* Response: 'packet' and 'byte' are 64-bit counters returned for the freed
+ * flow (presumably its final traffic totals — confirm with the HWRM spec).
+ */
+struct hwrm_cfa_flow_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 packet;
+       __le64 byte;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_flow_info */
+/* Input (24 bytes) */
+/* Request: queries information about the CFA flow given by flow_handle. */
+struct hwrm_cfa_flow_info_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 flow_handle;
+       __le16 unused_0[3];
+};
+
+/* Output (56 bytes) */
+/* Response: returns the flow's endpoints (src_fid/dst_fid), internal lookup
+ * state (l2_ctxt_id, em_info, tcam_info, vfp_tcam_info, ar_id) and the
+ * tunnel_handle associated with the flow. Exact semantics of the opaque
+ * *_info words are firmware-defined — see the HWRM spec.
+ */
+struct hwrm_cfa_flow_info_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 flags;
+       u8 profile;
+       __le16 src_fid;
+       __le16 dst_fid;
+       __le16 l2_ctxt_id;
+       __le64 em_info;
+       __le64 tcam_info;
+       __le64 vfp_tcam_info;
+       __le16 ar_id;
+       __le16 flow_handle;
+       __le32 tunnel_handle;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_flow_flush */
+/* Input (24 bytes) */
+/* Request: flushes CFA flows; 'flags' modifies the operation (no flag bits
+ * are defined in this header revision).
+ */
+struct hwrm_cfa_flow_flush_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Response: status-only (standard HWRM response header + 'valid' byte). */
+struct hwrm_cfa_flow_flush_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_flow_stats */
+/* Input (40 bytes) */
+/* Request: batched stats query for up to 10 flows. num_flows says how many
+ * of flow_handle_0..flow_handle_9 are in use.
+ */
+struct hwrm_cfa_flow_stats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 num_flows;
+       __le16 flow_handle_0;
+       __le16 flow_handle_1;
+       __le16 flow_handle_2;
+       __le16 flow_handle_3;
+       __le16 flow_handle_4;
+       __le16 flow_handle_5;
+       __le16 flow_handle_6;
+       __le16 flow_handle_7;
+       __le16 flow_handle_8;
+       __le16 flow_handle_9;
+       __le16 unused_0;
+};
+
+/* Output (176 bytes) */
+/* Response: packet_N / byte_N are the counters for flow_handle_N of the
+ * request, in the same positional order.
+ */
+struct hwrm_cfa_flow_stats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 packet_0;
+       __le64 packet_1;
+       __le64 packet_2;
+       __le64 packet_3;
+       __le64 packet_4;
+       __le64 packet_5;
+       __le64 packet_6;
+       __le64 packet_7;
+       __le64 packet_8;
+       __le64 packet_9;
+       __le64 byte_0;
+       __le64 byte_1;
+       __le64 byte_2;
+       __le64 byte_3;
+       __le64 byte_4;
+       __le64 byte_5;
+       __le64 byte_6;
+       __le64 byte_7;
+       __le64 byte_8;
+       __le64 byte_9;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_vf_pair_alloc */
+/* Input (32 bytes) */
+/* Request: pairs VF vf_a_id with VF vf_b_id under the NUL-terminated name
+ * in pair_name[32]; the name is later used by vf_pair_free/info.
+ */
+struct hwrm_cfa_vf_pair_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 vf_a_id;
+       __le16 vf_b_id;
+       __le32 unused_0;
+       char pair_name[32];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_cfa_vf_pair_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_vf_pair_free */
+/* Input (24 bytes) */
+/* Request: frees the VF pair identified by pair_name (as given at alloc). */
+struct hwrm_cfa_vf_pair_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       char pair_name[32];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_cfa_vf_pair_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_cfa_vf_pair_info */
+/* Input (32 bytes) */
+/* Request: looks up a VF pair. The LOOKUP_TYPE flag selects between lookup
+ * by vf_pair_index and lookup by vf_pair_name (which flag value selects
+ * which key is firmware-defined — confirm with the HWRM spec).
+ */
+struct hwrm_cfa_vf_pair_info_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define CFA_VF_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE              0x1UL
+       __le16 vf_pair_index;
+       u8 unused_0;
+       u8 unused_1;
+       char vf_pair_name[32];
+};
+
+/* Output (64 bytes) */
+/* Response: both members' FIDs/indices, the pair's state
+ * (ALLOCATED/ACTIVE), its name, and next_vf_pair_index for iterating over
+ * all pairs.
+ */
+struct hwrm_cfa_vf_pair_info_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 next_vf_pair_index;
+       __le16 vf_a_fid;
+       __le16 vf_a_index;
+       __le16 vf_b_fid;
+       __le16 vf_b_index;
+       u8 pair_state;
+       #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED         0x1UL
+       #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE    0x2UL
+       u8 unused_0;
+       __le32 unused_1;
+       char pair_name[32];
+       __le32 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 unused_5;
+       u8 valid;
+};
+
+/* hwrm_cfa_vfr_alloc */
+/* Input (32 bytes) */
+/* Request: allocates a VF representor for vf_id under vfr_name[32]. */
+struct hwrm_cfa_vfr_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 vf_id;
+       __le16 reserved;
+       __le32 unused_0;
+       char vfr_name[32];
+};
+
+/* Output (16 bytes) */
+/* Response: rx_cfa_code / tx_cfa_action steer representor traffic; the
+ * driver uses them to match RX completions and tag TX for this VF
+ * (exact usage defined by the CFA block — confirm with the HWRM spec).
+ */
+struct hwrm_cfa_vfr_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 rx_cfa_code;
+       __le16 tx_cfa_action;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_cfa_vfr_free */
+/* Input (24 bytes) */
+/* Request: frees the VF representor identified by vfr_name. */
+struct hwrm_cfa_vfr_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       char vfr_name[32];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_cfa_vfr_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query */
+/* Input (24 bytes) */
+/* Request: queries the configured UDP destination port for a tunnel type
+ * (VXLAN or Geneve).
+ */
+struct hwrm_tunnel_dst_port_query_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 tunnel_type;
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN       0x1UL
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE      0x5UL
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+/* Response: tunnel_dst_port_id is the firmware handle;
+ * tunnel_dst_port_val is the port number in network byte order (__be16).
+ */
+struct hwrm_tunnel_dst_port_query_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 tunnel_dst_port_id;
+       __be16 tunnel_dst_port_val;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc */
+/* Input (24 bytes) */
+/* Request: registers a UDP destination port (network byte order) for a
+ * tunnel type so the NIC recognizes VXLAN/Geneve traffic on that port.
+ */
+struct hwrm_tunnel_dst_port_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 tunnel_type;
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN       0x1UL
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE      0x5UL
+       u8 unused_0;
+       __be16 tunnel_dst_port_val;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+/* Response: tunnel_dst_port_id is the handle to pass to
+ * hwrm_tunnel_dst_port_free.
+ */
+struct hwrm_tunnel_dst_port_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 tunnel_dst_port_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_free */
+/* Input (24 bytes) */
+/* Request: releases a previously allocated tunnel destination port by its
+ * tunnel_dst_port_id handle.
+ */
+struct hwrm_tunnel_dst_port_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 tunnel_type;
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN         0x1UL
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       0x5UL
+       u8 unused_0;
+       __le16 tunnel_dst_port_id;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_tunnel_dst_port_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc */
+/* Input (32 bytes) */
+/* Request: allocates a statistics context. stats_dma_addr is the host DMA
+ * buffer firmware writes counters into; update_period_ms is the refresh
+ * interval; the ROCE flag marks the context for RoCE use.
+ */
+struct hwrm_stat_ctx_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 stats_dma_addr;
+       __le32 update_period_ms;
+       u8 stat_ctx_flags;
+       #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE              0x1UL
+       u8 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Response: stat_ctx_id is the handle used by stat_ctx_free/query/clr_stats. */
+struct hwrm_stat_ctx_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 stat_ctx_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_stat_ctx_free */
+/* Input (24 bytes) */
+/* Request: frees the statistics context stat_ctx_id. */
+struct hwrm_stat_ctx_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 stat_ctx_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Response: echoes the freed stat_ctx_id. */
+struct hwrm_stat_ctx_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 stat_ctx_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_stat_ctx_query */
+/* Input (24 bytes) */
+/* Request: reads the current counters of statistics context stat_ctx_id. */
+struct hwrm_stat_ctx_query_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 stat_ctx_id;
+       __le32 unused_0;
+};
+
+/* Output (176 bytes) */
+/* Response: per-context TX/RX counters split by unicast/multicast/broadcast
+ * packets and bytes, error/drop packets, plus RX aggregation (TPA/LRO-style)
+ * pkts/bytes/events/aborts.
+ */
+struct hwrm_stat_ctx_query_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 tx_ucast_pkts;
+       __le64 tx_mcast_pkts;
+       __le64 tx_bcast_pkts;
+       __le64 tx_err_pkts;
+       __le64 tx_drop_pkts;
+       __le64 tx_ucast_bytes;
+       __le64 tx_mcast_bytes;
+       __le64 tx_bcast_bytes;
+       __le64 rx_ucast_pkts;
+       __le64 rx_mcast_pkts;
+       __le64 rx_bcast_pkts;
+       __le64 rx_err_pkts;
+       __le64 rx_drop_pkts;
+       __le64 rx_ucast_bytes;
+       __le64 rx_mcast_bytes;
+       __le64 rx_bcast_bytes;
+       __le64 rx_agg_pkts;
+       __le64 rx_agg_bytes;
+       __le64 rx_agg_events;
+       __le64 rx_agg_aborts;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats */
+/* Input (24 bytes) */
+/* Request: zeroes the counters of statistics context stat_ctx_id. */
+struct hwrm_stat_ctx_clr_stats_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 stat_ctx_id;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_stat_ctx_clr_stats_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+/* Request: resets an embedded processor (BOOT/MGMT/NETCTRL/ROCE/HOST).
+ * selfrst_status selects the accompanying self-reset level: none, ASAP,
+ * or on the next PCIe reset.
+ */
+struct hwrm_fw_reset_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 embedded_proc_type;
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT               0x0UL
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT               0x1UL
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL    0x2UL
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE               0x3UL
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST               0x4UL
+       u8 selfrst_status;
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE    0x0UL
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP    0x1UL
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST         0x2UL
+       u8 host_idx;
+       u8 unused_0[5];
+};
+
+/* Output (16 bytes) */
+/* Response: selfrst_status reports the self-reset level actually in effect. */
+struct hwrm_fw_reset_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 selfrst_status;
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE           0x0UL
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP           0x1UL
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST       0x2UL
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+/* Request: queries the reset status of the given embedded processor. */
+struct hwrm_fw_qstatus_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 embedded_proc_type;
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT             0x0UL
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT             0x1UL
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL          0x2UL
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE             0x3UL
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST             0x4UL
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+/* Response: selfrst_status = pending self-reset level (none/ASAP/PCIe-reset). */
+struct hwrm_fw_qstatus_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 selfrst_status;
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE         0x0UL
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP         0x1UL
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     0x2UL
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_fw_set_time */
+/* Input (32 bytes) */
+/* Request: sets the firmware wall-clock time (broken-down calendar fields
+ * plus milliseconds). 'zone' is a timezone code: 0 = UTC, 0xffff = unknown;
+ * other values are firmware-defined — confirm with the HWRM spec.
+ */
+struct hwrm_fw_set_time_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 year;
+       #define FW_SET_TIME_REQ_YEAR_UNKNOWN                       0x0UL
+       u8 month;
+       u8 day;
+       u8 hour;
+       u8 minute;
+       u8 second;
+       u8 unused_0;
+       __le16 millisecond;
+       __le16 zone;
+       #define FW_SET_TIME_REQ_ZONE_UTC                           0x0UL
+       #define FW_SET_TIME_REQ_ZONE_UNKNOWN                       0xffffUL
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_fw_set_time_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_fw_get_time */
+/* Input (16 bytes) */
+/* Request: header-only; no payload. */
+struct hwrm_fw_get_time_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+/* Response: firmware wall-clock time in the same broken-down format as
+ * hwrm_fw_set_time (year may be 0 = unknown; zone 0 = UTC, 0xffff = unknown).
+ */
+struct hwrm_fw_get_time_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 year;
+       #define FW_GET_TIME_RESP_YEAR_UNKNOWN                      0x0UL
+       u8 month;
+       u8 day;
+       u8 hour;
+       u8 minute;
+       u8 second;
+       u8 unused_0;
+       __le16 millisecond;
+       __le16 zone;
+       #define FW_GET_TIME_RESP_ZONE_UTC                          0x0UL
+       #define FW_GET_TIME_RESP_ZONE_UNKNOWN                      0xffffUL
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_fw_set_structured_data */
+/* Input (32 bytes) */
+/* Request: pushes structured data (e.g. LLDP/DCBX config blobs — see the
+ * subtype list in hwrm_fw_get_structured_data) to firmware. src_data_addr
+ * is the host DMA address of the data, data_len its length, hdr_cnt the
+ * number of structure headers in the buffer.
+ */
+struct hwrm_fw_set_structured_data_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 src_data_addr;
+       __le16 data_len;
+       u8 hdr_cnt;
+       u8 unused_0[5];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only; detailed failure reason is in the cmd_err below. */
+struct hwrm_fw_set_structured_data_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Per-command error detail: bad header count, bad format, or bad struct id. */
+struct hwrm_fw_set_structured_data_cmd_err {
+       u8 code;
+       #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN       0x0UL
+       #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT   0x1UL
+       #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT       0x2UL
+       #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID         0x3UL
+       u8 unused_0[7];
+};
+
+/* hwrm_fw_get_structured_data */
+/* Input (32 bytes) */
+/* Request: reads structured data into the host buffer at dest_data_addr
+ * (capacity data_len). structure_id selects the structure; subtype selects
+ * the variant — the NEAR_BRIDGE_*/NON_TPMR_* names correspond to LLDP/DCBX
+ * admin/peer/operational data sets. 'count' limits how many entries to
+ * return (0xffff subtype = all).
+ */
+struct hwrm_fw_get_structured_data_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 dest_data_addr;
+       __le16 data_len;
+       __le16 structure_id;
+       __le16 subtype;
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL             0xffffUL
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER  0x201UL
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+       u8 count;
+       u8 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Response: hdr_cnt = number of structure headers written to the DMA buffer. */
+struct hwrm_fw_get_structured_data_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 hdr_cnt;
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Per-command error detail: BAD_ID = unknown structure_id/subtype. */
+struct hwrm_fw_get_structured_data_cmd_err {
+       u8 code;
+       #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN       0x0UL
+       #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID         0x3UL
+       u8 unused_0[7];
+};
+
+/* hwrm_fw_ipc_mailbox */
+/* Input (32 bytes) */
+/* Request: posts an event (event_id + event_data1/2, per-port) to the
+ * firmware IPC mailbox. Event semantics are firmware-defined — confirm
+ * with the HWRM spec.
+ */
+struct hwrm_fw_ipc_mailbox_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 flags;
+       u8 unused_0;
+       u8 event_id;
+       u8 port_id;
+       __le32 event_data1;
+       __le32 event_data2;
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_fw_ipc_mailbox_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Per-command error detail: BAD_ID = unrecognized event/port id. */
+struct hwrm_fw_ipc_mailbox_cmd_err {
+       u8 code;
+       #define FW_IPC_MAILBOX_CMD_ERR_CODE_UNKNOWN                0x0UL
+       #define FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID                 0x3UL
+       u8 unused_0[7];
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (128 bytes) */
+/* Request: PF forwards an encapsulated HWRM request (up to 104 bytes in
+ * encap_request) for firmware to EXECUTE on behalf of the function named
+ * by encap_resp_target_id (typically a VF whose command was trapped).
+ */
+struct hwrm_exec_fwd_resp_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 encap_request[26];
+       __le16 encap_resp_target_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_exec_fwd_resp_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (128 bytes) */
+/* Request: same layout as hwrm_exec_fwd_resp, but tells firmware to REJECT
+ * the encapsulated request instead of executing it.
+ */
+struct hwrm_reject_fwd_resp_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 encap_request[26];
+       __le16 encap_resp_target_id;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_reject_fwd_resp_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+/* Request: forwards a prepared HWRM response (up to 96 bytes in encap_resp)
+ * to the target function's completion ring / response buffer at
+ * encap_resp_addr.
+ */
+struct hwrm_fwd_resp_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 encap_resp_target_id;
+       __le16 encap_resp_cmpl_ring;
+       __le16 encap_resp_len;
+       u8 unused_0;
+       u8 unused_1;
+       __le64 encap_resp_addr;
+       __le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_fwd_resp_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+/* Request: forwards a 16-byte async event completion record
+ * (encap_async_event_cmpl) to the function named by
+ * encap_async_event_target_id.
+ */
+struct hwrm_fwd_async_event_cmpl_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 encap_async_event_target_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2[3];
+       u8 unused_3;
+       __le32 encap_async_event_cmpl[4];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_fwd_async_event_cmpl_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+/* Request: header-only; reads the device temperature sensor. */
+struct hwrm_temp_monitor_query_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+/* Response: 'temp' is the sensor reading (units not stated here; presumably
+ * degrees Celsius — confirm with the HWRM spec).
+ */
+struct hwrm_temp_monitor_query_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 temp;
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_wol_filter_alloc */
+/* Input (64 bytes) */
+/* Request: allocates a Wake-on-LAN filter on port_id. wol_type selects
+ * magic-packet or bitmap-pattern (BMP) wake. The 'enables' bits declare
+ * which optional fields below are populated (mac_address for MAGICPKT;
+ * pattern offset/size and DMA addresses of pattern/mask buffers for BMP).
+ */
+struct hwrm_wol_filter_alloc_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       __le32 enables;
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS            0x1UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET         0x2UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE      0x4UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR      0x8UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR     0x10UL
+       #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE     0x20UL
+       __le16 port_id;
+       u8 wol_type;
+       #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT             0x0UL
+       #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP                  0x1UL
+       #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID              0xffUL
+       u8 unused_0;
+       __le32 unused_1;
+       u8 mac_address[6];
+       __le16 pattern_offset;
+       __le16 pattern_buf_size;
+       __le16 pattern_mask_size;
+       __le32 unused_2;
+       __le64 pattern_buf_addr;
+       __le64 pattern_mask_addr;
+};
+
+/* Output (16 bytes) */
+/* Response: wol_filter_id is the handle for wol_filter_free/qcfg. */
+struct hwrm_wol_filter_alloc_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 wol_filter_id;
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* hwrm_wol_filter_free */
+/* Input (32 bytes) */
+/* Request: frees either one WoL filter (wol_filter_id, when its enable bit
+ * is set) or all filters on port_id (FREE_ALL_WOL_FILTERS flag).
+ */
+struct hwrm_wol_filter_free_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS     0x1UL
+       __le32 enables;
+       #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID           0x1UL
+       __le16 port_id;
+       u8 wol_filter_id;
+       u8 unused_0[5];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_wol_filter_free_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg */
+/* Input (56 bytes) */
+/* Request: iterates configured WoL filters on port_id starting at 'handle';
+ * firmware DMA-writes the filter's pattern and mask into the host buffers
+ * at pattern_buf_addr / pattern_mask_addr (capacities *_size).
+ */
+struct hwrm_wol_filter_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 handle;
+       __le32 unused_0;
+       __le64 pattern_buf_addr;
+       __le16 pattern_buf_size;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3[3];
+       u8 unused_4;
+       __le64 pattern_mask_addr;
+       __le16 pattern_mask_size;
+       __le16 unused_5[3];
+};
+
+/* Output (32 bytes) */
+/* Response: describes one filter (id, type, MAC for magic-packet, pattern
+ * geometry for BMP) plus next_handle for continuing the iteration.
+ */
+struct hwrm_wol_filter_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 next_handle;
+       u8 wol_filter_id;
+       u8 wol_type;
+       #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT             0x0UL
+       #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP                  0x1UL
+       #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID              0xffUL
+       __le32 unused_0;
+       u8 mac_address[6];
+       __le16 pattern_offset;
+       __le16 pattern_size;
+       __le16 pattern_mask_size;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg */
+/* Input (40 bytes) */
+/* Request: asks why port_id last woke the system; firmware DMA-writes the
+ * waking packet into the host buffer at wol_pkt_buf_addr (capacity
+ * wol_pkt_buf_size).
+ */
+struct hwrm_wol_reason_qcfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2[3];
+       u8 unused_3;
+       __le64 wol_pkt_buf_addr;
+       __le16 wol_pkt_buf_size;
+       __le16 unused_4[3];
+};
+
+/* Output (16 bytes) */
+/* Response: the matching filter id, the wake reason (magic packet / bitmap
+ * pattern / invalid), and the length of the captured wake packet.
+ */
+struct hwrm_wol_reason_qcfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 wol_filter_id;
+       u8 wol_reason;
+       #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT           0x0UL
+       #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP                0x1UL
+       #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID    0xffUL
+       u8 wol_pkt_len;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_dbg_read_direct */
+/* Input (32 bytes) */
+/* Request: debug read of read_len32 32-bit words from device address
+ * read_addr, DMA'd to the host buffer at host_dest_addr.
+ */
+struct hwrm_dbg_read_direct_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_dest_addr;
+       __le32 read_addr;
+       __le32 read_len32;
+};
+
+/* Output (16 bytes) */
+/* Response: status-only; the data itself lands in the host DMA buffer. */
+struct hwrm_dbg_read_direct_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_dbg_write_direct */
+/* Input (32 bytes) */
+/* Request: debug write of write_len32 32-bit words (max 8, carried inline
+ * in write_data) to device address write_addr.
+ */
+struct hwrm_dbg_write_direct_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 write_addr;
+       __le32 write_len32;
+       __le32 write_data[8];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_dbg_write_direct_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_dbg_read_indirect */
+/* Input (40 bytes) */
+/* Request: debug read of num_of_entries entries starting at start_index
+ * from an indirectly-addressed device table (TCAMs, stat contexts, MHB,
+ * PCIe global registers, ... — selected by indirect_access_type), DMA'd to
+ * host_dest_addr (capacity host_dest_addr_len bytes).
+ */
+struct hwrm_dbg_read_indirect_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_dest_addr;
+       __le32 host_dest_addr_len;
+       u8 indirect_access_type;
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB    0x10UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL
+       #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 start_index;
+       __le32 num_of_entries;
+};
+
+/* Output (16 bytes) */
+/* Response: status-only; data lands in the host DMA buffer. */
+struct hwrm_dbg_read_indirect_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_dbg_write_indirect */
+/* Input (40 bytes) */
+/* Request: debug write counterpart of hwrm_dbg_read_indirect; writes
+ * num_of_entries entries starting at start_index to the table selected by
+ * indirect_access_type, with up to 8 inline 32-bit words in write_data.
+ */
+struct hwrm_dbg_write_indirect_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 indirect_access_type;
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB   0x10UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL
+       #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL
+       u8 unused_0;
+       __le16 unused_1;
+       __le32 start_index;
+       __le32 num_of_entries;
+       __le32 unused_2;
+       __le32 write_data[8];
+};
+
+/* Output (16 bytes) */
+/* Response: status-only. */
+struct hwrm_dbg_write_indirect_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_dbg_dump */
+/* Debug dump: firmware fills the host buffer at 'host_dbg_dump_addr'
+ * (up to 'host_dbg_dump_addr_len' bytes).  'handle'/'nexthandle' look like
+ * an iteration cursor for retrieving the dump in chunks — confirm against
+ * the HWRM spec.
+ */
+/* Input (40 bytes) */
+struct hwrm_dbg_dump_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 handle;
+       __le32 unused_0;
+       __le64 host_dbg_dump_addr;
+       __le64 host_dbg_dump_addr_len;
+};
+
+/* Output (24 bytes) */
+/* 'dbg_data_len' = bytes written for this chunk; 'valid' marks completion. */
+struct hwrm_dbg_dump_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 nexthandle;
+       __le32 dbg_data_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_dbg_erase_nvm */
+/* Debug NVM erase; FLAGS_ERASE_ALL presumably erases the entire NVM —
+ * destructive, debug-only.
+ */
+/* Input (24 bytes) */
+struct hwrm_dbg_erase_nvm_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 flags;
+       #define DBG_ERASE_NVM_REQ_FLAGS_ERASE_ALL                   0x1UL
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_dbg_erase_nvm_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_dbg_cfg */
+/* Debug configuration; the only visible flag toggles UART logging. */
+/* Input (24 bytes) */
+struct hwrm_dbg_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 flags;
+       #define DBG_CFG_REQ_FLAGS_UART_LOG                          0x1UL
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_dbg_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk */
+/* Raw (directory-less) NVM block write: copies 'len' bytes from the host
+ * buffer at 'host_src_addr' to raw NVM offset 'dest_addr'.
+ */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_write_blk_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_src_addr;
+       __le32 dest_addr;
+       __le32 len;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_raw_write_blk_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Reads 'len' bytes at 'offset' within NVM directory entry 'dir_idx' into
+ * the host buffer at 'host_dest_addr'.
+ */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_dest_addr;
+       __le16 dir_idx;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 offset;
+       __le32 len;
+       __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_read_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_raw_dump */
+/* Raw NVM read: copies 'len' bytes from raw NVM 'offset' (no directory
+ * indirection) into the host buffer at 'host_dest_addr'.
+ */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_dump_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_dest_addr;
+       __le32 offset;
+       __le32 len;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_raw_dump_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries */
+/* Dumps the NVM directory table into the host buffer at 'host_dest_addr';
+ * entry count/size come from HWRM_NVM_GET_DIR_INFO below.
+ */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_dest_addr;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_get_dir_entries_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Returns the number of NVM directory entries and the per-entry length,
+ * used to size the buffer passed to HWRM_NVM_GET_DIR_ENTRIES.
+ */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 entries;
+       __le32 entry_length;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_write */
+/* Writes a directory entry (identified by type/ordinal/ext) from the host
+ * buffer at 'host_src_addr'.  'dir_item_length' of 0 presumably means
+ * "keep existing allocation" — confirm against the HWRM spec.
+ */
+/* Input (48 bytes) */
+struct hwrm_nvm_write_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_src_addr;
+       __le16 dir_type;
+       __le16 dir_ordinal;
+       __le16 dir_ext;
+       __le16 dir_attr;
+       __le32 dir_data_length;
+       __le16 option;
+       __le16 flags;
+       #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG            0x1UL
+       __le32 dir_item_length;
+       __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Returns the actual item length and the directory index written. */
+struct hwrm_nvm_write_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 dir_item_length;
+       __le16 dir_idx;
+       u8 unused_0;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Decoded from the response when error_code indicates a command error. */
+struct hwrm_nvm_write_cmd_err {
+       u8 code;
+       #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN                     0x0UL
+       #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR            0x1UL
+       #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE            0x2UL
+       u8 unused_0[7];
+};
+
+/* hwrm_nvm_modify */
+/* In-place modify of an existing directory entry: writes 'len' bytes from
+ * 'host_src_addr' at 'offset' within entry 'dir_idx' (cf. hwrm_nvm_read,
+ * which has the mirrored layout).
+ */
+/* Input (40 bytes) */
+struct hwrm_nvm_modify_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 host_src_addr;
+       __le16 dir_idx;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 offset;
+       __le32 len;
+       __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_modify_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry */
+/* Looks up a directory entry either directly by 'dir_idx' (when the
+ * DIR_IDX_VALID enable bit is set) or by type/ordinal/ext match;
+ * 'opt_ordinal' selects the ordinal comparison (==, >=, >).
+ */
+/* Input (32 bytes) */
+struct hwrm_nvm_find_dir_entry_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID       0x1UL
+       __le16 dir_idx;
+       __le16 dir_type;
+       __le16 dir_ordinal;
+       __le16 dir_ext;
+       u8 opt_ordinal;
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK     0x3UL
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT              0
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ              0x0UL
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE              0x1UL
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT              0x2UL
+       u8 unused_1[3];
+};
+
+/* Output (32 bytes) */
+/* Returns the matched entry's allocated/used lengths, firmware version,
+ * ordinal and index.
+ */
+struct hwrm_nvm_find_dir_entry_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 dir_item_length;
+       __le32 dir_data_length;
+       __le32 fw_ver;
+       __le16 dir_ordinal;
+       __le16 dir_idx;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry */
+/* Erases the NVM directory entry at 'dir_idx'. */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 dir_idx;
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_erase_dir_entry_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info */
+/* Queries NVM device properties: manufacturer/device IDs, sector size and
+ * total/reserved/available capacity in bytes (units presumed — confirm).
+ */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dev_info_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_get_dev_info_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 manufacturer_id;
+       __le16 device_id;
+       __le32 sector_size;
+       __le32 nvram_size;
+       __le32 reserved_size;
+       __le32 available_size;
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry */
+/* Updates directory metadata (ordinal/ext/attr) for entry 'dir_idx';
+ * 'checksum' is only consumed when the ENABLES_CHECKSUM bit is set.
+ */
+/* Input (32 bytes) */
+struct hwrm_nvm_mod_dir_entry_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM              0x1UL
+       __le16 dir_idx;
+       __le16 dir_ordinal;
+       __le16 dir_ext;
+       __le16 dir_attr;
+       __le32 checksum;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_mod_dir_entry_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_verify_update */
+/* Asks firmware to verify a staged update item identified by
+ * type/ordinal/ext (presumably before activation — confirm against spec).
+ */
+/* Input (24 bytes) */
+struct hwrm_nvm_verify_update_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 dir_type;
+       __le16 dir_ordinal;
+       __le16 dir_ext;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_verify_update_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_nvm_install_update */
+/* Installs a previously-written update package ('install_type' selects
+ * NORMAL vs ALL items).  Flags allow erasing unused space, removing
+ * superseded package items and defragmenting during install.
+ */
+/* Input (24 bytes) */
+struct hwrm_nvm_install_update_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 install_type;
+       #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL         0x0UL
+       #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL    0xffffffffUL
+       __le16 flags;
+       #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE    0x1UL
+       #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG     0x2UL
+       #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG     0x4UL
+       __le16 unused_0;
+};
+
+/* Output (24 bytes) */
+/* 'installed_items' is a bitmap/count of items installed (exact semantics
+ * per spec); 'reset_required' tells the host what kind of reset activates
+ * the new image (none, PCI reset, or power cycle).
+ */
+struct hwrm_nvm_install_update_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 installed_items;
+       u8 result;
+       #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS             0x0UL
+       u8 problem_item;
+       #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE          0x0UL
+       #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE      0xffUL
+       u8 reset_required;
+       #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE       0x0UL
+       #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI         0x1UL
+       #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER      0x2UL
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Decoded from the response when error_code indicates a command error. */
+struct hwrm_nvm_install_update_cmd_err {
+       u8 code;
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN    0x0UL
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR           0x1UL
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE           0x2UL
+       u8 unused_0[7];
+};
+
+/* hwrm_nvm_flush */
+/* Flushes pending NVM writes to persistent storage (no parameters). */
+/* Input (16 bytes) */
+struct hwrm_nvm_flush_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_flush_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Decoded from the response when error_code indicates a command error. */
+struct hwrm_nvm_flush_cmd_err {
+       u8 code;
+       #define NVM_FLUSH_CMD_ERR_CODE_UNKNOWN                     0x0UL
+       #define NVM_FLUSH_CMD_ERR_CODE_FAIL                        0x1UL
+       u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_variable */
+/* Reads an NVM configuration variable ('option_num', addressed by up to
+ * four dimension indices) into the host buffer at 'dest_data_addr'.
+ * FLAGS_FACTORY_DFLT presumably selects the factory-default value.
+ */
+/* Input (40 bytes) */
+struct hwrm_nvm_get_variable_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 dest_data_addr;
+       __le16 data_len;
+       __le16 option_num;
+       #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0             0x0UL
+       #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF          0xffffUL
+       __le16 dimensions;
+       __le16 index_0;
+       __le16 index_1;
+       __le16 index_2;
+       __le16 index_3;
+       u8 flags;
+       #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT     0x1UL
+       u8 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Echoes option_num and the actual data length returned. */
+struct hwrm_nvm_get_variable_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 data_len;
+       __le16 option_num;
+       #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0    0x0UL
+       #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF         0xffffUL
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Decoded from the response when error_code indicates a command error. */
+struct hwrm_nvm_get_variable_cmd_err {
+       u8 code;
+       #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN              0x0UL
+       #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST       0x1UL
+       #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR          0x2UL
+       #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT       0x3UL
+       u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_variable */
+/* Writes an NVM configuration variable from the host buffer at
+ * 'src_data_addr' (addressing mirrors hwrm_nvm_get_variable).  flags
+ * bit 0 forces an immediate flush; bits 3:1 select the encryption mode
+ * for the stored value (none or HMAC-SHA1).
+ */
+/* Input (40 bytes) */
+struct hwrm_nvm_set_variable_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 src_data_addr;
+       __le16 data_len;
+       __le16 option_num;
+       #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0             0x0UL
+       #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF          0xffffUL
+       __le16 dimensions;
+       __le16 index_0;
+       __le16 index_1;
+       __le16 index_2;
+       __le16 index_3;
+       u8 flags;
+       #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH              0x1UL
+       #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK       0xeUL
+       #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT         1
+       #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE      (0x0UL << 1)
+       #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+       #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST    NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+       u8 unused_0;
+};
+
+/* Output (16 bytes) */
+/* Standard HWRM response; 'valid' marks response completion. */
+struct hwrm_nvm_set_variable_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Decoded from the response when error_code indicates a command error. */
+struct hwrm_nvm_set_variable_cmd_err {
+       u8 code;
+       #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN              0x0UL
+       #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST       0x1UL
+       #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR          0x2UL
+       u8 unused_0[7];
+};
+
+/* hwrm_nvm_validate_option */
+/* Checks whether the candidate value at 'src_data_addr' is acceptable for
+ * the given variable (same option_num/dimension addressing as
+ * get/set_variable); result reports MATCH or NOT_MATCH.
+ */
+/* Input (40 bytes) */
+struct hwrm_nvm_validate_option_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le64 src_data_addr;
+       __le16 data_len;
+       __le16 option_num;
+       #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_0          0x0UL
+       #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF      0xffffUL
+       __le16 dimensions;
+       __le16 index_0;
+       __le16 index_1;
+       __le16 index_2;
+       __le16 index_3;
+       __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_validate_option_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 result;
+       #define NVM_VALIDATE_OPTION_RESP_RESULT_NOT_MATCH          0x0UL
+       #define NVM_VALIDATE_OPTION_RESP_RESULT_MATCH              0x1UL
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Decoded from the response when error_code indicates a command error. */
+struct hwrm_nvm_validate_option_cmd_err {
+       u8 code;
+       #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN           0x0UL
+       u8 unused_0[7];
+};
+
+/* hwrm_nvm_factory_defaults */
+/* Either restores configuration from the saved factory defaults or
+ * creates (snapshots) the current configuration as the factory defaults,
+ * selected by 'mode'.
+ */
+/* Input (24 bytes) */
+struct hwrm_nvm_factory_defaults_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 mode;
+       #define NVM_FACTORY_DEFAULTS_REQ_MODE_RESTORE              0x0UL
+       #define NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE               0x1UL
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_factory_defaults_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 result;
+       #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_OK         0x0UL
+       #define NVM_FACTORY_DEFAULTS_RESP_RESULT_RESTORE_OK       0x1UL
+       #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY   0x2UL
+       u8 unused_0;
+       __le16 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+/* Decoded from the response when error_code indicates a command error. */
+struct hwrm_nvm_factory_defaults_cmd_err {
+       u8 code;
+       #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN          0x0UL
+       #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG    0x1UL
+       #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG    0x2UL
+       u8 unused_0[7];
+};
+
+/* hwrm_selftest_qlist */
+/* Enumerates firmware self-tests: which tests exist, which require the
+ * port offline, a common timeout, and up to eight 32-byte test names.
+ */
+/* Input (16 bytes) */
+struct hwrm_selftest_qlist_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (248 bytes) */
+/* NOTE(review): the fixed fields total 16 bytes and the eight 32-byte
+ * names total 256, i.e. 272 bytes — which disagrees with the generated
+ * "(248 bytes)" size comment above.  Confirm against the HWRM spec /
+ * interface generator before relying on either number.
+ */
+struct hwrm_selftest_qlist_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 num_tests;
+       u8 available_tests;
+       #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST       0x1UL
+       #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST      0x2UL
+       #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST  0x4UL
+       #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST    0x8UL
+       u8 offline_tests;
+       #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST          0x1UL
+       #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST         0x2UL
+       #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST    0x4UL
+       #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST      0x8UL
+       u8 unused_0;
+       __le16 test_timeout;
+       u8 unused_1;
+       u8 unused_2;
+       char test0_name[32];
+       char test1_name[32];
+       char test2_name[32];
+       char test3_name[32];
+       char test4_name[32];
+       char test5_name[32];
+       char test6_name[32];
+       char test7_name[32];
+};
+
+/* hwrm_selftest_exec */
+/* Runs the self-tests selected by the request 'flags' bitmap; the
+ * response echoes which tests ran ('requested_tests') and which passed
+ * ('test_success'), using the same per-test bit positions.
+ */
+/* Input (24 bytes) */
+struct hwrm_selftest_exec_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       u8 flags;
+       #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST                    0x1UL
+       #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST                   0x2UL
+       #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST               0x4UL
+       #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST                 0x8UL
+       u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_selftest_exec_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 requested_tests;
+       #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST         0x1UL
+       #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST       0x2UL
+       #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST   0x4UL
+       #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST     0x8UL
+       u8 test_success;
+       #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST            0x1UL
+       #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST           0x2UL
+       #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST      0x4UL
+       #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST         0x8UL
+       __le16 unused_0[3];
+};
+
+/* hwrm_selftest_irq */
+/* Interrupt self-test; no parameters and a bare 8-byte response (success
+ * is presumably the arrival of the completion itself — confirm).
+ */
+/* Input (16 bytes) */
+struct hwrm_selftest_irq_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct hwrm_selftest_irq_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+};
+
+/* Hardware Resource Manager Specification */
+/* Generic HWRM request header.  Every hwrm_*_input struct above begins
+ * with these same five fields: command id, completion ring, sequence id,
+ * target function, and the host address firmware writes the response to.
+ */
+/* Input (16 bytes) */
+struct input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+/* Generic HWRM response header shared by every hwrm_*_output struct. */
+struct output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+};
+
+/* Short Command Structure (16 bytes) */
+/* Short-command format: only this 16-byte descriptor is written to the
+ * mailbox; the full request ('size' bytes) is fetched by firmware from
+ * host memory at 'req_addr'.  'signature' must be 0x4321 to mark the
+ * short form.
+ */
+struct hwrm_short_input {
+       __le16 req_type;
+       __le16 signature;
+       #define SHORT_REQ_SIGNATURE_SHORT_CMD                      0x4321UL
+       __le16 unused_0;
+       __le16 size;
+       __le64 req_addr;
+};
+
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+       __le16 req_type;
+       #define HWRM_VER_GET                                       (0x0UL)
+       #define HWRM_FUNC_BUF_UNRGTR                               (0xeUL)
+       #define HWRM_FUNC_VF_CFG                                   (0xfUL)
+       #define RESERVED1                                          (0x10UL)
+       #define HWRM_FUNC_RESET                            (0x11UL)
+       #define HWRM_FUNC_GETFID                                   (0x12UL)
+       #define HWRM_FUNC_VF_ALLOC                                 (0x13UL)
+       #define HWRM_FUNC_VF_FREE                                  (0x14UL)
+       #define HWRM_FUNC_QCAPS                            (0x15UL)
+       #define HWRM_FUNC_QCFG                                     (0x16UL)
+       #define HWRM_FUNC_CFG                                      (0x17UL)
+       #define HWRM_FUNC_QSTATS                                   (0x18UL)
+       #define HWRM_FUNC_CLR_STATS                                (0x19UL)
+       #define HWRM_FUNC_DRV_UNRGTR                               (0x1aUL)
+       #define HWRM_FUNC_VF_RESC_FREE                             (0x1bUL)
+       #define HWRM_FUNC_VF_VNIC_IDS_QUERY                        (0x1cUL)
+       #define HWRM_FUNC_DRV_RGTR                                 (0x1dUL)
+       #define HWRM_FUNC_DRV_QVER                                 (0x1eUL)
+       #define HWRM_FUNC_BUF_RGTR                                 (0x1fUL)
+       #define HWRM_PORT_PHY_CFG                                  (0x20UL)
+       #define HWRM_PORT_MAC_CFG                                  (0x21UL)
+       #define HWRM_PORT_TS_QUERY                                 (0x22UL)
+       #define HWRM_PORT_QSTATS                                   (0x23UL)
+       #define HWRM_PORT_LPBK_QSTATS                              (0x24UL)
+       #define HWRM_PORT_CLR_STATS                                (0x25UL)
+       #define HWRM_PORT_LPBK_CLR_STATS                           (0x26UL)
+       #define HWRM_PORT_PHY_QCFG                                 (0x27UL)
+       #define HWRM_PORT_MAC_QCFG                                 (0x28UL)
+       #define HWRM_PORT_MAC_PTP_QCFG                             (0x29UL)
+       #define HWRM_PORT_PHY_QCAPS                                (0x2aUL)
+       #define HWRM_PORT_PHY_I2C_WRITE                    (0x2bUL)
+       #define HWRM_PORT_PHY_I2C_READ                             (0x2cUL)
+       #define HWRM_PORT_LED_CFG                                  (0x2dUL)
+       #define HWRM_PORT_LED_QCFG                                 (0x2eUL)
+       #define HWRM_PORT_LED_QCAPS                                (0x2fUL)
+       #define HWRM_QUEUE_QPORTCFG                                (0x30UL)
+       #define HWRM_QUEUE_QCFG                            (0x31UL)
+       #define HWRM_QUEUE_CFG                                     (0x32UL)
+       #define HWRM_FUNC_VLAN_CFG                                 (0x33UL)
+       #define HWRM_FUNC_VLAN_QCFG                                (0x34UL)
+       #define HWRM_QUEUE_PFCENABLE_QCFG                          (0x35UL)
+       #define HWRM_QUEUE_PFCENABLE_CFG                           (0x36UL)
+       #define HWRM_QUEUE_PRI2COS_QCFG                    (0x37UL)
+       #define HWRM_QUEUE_PRI2COS_CFG                             (0x38UL)
+       #define HWRM_QUEUE_COS2BW_QCFG                             (0x39UL)
+       #define HWRM_QUEUE_COS2BW_CFG                              (0x3aUL)
+       #define HWRM_QUEUE_DSCP_QCAPS                              (0x3bUL)
+       #define HWRM_QUEUE_DSCP2PRI_QCFG                           (0x3cUL)
+       #define HWRM_QUEUE_DSCP2PRI_CFG                    (0x3dUL)
+       #define HWRM_VNIC_ALLOC                            (0x40UL)
+       #define HWRM_VNIC_FREE                                     (0x41UL)
+       #define HWRM_VNIC_CFG                                      (0x42UL)
+       #define HWRM_VNIC_QCFG                                     (0x43UL)
+       #define HWRM_VNIC_TPA_CFG                                  (0x44UL)
+       #define HWRM_VNIC_TPA_QCFG                                 (0x45UL)
+       #define HWRM_VNIC_RSS_CFG                                  (0x46UL)
+       #define HWRM_VNIC_RSS_QCFG                                 (0x47UL)
+       #define HWRM_VNIC_PLCMODES_CFG                             (0x48UL)
+       #define HWRM_VNIC_PLCMODES_QCFG                    (0x49UL)
+       #define HWRM_VNIC_QCAPS                            (0x4aUL)
+       #define HWRM_RING_ALLOC                            (0x50UL)
+       #define HWRM_RING_FREE                                     (0x51UL)
+       #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS                 (0x52UL)
+       #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS              (0x53UL)
+       #define HWRM_RING_RESET                            (0x5eUL)
+       #define HWRM_RING_GRP_ALLOC                                (0x60UL)
+       #define HWRM_RING_GRP_FREE                                 (0x61UL)
+       #define RESERVED5                                          (0x64UL)
+       #define RESERVED6                                          (0x65UL)
+       #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC                     (0x70UL)
+       #define HWRM_VNIC_RSS_COS_LB_CTX_FREE                      (0x71UL)
+       #define HWRM_CFA_L2_FILTER_ALLOC                           (0x90UL)
+       #define HWRM_CFA_L2_FILTER_FREE                    (0x91UL)
+       #define HWRM_CFA_L2_FILTER_CFG                             (0x92UL)
+       #define HWRM_CFA_L2_SET_RX_MASK                    (0x93UL)
+       #define HWRM_CFA_VLAN_ANTISPOOF_CFG                        (0x94UL)
+       #define HWRM_CFA_TUNNEL_FILTER_ALLOC                       (0x95UL)
+       #define HWRM_CFA_TUNNEL_FILTER_FREE                        (0x96UL)
+       #define HWRM_CFA_ENCAP_RECORD_ALLOC                        (0x97UL)
+       #define HWRM_CFA_ENCAP_RECORD_FREE                         (0x98UL)
+       #define HWRM_CFA_NTUPLE_FILTER_ALLOC                       (0x99UL)
+       #define HWRM_CFA_NTUPLE_FILTER_FREE                        (0x9aUL)
+       #define HWRM_CFA_NTUPLE_FILTER_CFG                         (0x9bUL)
+       #define HWRM_CFA_EM_FLOW_ALLOC                             (0x9cUL)
+       #define HWRM_CFA_EM_FLOW_FREE                              (0x9dUL)
+       #define HWRM_CFA_EM_FLOW_CFG                               (0x9eUL)
+       #define HWRM_TUNNEL_DST_PORT_QUERY                         (0xa0UL)
+       #define HWRM_TUNNEL_DST_PORT_ALLOC                         (0xa1UL)
+       #define HWRM_TUNNEL_DST_PORT_FREE                          (0xa2UL)
+       #define HWRM_STAT_CTX_ALLOC                                (0xb0UL)
+       #define HWRM_STAT_CTX_FREE                                 (0xb1UL)
+       #define HWRM_STAT_CTX_QUERY                                (0xb2UL)
+       #define HWRM_STAT_CTX_CLR_STATS                    (0xb3UL)
+       #define HWRM_FW_RESET                                      (0xc0UL)
+       #define HWRM_FW_QSTATUS                            (0xc1UL)
+       #define HWRM_FW_SET_TIME                                   (0xc8UL)
+       #define HWRM_FW_GET_TIME                                   (0xc9UL)
+       #define HWRM_FW_SET_STRUCTURED_DATA                        (0xcaUL)
+       #define HWRM_FW_GET_STRUCTURED_DATA                        (0xcbUL)
+       #define HWRM_FW_IPC_MAILBOX                                (0xccUL)
+       #define HWRM_EXEC_FWD_RESP                                 (0xd0UL)
+       #define HWRM_REJECT_FWD_RESP                               (0xd1UL)
+       #define HWRM_FWD_RESP                                      (0xd2UL)
+       #define HWRM_FWD_ASYNC_EVENT_CMPL                          (0xd3UL)
+       #define HWRM_TEMP_MONITOR_QUERY                    (0xe0UL)
+       #define HWRM_WOL_FILTER_ALLOC                              (0xf0UL)
+       #define HWRM_WOL_FILTER_FREE                               (0xf1UL)
+       #define HWRM_WOL_FILTER_QCFG                               (0xf2UL)
+       #define HWRM_WOL_REASON_QCFG                               (0xf3UL)
+       #define HWRM_CFA_METER_PROFILE_ALLOC                       (0xf5UL)
+       #define HWRM_CFA_METER_PROFILE_FREE                        (0xf6UL)
+       #define HWRM_CFA_METER_PROFILE_CFG                         (0xf7UL)
+       #define HWRM_CFA_METER_INSTANCE_ALLOC                      (0xf8UL)
+       #define HWRM_CFA_METER_INSTANCE_FREE                       (0xf9UL)
+       #define HWRM_CFA_VFR_ALLOC                                 (0xfdUL)
+       #define HWRM_CFA_VFR_FREE                                  (0xfeUL)
+       #define HWRM_CFA_VF_PAIR_ALLOC                             (0x100UL)
+       #define HWRM_CFA_VF_PAIR_FREE                              (0x101UL)
+       #define HWRM_CFA_VF_PAIR_INFO                              (0x102UL)
+       #define HWRM_CFA_FLOW_ALLOC                                (0x103UL)
+       #define HWRM_CFA_FLOW_FREE                                 (0x104UL)
+       #define HWRM_CFA_FLOW_FLUSH                                (0x105UL)
+       #define HWRM_CFA_FLOW_STATS                                (0x106UL)
+       #define HWRM_CFA_FLOW_INFO                                 (0x107UL)
+       #define HWRM_CFA_DECAP_FILTER_ALLOC                        (0x108UL)
+       #define HWRM_CFA_DECAP_FILTER_FREE                         (0x109UL)
+       #define HWRM_CFA_VLAN_ANTISPOOF_QCFG                       (0x10aUL)
+       #define HWRM_SELFTEST_QLIST                                (0x200UL)
+       #define HWRM_SELFTEST_EXEC                                 (0x201UL)
+       #define HWRM_SELFTEST_IRQ                                  (0x202UL)
+       #define HWRM_DBG_READ_DIRECT                               (0xff10UL)
+       #define HWRM_DBG_READ_INDIRECT                             (0xff11UL)
+       #define HWRM_DBG_WRITE_DIRECT                              (0xff12UL)
+       #define HWRM_DBG_WRITE_INDIRECT                    (0xff13UL)
+       #define HWRM_DBG_DUMP                                      (0xff14UL)
+       #define HWRM_DBG_ERASE_NVM                                 (0xff15UL)
+       #define HWRM_DBG_CFG                                       (0xff16UL)
+       #define HWRM_NVM_FACTORY_DEFAULTS                          (0xffeeUL)
+       #define HWRM_NVM_VALIDATE_OPTION                           (0xffefUL)
+       #define HWRM_NVM_FLUSH                                     (0xfff0UL)
+       #define HWRM_NVM_GET_VARIABLE                              (0xfff1UL)
+       #define HWRM_NVM_SET_VARIABLE                              (0xfff2UL)
+       #define HWRM_NVM_INSTALL_UPDATE                    (0xfff3UL)
+       #define HWRM_NVM_MODIFY                            (0xfff4UL)
+       #define HWRM_NVM_VERIFY_UPDATE                             (0xfff5UL)
+       #define HWRM_NVM_GET_DEV_INFO                              (0xfff6UL)
+       #define HWRM_NVM_ERASE_DIR_ENTRY                           (0xfff7UL)
+       #define HWRM_NVM_MOD_DIR_ENTRY                             (0xfff8UL)
+       #define HWRM_NVM_FIND_DIR_ENTRY                    (0xfff9UL)
+       #define HWRM_NVM_GET_DIR_ENTRIES                           (0xfffaUL)
+       #define HWRM_NVM_GET_DIR_INFO                              (0xfffbUL)
+       #define HWRM_NVM_RAW_DUMP                                  (0xfffcUL)
+       #define HWRM_NVM_READ                                      (0xfffdUL)
+       #define HWRM_NVM_WRITE                                     (0xfffeUL)
+       #define HWRM_NVM_RAW_WRITE_BLK                             (0xffffUL)
+       __le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+       /* HWRM command completion status; one of HWRM_ERR_CODE_*. */
+       __le16 error_code;
+       #define HWRM_ERR_CODE_SUCCESS                              (0x0UL)
+       #define HWRM_ERR_CODE_FAIL                                 (0x1UL)
+       #define HWRM_ERR_CODE_INVALID_PARAMS                       (0x2UL)
+       #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED               (0x3UL)
+       #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR                 (0x4UL)
+       #define HWRM_ERR_CODE_INVALID_FLAGS                        (0x5UL)
+       #define HWRM_ERR_CODE_INVALID_ENABLES                      (0x6UL)
+       #define HWRM_ERR_CODE_HWRM_ERROR                           (0xfUL)
+       #define HWRM_ERR_CODE_UNKNOWN_ERR                          (0xfffeUL)
+       #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED            (0xffffUL)
+       /* pad to the 8-byte structure size */
+       __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+/* Generic HWRM response header used when a command fails; mirrors the
+ * common response layout (error_code/req_type/seq_id/resp_len) followed
+ * by opaque diagnostic data and the 'valid' completion marker.
+ */
+struct hwrm_err_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 opaque_0;
+       __le16 opaque_1;
+       u8 cmd_err;
+       /* Written last by firmware to signal the response is complete. */
+       u8 valid;
+};
+
+/* Port Tx Statistics Formats (408 bytes) */
+/* Per-port MAC transmit counters, 51 little-endian 64-bit fields
+ * (51 * 8 = 408 bytes).  Layout is firmware ABI — do not reorder.
+ */
+struct tx_port_stats {
+       __le64 tx_64b_frames;
+       __le64 tx_65b_127b_frames;
+       __le64 tx_128b_255b_frames;
+       __le64 tx_256b_511b_frames;
+       __le64 tx_512b_1023b_frames;
+       __le64 tx_1024b_1518_frames;
+       __le64 tx_good_vlan_frames;
+       __le64 tx_1519b_2047_frames;
+       __le64 tx_2048b_4095b_frames;
+       __le64 tx_4096b_9216b_frames;
+       __le64 tx_9217b_16383b_frames;
+       __le64 tx_good_frames;
+       __le64 tx_total_frames;
+       __le64 tx_ucast_frames;
+       __le64 tx_mcast_frames;
+       __le64 tx_bcast_frames;
+       __le64 tx_pause_frames;
+       __le64 tx_pfc_frames;
+       __le64 tx_jabber_frames;
+       __le64 tx_fcs_err_frames;
+       __le64 tx_control_frames;
+       __le64 tx_oversz_frames;
+       __le64 tx_single_dfrl_frames;
+       __le64 tx_multi_dfrl_frames;
+       __le64 tx_single_coll_frames;
+       __le64 tx_multi_coll_frames;
+       __le64 tx_late_coll_frames;
+       __le64 tx_excessive_coll_frames;
+       __le64 tx_frag_frames;
+       __le64 tx_err;
+       __le64 tx_tagged_frames;
+       __le64 tx_dbl_tagged_frames;
+       __le64 tx_runt_frames;
+       __le64 tx_fifo_underruns;
+       __le64 tx_pfc_ena_frames_pri0;
+       __le64 tx_pfc_ena_frames_pri1;
+       __le64 tx_pfc_ena_frames_pri2;
+       __le64 tx_pfc_ena_frames_pri3;
+       __le64 tx_pfc_ena_frames_pri4;
+       __le64 tx_pfc_ena_frames_pri5;
+       __le64 tx_pfc_ena_frames_pri6;
+       __le64 tx_pfc_ena_frames_pri7;
+       __le64 tx_eee_lpi_events;
+       __le64 tx_eee_lpi_duration;
+       __le64 tx_llfc_logical_msgs;
+       __le64 tx_hcfc_msgs;
+       __le64 tx_total_collisions;
+       __le64 tx_bytes;
+       __le64 tx_xthol_frames;
+       __le64 tx_stat_discard;
+       __le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+/* Per-port MAC receive counters, 66 little-endian 64-bit fields
+ * (66 * 8 = 528 bytes).  Layout is firmware ABI — do not reorder.
+ */
+struct rx_port_stats {
+       __le64 rx_64b_frames;
+       __le64 rx_65b_127b_frames;
+       __le64 rx_128b_255b_frames;
+       __le64 rx_256b_511b_frames;
+       __le64 rx_512b_1023b_frames;
+       __le64 rx_1024b_1518_frames;
+       __le64 rx_good_vlan_frames;
+       __le64 rx_1519b_2047b_frames;
+       __le64 rx_2048b_4095b_frames;
+       __le64 rx_4096b_9216b_frames;
+       __le64 rx_9217b_16383b_frames;
+       __le64 rx_total_frames;
+       __le64 rx_ucast_frames;
+       __le64 rx_mcast_frames;
+       __le64 rx_bcast_frames;
+       __le64 rx_fcs_err_frames;
+       __le64 rx_ctrl_frames;
+       __le64 rx_pause_frames;
+       __le64 rx_pfc_frames;
+       __le64 rx_unsupported_opcode_frames;
+       __le64 rx_unsupported_da_pausepfc_frames;
+       __le64 rx_wrong_sa_frames;
+       __le64 rx_align_err_frames;
+       __le64 rx_oor_len_frames;
+       __le64 rx_code_err_frames;
+       __le64 rx_false_carrier_frames;
+       __le64 rx_ovrsz_frames;
+       __le64 rx_jbr_frames;
+       __le64 rx_mtu_err_frames;
+       __le64 rx_match_crc_frames;
+       __le64 rx_promiscuous_frames;
+       __le64 rx_tagged_frames;
+       __le64 rx_double_tagged_frames;
+       __le64 rx_trunc_frames;
+       __le64 rx_good_frames;
+       __le64 rx_pfc_xon2xoff_frames_pri0;
+       __le64 rx_pfc_xon2xoff_frames_pri1;
+       __le64 rx_pfc_xon2xoff_frames_pri2;
+       __le64 rx_pfc_xon2xoff_frames_pri3;
+       __le64 rx_pfc_xon2xoff_frames_pri4;
+       __le64 rx_pfc_xon2xoff_frames_pri5;
+       __le64 rx_pfc_xon2xoff_frames_pri6;
+       __le64 rx_pfc_xon2xoff_frames_pri7;
+       __le64 rx_pfc_ena_frames_pri0;
+       __le64 rx_pfc_ena_frames_pri1;
+       __le64 rx_pfc_ena_frames_pri2;
+       __le64 rx_pfc_ena_frames_pri3;
+       __le64 rx_pfc_ena_frames_pri4;
+       __le64 rx_pfc_ena_frames_pri5;
+       __le64 rx_pfc_ena_frames_pri6;
+       __le64 rx_pfc_ena_frames_pri7;
+       __le64 rx_sch_crc_err_frames;
+       __le64 rx_undrsz_frames;
+       __le64 rx_frag_frames;
+       __le64 rx_eee_lpi_events;
+       __le64 rx_eee_lpi_duration;
+       __le64 rx_llfc_physical_msgs;
+       __le64 rx_llfc_logical_msgs;
+       __le64 rx_llfc_msgs_with_crc_err;
+       __le64 rx_hcfc_msgs;
+       __le64 rx_hcfc_msgs_with_crc_err;
+       __le64 rx_bytes;
+       __le64 rx_runt_bytes;
+       __le64 rx_runt_frames;
+       __le64 rx_stat_discard;
+       __le64 rx_stat_err;
+};
+
+/* Periodic Statistics Context DMA to host (160 bytes) */
+/* Per-statistics-context counters DMA'd periodically by firmware;
+ * 20 little-endian 64-bit fields (20 * 8 = 160 bytes).
+ */
+struct ctx_hw_stats {
+       __le64 rx_ucast_pkts;
+       __le64 rx_mcast_pkts;
+       __le64 rx_bcast_pkts;
+       __le64 rx_discard_pkts;
+       __le64 rx_drop_pkts;
+       __le64 rx_ucast_bytes;
+       __le64 rx_mcast_bytes;
+       __le64 rx_bcast_bytes;
+       __le64 tx_ucast_pkts;
+       __le64 tx_mcast_pkts;
+       __le64 tx_bcast_pkts;
+       __le64 tx_discard_pkts;
+       __le64 tx_drop_pkts;
+       __le64 tx_ucast_bytes;
+       __le64 tx_mcast_bytes;
+       __le64 tx_bcast_bytes;
+       /* TPA = hardware transparent packet (receive) aggregation */
+       __le64 tpa_pkts;
+       __le64 tpa_bytes;
+       __le64 tpa_events;
+       __le64 tpa_aborts;
+};
+
+/* Structure data header (16 bytes) */
+/* Header preceding each structured-data payload exchanged via
+ * HWRM_FW_SET/GET_STRUCTURED_DATA.
+ */
+struct hwrm_struct_hdr {
+       /* Identifies the payload type; one of STRUCT_HDR_STRUCT_ID_*. */
+       __le16 struct_id;
+       #define STRUCT_HDR_STRUCT_ID_LLDP_CFG                      0x41bUL
+       #define STRUCT_HDR_STRUCT_ID_DCBX_ETS                      0x41dUL
+       #define STRUCT_HDR_STRUCT_ID_DCBX_PFC                      0x41fUL
+       #define STRUCT_HDR_STRUCT_ID_DCBX_APP                      0x421UL
+       #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE    0x422UL
+       #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC                  0x424UL
+       #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE                   0x426UL
+       #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE            0x1UL
+       #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION              0xaUL
+       __le16 len;
+       u8 version;
+       u8 count;
+       __le16 subtype;
+       /* Offset to the next structure header; 0 marks the last one. */
+       __le16 next_offset;
+       #define STRUCT_HDR_NEXT_OFFSET_LAST                        0x0UL
+       __le16 unused_0[3];
+};
+
+/* DCBX ETS configuration structure (1053) (32 bytes) */
+/* Maps the 8 IEEE 802.1p priorities to traffic classes, and each TC to
+ * a bandwidth share and a transmission selection algorithm (TSA).
+ */
+struct hwrm_struct_data_dcbx_ets {
+       /* Whether this describes the local configuration or the
+        * recommendation advertised to the peer.
+        */
+       u8 destination;
+       #define STRUCT_DATA_DCBX_ETS_DESTINATION_CONFIGURATION    0x1UL
+       #define STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION  0x2UL
+       u8 max_tcs;
+       __le16 unused_0;
+       u8 pri0_to_tc_map;
+       u8 pri1_to_tc_map;
+       u8 pri2_to_tc_map;
+       u8 pri3_to_tc_map;
+       u8 pri4_to_tc_map;
+       u8 pri5_to_tc_map;
+       u8 pri6_to_tc_map;
+       u8 pri7_to_tc_map;
+       u8 tc0_to_bw_map;
+       u8 tc1_to_bw_map;
+       u8 tc2_to_bw_map;
+       u8 tc3_to_bw_map;
+       u8 tc4_to_bw_map;
+       u8 tc5_to_bw_map;
+       u8 tc6_to_bw_map;
+       u8 tc7_to_bw_map;
+       /* TSA values below apply to all tcN_to_tsa_map fields. */
+       u8 tc0_to_tsa_map;
+       #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_SP   0x0UL
+       #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_CBS  0x1UL
+       #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_ETS  0x2UL
+       #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC 0xffUL
+       u8 tc1_to_tsa_map;
+       u8 tc2_to_tsa_map;
+       u8 tc3_to_tsa_map;
+       u8 tc4_to_tsa_map;
+       u8 tc5_to_tsa_map;
+       u8 tc6_to_tsa_map;
+       u8 tc7_to_tsa_map;
+       __le32 unused_1;
+};
+
+/* DCBX PFC configuration structure (1055) (8 bytes) */
+struct hwrm_struct_data_dcbx_pfc {
+       /* Bitmap of priorities with priority flow control enabled. */
+       u8 pfc_priority_bitmap;
+       u8 max_pfc_tcs;
+       /* MACsec bypass capability bit (per IEEE 802.1Qbb PFC TLV). */
+       u8 mbc;
+       u8 unused_0[5];
+};
+
+/* DCBX Application configuration structure (1057) (8 bytes) */
+/* One DCBX application-priority table entry: maps a protocol (by
+ * EtherType or L4 port) to an 802.1p priority.
+ */
+struct hwrm_struct_data_dcbx_app {
+       /* Big-endian: EtherType or port number as seen on the wire. */
+       __be16 protocol_id;
+       u8 protocol_selector;
+       #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+       #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT   0x2UL
+       #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT   0x3UL
+       #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+       u8 priority;
+       u8 valid;
+       u8 unused_0[3];
+};
+
+/* DCBX feature states configuration structure (1058) (8 bytes) */
+struct hwrm_struct_data_dcbx_feature_state {
+       u8 dcbx_mode;
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_DISABLED 0x0UL
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_IEEE 0x1UL
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE 0x2UL
+       /* Per-feature state bytes; the *_BIT_POS defines below give bit
+        * positions within these state bytes.
+        */
+       u8 ets_state;
+       u8 pfc_state;
+       u8 app_state;
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ENABLE_BIT_POS 0x7UL
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_WILLING_BIT_POS 0x6UL
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS 0x5UL
+       u8 unused_0[3];
+       /* Bitmask requesting a reset of individual DCBX features. */
+       u8 resets;
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_ETS   0x1UL
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_PFC   0x2UL
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_APP   0x4UL
+       #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE 0x8UL
+};
+
+/* LLDP TLVs transmit configuration structure (1051) (8 bytes) */
+/* Enables/disables the LLDP agent and the individual optional TLVs it
+ * transmits.  Each *_state byte takes the DISABLE/ENABLE values below.
+ */
+struct hwrm_struct_data_lldp {
+       u8 admin_state;
+       #define STRUCT_DATA_LLDP_ADMIN_STATE_DISABLE               0x0UL
+       #define STRUCT_DATA_LLDP_ADMIN_STATE_TX            0x1UL
+       #define STRUCT_DATA_LLDP_ADMIN_STATE_RX            0x2UL
+       #define STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE                0x3UL
+       u8 port_description_state;
+       #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_DISABLE   0x0UL
+       #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE    0x1UL
+       u8 system_name_state;
+       #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_DISABLE         0x0UL
+       #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE          0x1UL
+       u8 system_desc_state;
+       #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_DISABLE         0x0UL
+       #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE          0x1UL
+       u8 system_cap_state;
+       #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_DISABLE          0x0UL
+       #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE           0x1UL
+       u8 mgmt_addr_state;
+       #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_DISABLE           0x0UL
+       #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE    0x1UL
+       u8 async_event_notification_state;
+       #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_DISABLE 0x0UL
+       #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE 0x1UL
+       u8 unused_0;
+};
+
+/* LLDP generic TLV configuration (1060) (16 bytes) */
+/* Carries the raw value of one LLDP TLV.  NOTE(review): the header
+ * comment says 16 bytes but tlv_value[64] makes the full structure
+ * larger; presumably the 16 bytes refers to the fixed prefix — confirm
+ * against the HWRM spec.
+ */
+struct hwrm_struct_data_lldp_generic {
+       u8 tlv_type;
+       #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_CHASSIS          0x1UL
+       #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT             0x2UL
+       #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_NAME     0x3UL
+       #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_DESCRIPTION 0x4UL
+       #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_NAME       0x5UL
+       #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION 0x6UL
+       u8 subtype;
+       u8 length;
+       u8 unused_0;
+       __le32 unused_1;
+       __le32 tlv_value[64];
+};
+
+/* LLDP device TLV configuration (1062) (64 bytes) */
+/* Device-level LLDP TLV data: TTL, management address, system
+ * capabilities and interface numbering.
+ */
+struct hwrm_struct_data_lldp_device {
+       __le16 ttl;
+       u8 mgmt_addr_len;
+       u8 mgmt_addr_type;
+       __le32 unused_0;
+       __le32 mgmt_addr[8];
+       __le32 system_caps;
+       u8 intf_num_type;
+       u8 mgmt_addr_oid_length;
+       u8 unused_1;
+       u8 unused_2;
+       __le32 intf_num;
+       __le32 unused_3;
+       __le32 mgmt_addr_oid[32];
+};
+
+/* port description (10) (8 bytes) */
+struct hwrm_struct_data_port_description {
+       /* Physical port this description applies to. */
+       u8 port_id;
+       u8 unused_0[7];
+};
+
+#endif
diff --git a/ubuntu/bnxt/bnxt_nvm_defs.h b/ubuntu/bnxt/bnxt_nvm_defs.h
new file mode 100644 (file)
index 0000000..c5ccc9b
--- /dev/null
@@ -0,0 +1,75 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+/* NVM directory entry types.  Values are stored in flash and must not
+ * change; the numeric gaps (23, 28-39, 43-47) are reserved/unassigned
+ * entries in the on-device directory format.
+ */
+enum bnxt_nvm_directory_type {
+       BNX_DIR_TYPE_UNUSED = 0,
+       BNX_DIR_TYPE_PKG_LOG = 1,
+       BNX_DIR_TYPE_UPDATE = 2,
+       BNX_DIR_TYPE_CHIMP_PATCH = 3,
+       BNX_DIR_TYPE_BOOTCODE = 4,
+       BNX_DIR_TYPE_VPD = 5,
+       BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+       BNX_DIR_TYPE_AVS = 7,
+       BNX_DIR_TYPE_PCIE = 8,
+       BNX_DIR_TYPE_PORT_MACRO = 9,
+       BNX_DIR_TYPE_APE_FW = 10,
+       BNX_DIR_TYPE_APE_PATCH = 11,
+       BNX_DIR_TYPE_KONG_FW = 12,
+       BNX_DIR_TYPE_KONG_PATCH = 13,
+       BNX_DIR_TYPE_BONO_FW = 14,
+       BNX_DIR_TYPE_BONO_PATCH = 15,
+       BNX_DIR_TYPE_TANG_FW = 16,
+       BNX_DIR_TYPE_TANG_PATCH = 17,
+       BNX_DIR_TYPE_BOOTCODE_2 = 18,
+       BNX_DIR_TYPE_CCM = 19,
+       BNX_DIR_TYPE_PCI_CFG = 20,
+       BNX_DIR_TYPE_TSCF_UCODE = 21,
+       BNX_DIR_TYPE_ISCSI_BOOT = 22,
+       BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+       BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+       BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+       BNX_DIR_TYPE_EXT_PHY = 27,
+       BNX_DIR_TYPE_SHARED_CFG = 40,
+       BNX_DIR_TYPE_PORT_CFG = 41,
+       BNX_DIR_TYPE_FUNC_CFG = 42,
+       BNX_DIR_TYPE_MGMT_CFG = 48,
+       BNX_DIR_TYPE_MGMT_DATA = 49,
+       BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+       BNX_DIR_TYPE_MGMT_WEB_META = 51,
+       BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+       BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+/* First ordinal (instance number) of a directory entry type. */
+#define BNX_DIR_ORDINAL_FIRST                  0
+
+/* Directory entry extension flags. */
+#define BNX_DIR_EXT_NONE                       0
+#define BNX_DIR_EXT_INACTIVE                   (1 << 0)
+#define BNX_DIR_EXT_UPDATE                     (1 << 1)
+
+/* Directory entry attribute flags. */
+#define BNX_DIR_ATTR_NONE                      0
+#define BNX_DIR_ATTR_NO_CHKSUM                 (1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM               (1 << 1)
+
+/* Maximum size in bytes of the package log stored in NVM. */
+#define BNX_PKG_LOG_MAX_LENGTH                 4096
+
+/* Field indices within one package-log record (BNX_DIR_TYPE_PKG_LOG). */
+enum bnxnvm_pkglog_field_index {
+       BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP       = 0,
+       BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION           = 1,
+       BNX_PKG_LOG_FIELD_IDX_PKG_VERSION               = 2,
+       BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP             = 3,
+       BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM              = 4,
+       BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS           = 5,
+       BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK            = 6
+};
+
+#endif                         /* Don't add anything after this line */
diff --git a/ubuntu/bnxt/bnxt_ptp.c b/ubuntu/bnxt/bnxt_ptp.c
new file mode 100644 (file)
index 0000000..9e358c0
--- /dev/null
@@ -0,0 +1,461 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#ifdef HAVE_IEEE1588_SUPPORT
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#include <linux/timekeeping.h>
+#endif
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ptp.h"
+
+#ifdef HAVE_IEEE1588_SUPPORT
+/* ptp_clock_info::settime64 — set the PHC to an absolute time by
+ * re-initializing the software timecounter at the requested nanosecond
+ * value.  Always returns 0.
+ */
+static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
+                           const struct timespec64 *ts)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+       u64 ns = timespec64_to_ns(ts);
+
+       timecounter_init(&ptp->tc, &ptp->cc, ns);
+       return 0;
+}
+
+/* ptp_clock_info::gettime64 — read the current PHC time via the
+ * software timecounter and return it as a timespec64.
+ */
+static int bnxt_ptp_gettime(struct ptp_clock_info *ptp_info,
+                           struct timespec64 *ts)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+
+       *ts = ns_to_timespec64(timecounter_read(&ptp->tc));
+       return 0;
+}
+
+/* ptp_clock_info::adjtime — shift the PHC by delta nanoseconds.  Done
+ * purely in software via the timecounter; the hardware counter itself
+ * is not touched.
+ */
+static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+
+       timecounter_adjtime(&ptp->tc, delta);
+       return 0;
+}
+
+/* ptp_clock_info::adjfreq — program a hardware drift correction of
+ * |ppb| parts-per-billion.  The hardware applies a fixed-size step
+ * (0-31 ns) once per period (in us); this routine searches for the
+ * (step, period) pair whose effective rate step*1000000/period best
+ * approximates the requested ppb.
+ */
+static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+       s32 period, period1, period2, dif, dif1, dif2;
+       s32 step, best_step = 0, best_period = 0;
+       s32 best_dif = BNXT_MAX_PHC_DRIFT;
+       u32 drift_sign = 1;
+
+       /* Frequency adjustment requires programming 3 values:
+        * 1-bit direction
+        * 5-bit adjustment step in 1 ns unit
+        * 24-bit period in 1 us unit between adjustments
+        */
+       if (ppb < 0) {
+               ppb = -ppb;
+               drift_sign = 0;
+       }
+
+       if (ppb == 0) {
+               /* no adjustment */
+               best_step = 0;
+               best_period = 0xFFFFFF;
+       } else if (ppb >= BNXT_MAX_PHC_DRIFT) {
+               /* max possible adjustment */
+               best_step = 31;
+               best_period = 1;
+       } else {
+               /* Find the best possible adjustment step and period */
+               for (step = 0; step <= 31; step++) {
+                       /* Ideal period for this step, rounded down and
+                        * up; try both and keep the closer one.
+                        */
+                       period1 = step * 1000000 / ppb;
+                       period2 = period1 + 1;
+                       if (period1 != 0)
+                               dif1 = ppb - (step * 1000000 / period1);
+                       else
+                               dif1 = BNXT_MAX_PHC_DRIFT;
+                       if (dif1 < 0)
+                               dif1 = -dif1;
+                       dif2 = ppb - (step * 1000000 / period2);
+                       if (dif2 < 0)
+                               dif2 = -dif2;
+                       dif = (dif1 < dif2) ? dif1 : dif2;
+                       period = (dif1 < dif2) ? period1 : period2;
+                       if (dif < best_dif) {
+                               best_dif = dif;
+                               best_step = step;
+                               best_period = period;
+                       }
+               }
+       }
+       /* Pack sign, step and period into the sync-time adjust register. */
+       writel((drift_sign << BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_SFT) |
+              (best_step << BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_SFT) |
+              (best_period & BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_MSK),
+              ptp->bp->bar0 + BNXT_GRCPF_REG_SYNC_TIME_ADJ);
+
+       return 0;
+}
+
+/* ptp_clock_info::enable — no ancillary features (external timestamps,
+ * periodic output, PPS) are implemented, so reject all requests.
+ * (Re-indented with tabs to match kernel coding style.)
+ */
+static int bnxt_ptp_enable(struct ptp_clock_info *ptp,
+                          struct ptp_clock_request *rq, int on)
+{
+       return -ENOTSUPP;
+}
+
+/* Drain any pending (stale) rx timestamps for this port from the PTP
+ * rx FIFO, bounded to 10 iterations so a stuck PENDING bit cannot hang
+ * the caller.
+ */
+static void bnxt_clr_rx_ts(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       struct bnxt_pf_info *pf = &bp->pf;
+       u16 port_id;
+       int i = 0;
+       u32 fifo;
+
+       if (!ptp)
+               return;
+
+       port_id = pf->port_id;
+       fifo = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]);
+       while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < 10)) {
+               /* Advance the FIFO for our port, discarding the entry. */
+               writel(1 << port_id, bp->bar0 +
+                      ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
+               fifo = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]);
+               i++;
+       }
+}
+
+/* Push the current software timestamping configuration (rx capture
+ * on/off + message-type filter, tx capture on/off) to firmware via
+ * HWRM_PORT_MAC_CFG.  Returns 0 on success, -EIO on HWRM failure; a
+ * NULL ptp_cfg is treated as "nothing to do".
+ */
+static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+       struct hwrm_port_mac_cfg_input req = {0};
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       u32 flags = 0;
+
+       if (!ptp)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+
+       /* rx_filter != 0 means some HWTSTAMP_FILTER_* mode is active. */
+       if (ptp->rx_filter)
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+       else
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+       if (ptp->tx_tstamp_en)
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+       else
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+       req.flags = cpu_to_le32(flags);
+       req.enables = cpu_to_le32(
+               PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+       /* rxctl is the BNXT_PTP_MSG_* bitmap of message types to stamp. */
+       req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+
+       if (hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+               return -EIO;
+
+       return 0;
+}
+
+/* SIOCSHWTSTAMP handler: validate the userspace hwtstamp_config,
+ * translate the requested rx filter to a BNXT_PTP_MSG_* bitmap, push
+ * the config to firmware, and echo the (possibly coarsened) filter
+ * back to userspace.  On any failure the previous settings are
+ * restored.
+ */
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwtstamp_config stmpconf;
+       struct bnxt_ptp_cfg *ptp;
+       u16 old_rxctl, new_rxctl;
+       int old_rx_filter, rc;
+       u8 old_tx_tstamp_en;
+
+       ptp = bp->ptp_cfg;
+       if (!ptp)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
+               return -EFAULT;
+
+       /* No flags are defined for this ioctl; reject unknown ones. */
+       if (stmpconf.flags)
+               return -EINVAL;
+
+       if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
+           stmpconf.tx_type != HWTSTAMP_TX_OFF)
+               return -ERANGE;
+
+       /* Remember current state so it can be rolled back on error. */
+       old_rx_filter = ptp->rx_filter;
+       old_rxctl = ptp->rxctl;
+       old_tx_tstamp_en = ptp->tx_tstamp_en;
+       switch (stmpconf.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               new_rxctl = 0;
+               ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+               break;
+       /* Hardware filters by PTP message type, not by transport, so
+        * L2/L4-specific requests are coarsened to the generic mode.
+        */
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+               new_rxctl = BNXT_PTP_MSG_EVENTS;
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+               new_rxctl = BNXT_PTP_MSG_SYNC;
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               new_rxctl = BNXT_PTP_MSG_DELAY_REQ;
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+               ptp->tx_tstamp_en = 1;
+       else
+               ptp->tx_tstamp_en = 0;
+
+       /* When enabling rx stamping from the disabled state, configure
+        * once (still with the old rxctl), then drain stale FIFO
+        * entries before applying the new filter below.
+        * NOTE(review): the first bnxt_hwrm_ptp_cfg() call runs before
+        * ptp->rxctl is updated — presumably intentional to enable
+        * capture before draining; confirm against vendor docs.
+        */
+       if (!old_rxctl && new_rxctl) {
+               rc = bnxt_hwrm_ptp_cfg(bp);
+               if (rc)
+                       goto ts_set_err;
+               ptp->rxctl = new_rxctl;
+               bnxt_clr_rx_ts(bp);
+       }
+
+       rc = bnxt_hwrm_ptp_cfg(bp);
+       if (rc)
+               goto ts_set_err;
+
+       stmpconf.rx_filter = ptp->rx_filter;
+       return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+               -EFAULT : 0;
+
+ts_set_err:
+       /* Roll back to the pre-call configuration. */
+       ptp->rx_filter = old_rx_filter;
+       ptp->rxctl = old_rxctl;
+       ptp->tx_tstamp_en = old_tx_tstamp_en;
+       return rc;
+}
+
+/* SIOCGHWTSTAMP handler: report the current hardware timestamping
+ * configuration to userspace.  The config struct is zeroed first so
+ * that no uninitialized stack bytes (e.g. structure padding) leak to
+ * userspace via copy_to_user().
+ */
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwtstamp_config stmpconf;
+       struct bnxt_ptp_cfg *ptp;
+
+       ptp = bp->ptp_cfg;
+       if (!ptp)
+               return -EOPNOTSUPP;
+
+       memset(&stmpconf, 0, sizeof(stmpconf));
+       stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+       stmpconf.rx_filter = ptp->rx_filter;
+       return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+               -EFAULT : 0;
+}
+
+/* Program GRC window 'reg_win' to cover the 4KB page containing all
+ * 'count' registers in reg_arr.  Fails with -ERANGE if the registers
+ * do not all share one 4KB-aligned base (a window maps one page).
+ */
+static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
+{
+       u32 reg_base = *reg_arr & 0xfffff000;
+       u32 win_off;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               if ((reg_arr[i] & 0xfffff000) != reg_base)
+                       return -ERANGE;
+       }
+       /* Window base registers are consecutive 32-bit slots; window 1
+        * is at offset 0.
+        */
+       win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
+       writel(reg_base, bp->bar0 + win_off);
+       return 0;
+}
+
+/* Map the PTP rx registers through GRC window 5 and the tx registers
+ * through window 6, then record the BAR0 offsets at which each
+ * register is now visible (window 5 appears at 0x5000, window 6 at
+ * 0x6000).  The unused 'reg_base' computations in the original have
+ * been removed (the value was assigned but never read).
+ */
+static int bnxt_map_ptp_regs(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       int rc, i;
+
+       rc = bnxt_map_regs(bp, ptp->rx_regs, BNXT_PTP_RX_REGS, 5);
+       if (rc)
+               return rc;
+
+       rc = bnxt_map_regs(bp, ptp->tx_regs, BNXT_PTP_TX_REGS, 6);
+       if (rc)
+               return rc;
+
+       for (i = 0; i < BNXT_PTP_RX_REGS; i++)
+               ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
+
+       for (i = 0; i < BNXT_PTP_TX_REGS; i++)
+               ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
+
+       return 0;
+}
+
+/* Clear GRC windows 5 and 6 used by bnxt_map_ptp_regs(); offsets 16
+ * and 20 are (reg_win - 1) * 4 for windows 5 and 6 respectively.
+ */
+static void bnxt_unmap_ptp_regs(struct bnxt *bp)
+{
+       writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
+       writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
+}
+
+/* cyclecounter read callback: assemble the 64-bit hardware sync-time
+ * counter from two 32-bit register reads (low word first).
+ * NOTE(review): the two reads are not atomic — a low-word rollover
+ * between them could yield a value off by 2^32; confirm the hardware
+ * latches the high word on the low-word read.
+ */
+static u64 bnxt_cc_read(const struct cyclecounter *cc)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
+       struct bnxt *bp = ptp->bp;
+       u64 ns;
+
+       ns = readl(bp->bar0 + BNXT_GRCPF_REG_SYNC_TIME);
+       ns |= (u64)readl(bp->bar0 + BNXT_GRCPF_REG_SYNC_TIME + 4) << 32;
+       return ns;
+}
+
+/* Fetch one tx timestamp from the PTP tx FIFO into *ts.  Returns
+ * -EAGAIN if the FIFO is empty.  Unlike bnxt_get_rx_ts() there is no
+ * NULL check on bp->ptp_cfg — callers must only invoke this when PTP
+ * is configured.
+ */
+int bnxt_get_tx_ts(struct bnxt *bp, u64 *ts)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       u32 fifo;
+
+       fifo = readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]);
+       if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
+               return -EAGAIN;
+
+       /* Second FIFO read before pulling the timestamp words.
+        * NOTE(review): result is discarded — presumably required by
+        * the hardware access sequence; confirm against vendor docs.
+        */
+       fifo = readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]);
+       *ts = readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]);
+       *ts |= (u64)readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H]) <<
+              32;
+       /* Reading the sequence register appears to pop the FIFO entry;
+        * the value itself is not used here.
+        */
+       readl(bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]);
+       return 0;
+}
+
+/* Fetch one rx timestamp for this port into *ts.  Returns -ENODEV if
+ * PTP is not configured, -EAGAIN if nothing is pending, and -EBUSY
+ * (after draining the FIFO) if more than one entry was pending —
+ * i.e. the single latched timestamp is ambiguous.
+ */
+int bnxt_get_rx_ts(struct bnxt *bp, u64 *ts)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       struct bnxt_pf_info *pf = &bp->pf;
+       u16 port_id;
+       u32 fifo;
+
+       if (!ptp)
+               return -ENODEV;
+
+       fifo = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]);
+       if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
+               return -EAGAIN;
+
+       /* Advance past the entry we are about to read. */
+       port_id = pf->port_id;
+       writel(1 << port_id, bp->bar0 +
+              ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
+
+       fifo = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]);
+       if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
+               /* More than one entry queued — drop them all. */
+               bnxt_clr_rx_ts(bp);
+               return -EBUSY;
+       }
+
+       *ts = readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]);
+       *ts |= (u64)readl(bp->bar0 + ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H]) <<
+              32;
+
+       return 0;
+}
+
+/* PHC capabilities registered with the PTP core.  Re-indented with
+ * tabs to match kernel coding style.
+ * NOTE(review): n_per_out is 1 but bnxt_ptp_enable() rejects all
+ * requests with -ENOTSUPP — confirm whether periodic output should be
+ * advertised at all.
+ */
+static const struct ptp_clock_info bnxt_ptp_caps = {
+       .owner          = THIS_MODULE,
+       .name           = "bnxt clock",
+       .max_adj        = BNXT_MAX_PHC_DRIFT,
+       .n_alarm        = 0,
+       .n_ext_ts       = 0,
+       .n_per_out      = 1,
+       .n_pins         = 0,
+       .pps            = 0,
+       .adjfreq        = bnxt_ptp_adjfreq,
+       .adjtime        = bnxt_ptp_adjtime,
+       .gettime64      = bnxt_ptp_gettime,
+       .settime64      = bnxt_ptp_settime,
+       .enable         = bnxt_ptp_enable,
+};
+
+/* Initialize PTP support: map the PTP registers, set up the 1:1
+ * cyclecounter over the hardware sync-time counter, seed the
+ * timecounter from wall-clock time, and register the PHC.  A NULL
+ * ptp_cfg means PTP is unsupported and is not an error.
+ */
+int bnxt_ptp_init(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       int rc;
+
+       if (!ptp)
+               return 0;
+       rc = bnxt_map_ptp_regs(bp);
+       if (rc)
+               return rc;
+
+       atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+
+       /* shift=0/mult=1: the hardware counter already counts in ns. */
+       memset(&ptp->cc, 0, sizeof(ptp->cc));
+       ptp->cc.read = bnxt_cc_read;
+       ptp->cc.mask = CYCLECOUNTER_MASK(64);
+       ptp->cc.shift = 0;
+       ptp->cc.mult = 1;
+
+       timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+       ptp->ptp_info = bnxt_ptp_caps;
+       ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
+       /* Registration failure is non-fatal: the driver keeps running
+        * without exposing a PHC (ptp_clock stays NULL).
+        */
+       if (IS_ERR(ptp->ptp_clock))
+               ptp->ptp_clock = NULL;
+
+       return 0;
+}
+
+/* Tear down PTP support: unregister the PHC (if it was registered)
+ * and release the GRC register windows.  Safe to call when PTP was
+ * never initialized.
+ */
+void bnxt_ptp_free(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+       if (!ptp)
+               return;
+
+       if (ptp->ptp_clock)
+               ptp_clock_unregister(ptp->ptp_clock);
+
+       ptp->ptp_clock = NULL;
+       bnxt_unmap_ptp_regs(bp);
+}
+
+#else
+
+/* Stub when HAVE_IEEE1588_SUPPORT is not defined: hardware
+ * timestamping cannot be enabled.
+ */
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+       return -EOPNOTSUPP;
+}
+
+/* Stub when HAVE_IEEE1588_SUPPORT is not defined. */
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+       return -EOPNOTSUPP;
+}
+
+/* Stub when HAVE_IEEE1588_SUPPORT is not defined: nothing to set up. */
+int bnxt_ptp_init(struct bnxt *bp)
+{
+       return 0;
+}
+
+/* Stub when HAVE_IEEE1588_SUPPORT is not defined: nothing to free.
+ * (The redundant bare 'return;' in a void function was dropped.)
+ */
+void bnxt_ptp_free(struct bnxt *bp)
+{
+}
+
+#endif
diff --git a/ubuntu/bnxt/bnxt_ptp.h b/ubuntu/bnxt/bnxt_ptp.h
new file mode 100644 (file)
index 0000000..676a4a5
--- /dev/null
@@ -0,0 +1,70 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_PTP_H
+#define BNXT_PTP_H
+
+#define BNXT_MAX_PHC_DRIFT     31000000
+
+/* Per-device PTP (IEEE 1588) state.  The BNXT_PTP_MSG_* bits mirror the
+ * PTP message-type field and describe which received message types are
+ * timestamped (stored in @rxctl).  The BNXT_PTP_RX_*/BNXT_PTP_TX_*
+ * constants are indices into the rx/tx register arrays below.
+ */
+struct bnxt_ptp_cfg {
+#ifdef HAVE_IEEE1588_SUPPORT
+       struct ptp_clock_info   ptp_info;
+       struct ptp_clock        *ptp_clock;
+       struct cyclecounter     cc;
+       struct timecounter      tc;
+#endif
+       struct bnxt             *bp;
+       /* number of TX timestamp slots still free (hardware has one) */
+       atomic_t                tx_avail;
+#define BNXT_MAX_TX_TS 1
+       /* bitmask of PTP message types to timestamp on receive */
+       u16                     rxctl;
+#define BNXT_PTP_MSG_SYNC                      (1 << 0)
+#define BNXT_PTP_MSG_DELAY_REQ                 (1 << 1)
+#define BNXT_PTP_MSG_PDELAY_REQ                        (1 << 2)
+#define BNXT_PTP_MSG_PDELAY_RESP               (1 << 3)
+#define BNXT_PTP_MSG_FOLLOW_UP                 (1 << 8)
+#define BNXT_PTP_MSG_DELAY_RESP                        (1 << 9)
+#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP     (1 << 10)
+#define BNXT_PTP_MSG_ANNOUNCE                  (1 << 11)
+#define BNXT_PTP_MSG_SIGNALING                 (1 << 12)
+#define BNXT_PTP_MSG_MANAGEMENT                        (1 << 13)
+#define BNXT_PTP_MSG_EVENTS            (BNXT_PTP_MSG_SYNC |            \
+                                        BNXT_PTP_MSG_DELAY_REQ |       \
+                                        BNXT_PTP_MSG_PDELAY_REQ |      \
+                                        BNXT_PTP_MSG_PDELAY_RESP)
+       u8                      tx_tstamp_en:1;
+       /* current HWTSTAMP_FILTER_* value reported to user space */
+       int                     rx_filter;
+
+       /* indices into rx_regs[]/rx_mapped_regs[] below */
+#define BNXT_PTP_RX_TS_L       0
+#define BNXT_PTP_RX_TS_H       1
+#define BNXT_PTP_RX_SEQ                2
+#define BNXT_PTP_RX_FIFO       3
+#define BNXT_PTP_RX_FIFO_PENDING 0x1
+#define BNXT_PTP_RX_FIFO_ADV   4
+#define BNXT_PTP_RX_REGS       5
+
+       /* indices into tx_regs[]/tx_mapped_regs[] below */
+#define BNXT_PTP_TX_TS_L       0
+#define BNXT_PTP_TX_TS_H       1
+#define BNXT_PTP_TX_SEQ                2
+#define BNXT_PTP_TX_FIFO       3
+#define BNXT_PTP_TX_FIFO_EMPTY  0x2
+#define BNXT_PTP_TX_REGS       4
+       u32                     rx_regs[BNXT_PTP_RX_REGS];
+       u32                     rx_mapped_regs[BNXT_PTP_RX_REGS];
+       u32                     tx_regs[BNXT_PTP_TX_REGS];
+       u32                     tx_mapped_regs[BNXT_PTP_TX_REGS];
+};
+
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+int bnxt_get_tx_ts(struct bnxt *bp, u64 *ts);
+int bnxt_get_rx_ts(struct bnxt *bp, u64 *ts);
+int bnxt_ptp_init(struct bnxt *bp);
+void bnxt_ptp_free(struct bnxt *bp);
+
+#endif
diff --git a/ubuntu/bnxt/bnxt_sriov.c b/ubuntu/bnxt/bnxt_sriov.c
new file mode 100644 (file)
index 0000000..d6e31d9
--- /dev/null
@@ -0,0 +1,1025 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#ifdef CONFIG_BNXT_SRIOV
+/* Forward an asynchronous event completion to one VF (@vf) or, when
+ * @vf is NULL, broadcast it to all VFs (target id 0xffff).  Used e.g.
+ * to notify VFs of link state changes or PF driver unload.
+ * Returns 0 on success, negative errno on transport failure, or -1 if
+ * firmware reported an error code in the response.
+ */
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+                                         struct bnxt_vf_info *vf,
+                                         u16 event_id)
+{
+       int rc = 0;
+       struct hwrm_fwd_async_event_cmpl_input req = {0};
+       struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_async_event_cmpl *async_cmpl;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+       if (vf)
+               req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+       else
+               /* broadcast this async event to all VFs */
+               req.encap_async_event_target_id = cpu_to_le16(0xffff);
+       async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+       async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+       async_cmpl->event_id = cpu_to_le16(event_id);
+
+       /* hwrm_cmd_lock protects the shared response buffer (resp) */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+                          rc);
+               goto fwd_async_event_cmpl_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_async_event_cmpl_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+#ifdef HAVE_NDO_GET_VF_CONFIG
+/* Common sanity checks for all VF ndo handlers: the PF must be up,
+ * SR-IOV must be enabled, and @vf_id must be a valid VF index.
+ * Returns 0 if the ndo may proceed, -EINVAL otherwise.
+ */
+static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
+{
+       if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+               netdev_err(bp->dev, "vf ndo called though PF is down\n");
+               return -EINVAL;
+       }
+       if (!bp->pf.active_vfs) {
+               netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
+               return -EINVAL;
+       }
+       if (vf_id >= bp->pf.max_vfs) {
+               netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+#ifdef HAVE_VF_SPOOFCHK
+/* ndo_set_vf_spoofchk: enable/disable source-MAC anti-spoof checking
+ * for a VF via HWRM_FUNC_CFG.  Requires firmware spec >= 1.7.1.
+ * The cached BNXT_VF_SPOOFCHK flag is only updated when firmware
+ * accepts the change.
+ */
+int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       bool old_setting = false;
+       u32 func_flags;
+       int rc;
+
+       if (bp->hwrm_spec_code < 0x10701)
+               return -ENOTSUPP;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       vf = &bp->pf.vf[vf_id];
+       if (vf->flags & BNXT_VF_SPOOFCHK)
+               old_setting = true;
+       /* no-op if the requested setting is already in effect */
+       if (old_setting == setting)
+               return 0;
+
+       if (setting)
+               func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+       else
+               func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+       /*TODO: if the driver supports VLAN filter on guest VLAN,
+        * the spoof check should also include vlan anti-spoofing
+        */
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.fid = cpu_to_le16(vf->fw_fid);
+       req.flags = cpu_to_le32(func_flags);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               vf->func_flags = func_flags;
+               if (setting)
+                       vf->flags |= BNXT_VF_SPOOFCHK;
+               else
+                       vf->flags &= ~BNXT_VF_SPOOFCHK;
+       }
+       return rc;
+}
+#endif
+
+/* ndo_get_vf_config: report a VF's cached configuration (MAC, VLAN,
+ * QoS, rates, spoof-check and link state) to rtnetlink.  All values
+ * come from the PF's bnxt_vf_info cache; no firmware call is made.
+ */
+int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+                      struct ifla_vf_info *ivi)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       ivi->vf = vf_id;
+       vf = &bp->pf.vf[vf_id];
+
+       memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+#ifdef HAVE_IFLA_TX_RATE
+       ivi->max_tx_rate = vf->max_tx_rate;
+       ivi->min_tx_rate = vf->min_tx_rate;
+#else
+       ivi->tx_rate = vf->max_tx_rate;
+#endif
+       ivi->vlan = vf->vlan;
+       /* QoS bits are stored in the upper bits of vf->vlan when set */
+       if (vf->flags & BNXT_VF_QOS)
+               ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
+       else
+               ivi->qos = 0;
+#ifdef HAVE_VF_SPOOFCHK
+       ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
+#endif
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       if (!(vf->flags & BNXT_VF_LINK_FORCED))
+               ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+       else if (vf->flags & BNXT_VF_LINK_UP)
+               ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+       else
+               ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+#endif
+
+       return 0;
+}
+
+/* ndo_set_vf_mac: program a VF's default MAC address via HWRM_FUNC_CFG
+ * and cache it in vf->mac_addr.  A zero MAC is intentionally accepted
+ * and means "let the VF use its own MAC".
+ *
+ * NOTE(review): only multicast/broadcast addresses are rejected here;
+ * other malformed non-zero addresses pass through to firmware.  Later
+ * upstream versions validate more strictly — confirm this is intended
+ * for this import.
+ */
+int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+       /* reject bc or mc mac addr, zero mac addr means allow
+        * VF to use its own mac addr
+        */
+       if (is_multicast_ether_addr(mac)) {
+               netdev_err(dev, "Invalid VF ethernet address\n");
+               return -EINVAL;
+       }
+       vf = &bp->pf.vf[vf_id];
+
+       memcpy(vf->mac_addr, mac, ETH_ALEN);
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.fid = cpu_to_le16(vf->fw_fid);
+       req.flags = cpu_to_le32(vf->func_flags);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+       memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* ndo_set_vf_vlan: set a VF's default VLAN tag via HWRM_FUNC_CFG.
+ * Requires firmware spec >= 1.2.1.  Only 802.1Q is supported and a
+ * non-zero user priority (qos) is rejected (see TODO below).  The
+ * cached vf->vlan is updated only when firmware accepts the change.
+ * Two signatures exist because newer kernels pass the VLAN protocol.
+ */
+#ifdef NEW_NDO_SET_VF_VLAN
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
+                    __be16 vlan_proto)
+#else
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+#endif
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       u16 vlan_tag;
+       int rc;
+
+       if (bp->hwrm_spec_code < 0x10201)
+               return -ENOTSUPP;
+
+#ifdef NEW_NDO_SET_VF_VLAN
+       if (vlan_proto != htons(ETH_P_8021Q))
+               return -EPROTONOSUPPORT;
+#endif
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       /* TODO: needed to implement proper handling of user priority,
+        * currently fail the command if there is valid priority
+        */
+       if (vlan_id > 4095 || qos)
+               return -EINVAL;
+
+       vf = &bp->pf.vf[vf_id];
+       vlan_tag = vlan_id;
+       /* no-op if the VLAN is already set to the requested value */
+       if (vlan_tag == vf->vlan)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.fid = cpu_to_le16(vf->fw_fid);
+       req.flags = cpu_to_le32(vf->func_flags);
+       req.dflt_vlan = cpu_to_le16(vlan_tag);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               vf->vlan = vlan_tag;
+       return rc;
+}
+
+/* ndo_set_vf_rate: configure a VF's max (and, on newer kernels, min)
+ * TX bandwidth via HWRM_FUNC_CFG.  Rates are validated against the
+ * PF's current link speed; cached values are updated only on success.
+ */
+#ifdef HAVE_IFLA_TX_RATE
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
+                  int max_tx_rate)
+#else
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int max_tx_rate)
+#endif
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       u32 pf_link_speed;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       vf = &bp->pf.vf[vf_id];
+       pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+       if (max_tx_rate > pf_link_speed) {
+               netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
+                           max_tx_rate, vf_id);
+               return -EINVAL;
+       }
+
+#ifdef HAVE_IFLA_TX_RATE
+       if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+               netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
+                           min_tx_rate, vf_id);
+               return -EINVAL;
+       }
+       /* no-op if both rates are unchanged */
+       if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
+               return 0;
+#else
+       if (max_tx_rate == vf->max_tx_rate)
+               return 0;
+#endif
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.fid = cpu_to_le16(vf->fw_fid);
+       req.flags = cpu_to_le32(vf->func_flags);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+       req.max_bw = cpu_to_le32(max_tx_rate);
+#ifdef HAVE_IFLA_TX_RATE
+       req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+       req.min_bw = cpu_to_le32(min_tx_rate);
+#endif
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+#ifdef HAVE_IFLA_TX_RATE
+               vf->min_tx_rate = min_tx_rate;
+#endif
+               vf->max_tx_rate = max_tx_rate;
+       }
+       return rc;
+}
+
+/* ndo_set_vf_link_state: record the administrative link state for a VF
+ * in vf->flags (auto / forced-up / forced-down) and, when the state is
+ * forced, forward a link-status-change async event so the VF re-reads
+ * its link state.
+ */
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_vf_info *vf;
+       int rc;
+
+       rc = bnxt_vf_ndo_prep(bp, vf_id);
+       if (rc)
+               return rc;
+
+       vf = &bp->pf.vf[vf_id];
+
+       /* clear both flags first, then set per the requested state:
+        * AUTO    = LINK_UP only (follow physical link)
+        * DISABLE = LINK_FORCED only (forced down)
+        * ENABLE  = LINK_UP | LINK_FORCED (forced up)
+        */
+       vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
+       switch (link) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               vf->flags |= BNXT_VF_LINK_UP;
+               break;
+       case IFLA_VF_LINK_STATE_DISABLE:
+               vf->flags |= BNXT_VF_LINK_FORCED;
+               break;
+       case IFLA_VF_LINK_STATE_ENABLE:
+               vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
+               break;
+       default:
+               netdev_err(bp->dev, "Invalid link option\n");
+               rc = -EINVAL;
+               break;
+       }
+       if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+               rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
+                       ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
+       return rc;
+}
+#endif
+#endif
+
+/* Reset the cached per-VF state for the first @num_vfs entries to all
+ * zeros before (re)enabling SR-IOV.  Always returns 0.
+ */
+static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
+{
+       int i;
+       struct bnxt_vf_info *vf;
+
+       for (i = 0; i < num_vfs; i++) {
+               vf = &bp->pf.vf[i];
+               memset(vf, 0, sizeof(*vf));
+       }
+       return 0;
+}
+
+/* Ask firmware to release the resources reserved for each of the
+ * @num_vfs VFs (HWRM_FUNC_VF_RESC_FREE, one call per VF id).  Stops at
+ * the first failure and returns that error; 0 if all succeed.
+ */
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
+{
+       int i, rc = 0;
+       struct bnxt_pf_info *pf = &bp->pf;
+       struct hwrm_func_vf_resc_free_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
+               req.vf_id = cpu_to_le16(i);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Free all host-side SR-IOV resources: the VF event bitmap, the DMA
+ * pages used as VF HWRM request buffers, and the per-VF info array.
+ * Safe against partial allocation — each pointer is checked/NULLed,
+ * so this is the cleanup path for bnxt_alloc_vf_resources() failures.
+ */
+static void bnxt_free_vf_resources(struct bnxt *bp)
+{
+       struct pci_dev *pdev = bp->pdev;
+       int i;
+
+       kfree(bp->pf.vf_event_bmap);
+       bp->pf.vf_event_bmap = NULL;
+
+       /* 4 == ARRAY_SIZE(bp->pf.hwrm_cmd_req_addr); matches the four
+        * req_buf_page_addr slots in HWRM_FUNC_BUF_RGTR
+        */
+       for (i = 0; i < 4; i++) {
+               if (bp->pf.hwrm_cmd_req_addr[i]) {
+                       dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+                                         bp->pf.hwrm_cmd_req_addr[i],
+                                         bp->pf.hwrm_cmd_req_dma_addr[i]);
+                       bp->pf.hwrm_cmd_req_addr[i] = NULL;
+               }
+       }
+
+       kfree(bp->pf.vf);
+       bp->pf.vf = NULL;
+}
+
+/* Allocate host-side resources for @num_vfs VFs: the per-VF info
+ * array, DMA-coherent pages that serve as the VFs' HWRM request
+ * buffers (each VF gets a BNXT_HWRM_REQ_MAX_SIZE slice), and the VF
+ * event bitmap.  On failure returns -ENOMEM and leaves any partial
+ * allocations in place — the caller must invoke
+ * bnxt_free_vf_resources() to clean up.
+ */
+static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
+{
+       struct pci_dev *pdev = bp->pdev;
+       u32 nr_pages, size, i, j, k = 0;
+
+       bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
+       if (!bp->pf.vf)
+               return -ENOMEM;
+
+       bnxt_set_vf_attr(bp, num_vfs);
+
+       /* round the total request-buffer size up to whole pages */
+       size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
+       nr_pages = size / BNXT_PAGE_SIZE;
+       if (size & (BNXT_PAGE_SIZE - 1))
+               nr_pages++;
+
+       for (i = 0; i < nr_pages; i++) {
+               bp->pf.hwrm_cmd_req_addr[i] =
+                       dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+                                          &bp->pf.hwrm_cmd_req_dma_addr[i],
+                                          GFP_KERNEL);
+
+               if (!bp->pf.hwrm_cmd_req_addr[i])
+                       return -ENOMEM;
+
+               /* hand out fixed-size request slots within this page;
+                * k counts VFs assigned so far across all pages
+                */
+               for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
+                       struct bnxt_vf_info *vf = &bp->pf.vf[k];
+
+                       vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
+                                               j * BNXT_HWRM_REQ_MAX_SIZE;
+                       vf->hwrm_cmd_req_dma_addr =
+                               bp->pf.hwrm_cmd_req_dma_addr[i] + j *
+                               BNXT_HWRM_REQ_MAX_SIZE;
+                       k++;
+               }
+       }
+
+       /* Max 128 VF's */
+       bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
+       if (!bp->pf.vf_event_bmap)
+               return -ENOMEM;
+
+       bp->pf.hwrm_cmd_req_pages = nr_pages;
+       return 0;
+}
+
+/* Register the VF HWRM request-buffer pages with firmware
+ * (HWRM_FUNC_BUF_RGTR) so that VF commands can be forwarded to the PF.
+ * The request carries up to four page addresses, matching the
+ * hwrm_cmd_req_dma_addr array filled by bnxt_alloc_vf_resources().
+ */
+static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+       struct hwrm_func_buf_rgtr_input req = {0};
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+
+       req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+       req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+       req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+       req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+       req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+       req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+       req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* only call by PF to reserve resources for VF */
+/* Reserve firmware resources (rings, stat contexts, VNICs, ...) for
+ * each of @num_vfs VFs via HWRM_FUNC_CFG, distributing the PF's spare
+ * resources equally.  On full success, the PF's cached maximums are
+ * reduced by what was handed to the VFs.  pf->active_vfs tracks how
+ * many VFs were successfully configured, so a partial failure can be
+ * unwound by the caller.  Returns 0 or a negative errno.
+ *
+ * Fix vs. import: rc was declared u32 alongside mtu/i, but it stores
+ * negative errno values from _hwrm_send_message() and
+ * __bnxt_hwrm_get_tx_rings() and is returned from an int-returning
+ * function — declare it int to avoid the signed/unsigned confusion.
+ */
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
+{
+       u32 mtu, i;
+       int rc = 0;
+       u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+       u16 vf_ring_grps, max_stat_ctxs;
+       struct hwrm_func_cfg_input req = {0};
+       struct bnxt_pf_info *pf = &bp->pf;
+       int total_vf_tx_rings = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
+       max_stat_ctxs = pf->max_stat_ctxs;
+
+       /* Remaining rings are distributed equally amongs VF's for now */
+       vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+       vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+       /* with aggregation rings, each PF RX ring consumes two HW rings */
+       if (bp->flags & BNXT_FLAG_AGG_RINGS)
+               vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+                             num_vfs;
+       else
+               vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
+       vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+       vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+       vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+       vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+                                 FUNC_CFG_REQ_ENABLES_MRU |
+                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_VNICS |
+                                 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
+
+       mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+       req.mru = cpu_to_le16(mtu);
+       req.mtu = cpu_to_le16(mtu);
+
+       req.num_rsscos_ctxs = cpu_to_le16(1);
+       req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+       req.num_tx_rings = cpu_to_le16(vf_tx_rings);
+       req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+       req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+       req.num_l2_ctxs = cpu_to_le16(4);
+
+       req.num_vnics = cpu_to_le16(vf_vnics);
+       /* FIXME spec currently uses 1 bit for stats ctx */
+       req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       for (i = 0; i < num_vfs; i++) {
+               int vf_tx_rsvd = vf_tx_rings;
+
+               req.fid = cpu_to_le16(pf->first_vf_id + i);
+               rc = _hwrm_send_message(bp, &req, sizeof(req),
+                                       HWRM_CMD_TIMEOUT);
+               if (rc)
+                       break;
+               pf->active_vfs = i + 1;
+               pf->vf[i].fw_fid = le16_to_cpu(req.fid);
+               /* firmware may reserve more TX rings than requested */
+               rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
+                                             &vf_tx_rsvd);
+               if (rc)
+                       break;
+               total_vf_tx_rings += vf_tx_rsvd;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       if (!rc) {
+               pf->max_tx_rings -= total_vf_tx_rings;
+               pf->max_rx_rings -= vf_rx_rings * num_vfs;
+               pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+               pf->max_cp_rings -= vf_cp_rings * num_vfs;
+               pf->max_rsscos_ctxs -= num_vfs;
+               pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+               pf->max_vnics -= vf_vnics * num_vfs;
+       }
+       return rc;
+}
+
+/* Enable SR-IOV for up to *num_vfs VFs.  First shrinks the request to
+ * the largest VF count the PF's spare RX/TX rings, VNICs and RSS
+ * contexts can support (writing the adjusted count back through
+ * @num_vfs), then allocates host buffers, reserves firmware resources,
+ * registers the VF request buffers, notifies the ULP layer, and
+ * finally calls pci_enable_sriov().  On any failure the firmware and
+ * host resources are released again.
+ */
+static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+{
+       int rc = 0, vfs_supported;
+       int min_rx_rings, min_tx_rings, min_rss_ctxs;
+       int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+
+       /* Check if we can enable requested num of vf's. At a minimum
+        * we require 1 RX 1 TX rings for each VF. In this minimum conf
+        * features like TPA will not be available.
+        */
+       vfs_supported = *num_vfs;
+
+       while (vfs_supported) {
+               min_rx_rings = vfs_supported;
+               min_tx_rings = vfs_supported;
+               min_rss_ctxs = vfs_supported;
+
+               /* with aggregation rings each PF RX ring uses two HW rings */
+               if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+                       if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+                           min_rx_rings)
+                               rx_ok = 1;
+               } else {
+                       if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+                           min_rx_rings)
+                               rx_ok = 1;
+               }
+               if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+                       rx_ok = 0;
+
+               if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+                       tx_ok = 1;
+
+               if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+                       rss_ok = 1;
+
+               if (tx_ok && rx_ok && rss_ok)
+                       break;
+
+               vfs_supported--;
+       }
+
+       if (!vfs_supported) {
+               netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
+               return -EINVAL;
+       }
+
+       if (vfs_supported != *num_vfs) {
+               netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
+                           *num_vfs, vfs_supported);
+               *num_vfs = vfs_supported;
+       }
+
+       rc = bnxt_alloc_vf_resources(bp, *num_vfs);
+       if (rc)
+               goto err_out1;
+
+       /* Reserve resources for VFs */
+       rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
+       if (rc)
+               goto err_out2;
+
+       /* Register buffers for VFs */
+       rc = bnxt_hwrm_func_buf_rgtr(bp);
+       if (rc)
+               goto err_out2;
+
+       bnxt_ulp_sriov_cfg(bp, *num_vfs);
+
+       rc = pci_enable_sriov(bp->pdev, *num_vfs);
+       if (rc)
+               goto err_out2;
+
+       return 0;
+
+err_out2:
+       /* Free the resources reserved for various VF's */
+       bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
+
+err_out1:
+       bnxt_free_vf_resources(bp);
+
+       return rc;
+}
+
+/* Disable SR-IOV and release all VF resources.  If VFs are still
+ * assigned to VMs, SR-IOV cannot be torn down — in that case only a
+ * PF-unload async event is broadcast to the VFs and the host-side
+ * buffers are freed.  Afterwards the PF reclaims its ring resources
+ * (under rtnl_lock) and notifies the ULP layer.
+ */
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+       u16 num_vfs = pci_num_vf(bp->pdev);
+
+       if (!num_vfs)
+               return;
+
+       if (pci_vfs_assigned(bp->pdev)) {
+               bnxt_hwrm_fwd_async_event_cmpl(
+                       bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
+               netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
+                           num_vfs);
+       } else {
+               pci_disable_sriov(bp->pdev);
+               /* Free the HW resources reserved for various VF's */
+               bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
+       }
+
+       bnxt_free_vf_resources(bp);
+
+       bp->pf.active_vfs = 0;
+       /* Reclaim all resources for the PF. */
+       rtnl_lock();
+       bnxt_restore_pf_fw_resources(bp);
+       rtnl_unlock();
+
+       bnxt_ulp_sriov_cfg(bp, 0);
+}
+
+/* sriov_configure PCI driver callback: set the number of VFs.
+ * Requires MSI-X mode and an up interface.  bp->sriov_cfg is set while
+ * reconfiguration is in progress so other paths can wait on
+ * sriov_cfg_wait.  Existing VFs are torn down before enabling a new
+ * count.  Returns the number of VFs actually enabled (possibly fewer
+ * than requested, or 0).
+ */
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+               netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
+               return 0;
+       }
+
+       rtnl_lock();
+       if (!netif_running(dev)) {
+               netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+               rtnl_unlock();
+               return 0;
+       }
+       bp->sriov_cfg = true;
+       rtnl_unlock();
+
+       if (pci_vfs_assigned(bp->pdev)) {
+               netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
+               num_vfs = 0;
+               goto sriov_cfg_exit;
+       }
+
+       /* Check if enabled VFs is same as requested */
+       if (num_vfs && num_vfs == bp->pf.active_vfs)
+               goto sriov_cfg_exit;
+
+       /* if there are previous existing VFs, clean them up */
+       bnxt_sriov_disable(bp);
+       if (!num_vfs)
+               goto sriov_cfg_exit;
+
+       /* num_vfs is adjusted down if fewer VFs can be supported */
+       bnxt_sriov_enable(bp, &num_vfs);
+
+sriov_cfg_exit:
+       bp->sriov_cfg = false;
+       wake_up(&bp->sriov_cfg_wait);
+
+       return num_vfs;
+}
+
+#ifndef PCIE_SRIOV_CONFIGURE
+
+static struct workqueue_struct *bnxt_iov_wq;
+
+/* Module-load hook for kernels without the sriov_configure PCI driver
+ * callback: create the workqueue used to enable SR-IOV asynchronously
+ * (only when the module parameter requests VFs).
+ */
+void bnxt_sriov_init(unsigned int num_vfs)
+{
+       if (num_vfs)
+               bnxt_iov_wq = create_singlethread_workqueue("bnxt_iov_wq");
+}
+
+/* Module-unload counterpart of bnxt_sriov_init(): destroy the SR-IOV
+ * workqueue if it was created.
+ */
+void bnxt_sriov_exit(void)
+{
+       if (bnxt_iov_wq)
+               destroy_workqueue(bnxt_iov_wq);
+       bnxt_iov_wq = NULL;
+}
+
+/* Deferred work item: perform the SR-IOV configuration queued by
+ * bnxt_start_sriov() in process context.
+ */
+static void bnxt_iov_task(struct work_struct *work)
+{
+       struct bnxt *bp;
+
+       bp = container_of(work, struct bnxt, iov_task);
+       bnxt_sriov_configure(bp->pdev, bp->req_vfs);
+}
+
+/* Kick off SR-IOV enablement from probe on kernels without the
+ * sriov_configure callback: clamp @num_vfs to the device's TotalVFs
+ * from the SR-IOV extended capability, then queue bnxt_iov_task() on
+ * the module workqueue.  PF-only; a VF or num_vfs==0 is a no-op.
+ */
+void bnxt_start_sriov(struct bnxt *bp, int num_vfs)
+{
+       int pos, req_vfs;
+
+       if (!num_vfs || !BNXT_PF(bp))
+               return;
+
+       pos = pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV);
+       if (!pos) {
+               /* device has no SR-IOV capability */
+               return;
+       } else {
+               u16 t_vf = 0;
+
+               pci_read_config_word(bp->pdev, pos + PCI_SRIOV_TOTAL_VF, &t_vf);
+               req_vfs = min_t(int, num_vfs, (int)t_vf);
+       }
+
+       if (!bnxt_iov_wq) {
+               netdev_warn(bp->dev, "Work queue not available to start SRIOV\n");
+               return;
+       }
+       bp->req_vfs = req_vfs;
+       INIT_WORK(&bp->iov_task, bnxt_iov_task);
+       queue_work(bnxt_iov_wq, &bp->iov_task);
+}
+#endif
+
+/* Forward a pre-built response (@encap_resp, @msg_size bytes) back to
+ * a VF via HWRM_FWD_RESP, delivering it at the VF's response address
+ * and completion ring.  Returns 0 on success, negative errno on
+ * transport failure, or -1 if firmware reported an error code.
+ */
+static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+                             void *encap_resp, __le64 encap_resp_addr,
+                             __le16 encap_resp_cpr, u32 msg_size)
+{
+       int rc = 0;
+       struct hwrm_fwd_resp_input req = {0};
+       struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+       /* Set the new target id */
+       req.target_id = cpu_to_le16(vf->fw_fid);
+       req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+       req.encap_resp_len = cpu_to_le16(msg_size);
+       req.encap_resp_addr = encap_resp_addr;
+       req.encap_resp_cmpl_ring = encap_resp_cpr;
+       memcpy(req.encap_resp, encap_resp, msg_size);
+
+       /* hwrm_cmd_lock protects the shared response buffer (resp) */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
+               goto fwd_resp_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_resp_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Reject a VF's forwarded command: echo the first @msg_size bytes of
+ * the VF's request back through HWRM_REJECT_FWD_RESP so firmware
+ * delivers an error to the VF.  Returns 0 on success, negative errno
+ * on transport failure, or -1 if firmware reported an error code.
+ */
+static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+                                 u32 msg_size)
+{
+       int rc = 0;
+       struct hwrm_reject_fwd_resp_input req = {0};
+       struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+       /* Set the new target id */
+       req.target_id = cpu_to_le16(vf->fw_fid);
+       req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+       memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+       /* hwrm_cmd_lock protects the shared response buffer (resp) */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
+               goto fwd_err_resp_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_err_resp_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Approve and execute a VF's forwarded command: wrap the first
+ * @msg_size bytes of the VF's request in HWRM_EXEC_FWD_RESP so
+ * firmware executes it on the VF's behalf.  Returns 0 on success,
+ * negative errno on transport failure, or -1 on a firmware error code.
+ */
+static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+                                  u32 msg_size)
+{
+       int rc = 0;
+       struct hwrm_exec_fwd_resp_input req = {0};
+       struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+       /* Set the new target id */
+       req.target_id = cpu_to_le16(vf->fw_fid);
+       req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+       memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+       /* hwrm_cmd_lock protects the shared response buffer (resp) */
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
+               goto exec_fwd_resp_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+exec_fwd_resp_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Validate a VF's forwarded L2 filter (set-MAC) request.  If the PF
+ * has not assigned the VF a MAC (cached addr invalid) or the requested
+ * address matches the PF-assigned one, execute the command; otherwise
+ * reject it so the VF cannot override an administratively set MAC.
+ */
+static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+       u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+       struct hwrm_cfa_l2_filter_alloc_input *req =
+               (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+
+       if (!is_valid_ether_addr(vf->mac_addr) ||
+           ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+               return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+       else
+               return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+}
+
+/* Handle a VF's HWRM_PORT_PHY_QCFG query.  When the PF administrator has
+ * not forced the VF link state, the query is simply executed against the
+ * real PHY.  Otherwise a synthetic response is fabricated from the PF's
+ * cached link state: forced-up VFs are always reported as link up (10Gb
+ * full duplex with TX/RX pause if the physical link is actually down),
+ * forced-down VFs as link down.  The fabricated response is forwarded
+ * back through the VF's completion ring.
+ */
+static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+       int rc = 0;
+
+       if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
+               /* real link */
+               rc = bnxt_hwrm_exec_fwd_resp(
+                       bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+       } else {
+               struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+               struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+
+               phy_qcfg_req =
+               (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
+               /* Snapshot the cached PHY state under hwrm_cmd_lock, which
+                * serializes against updates to bp->link_info.
+                */
+               mutex_lock(&bp->hwrm_cmd_lock);
+               memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+                      sizeof(phy_qcfg_resp));
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               /* Echo the VF's sequence id so the response matches its
+                * original request.
+                */
+               phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+
+               if (vf->flags & BNXT_VF_LINK_UP) {
+                       /* if physical link is down, force link up on VF */
+                       if (phy_qcfg_resp.link !=
+                           PORT_PHY_QCFG_RESP_LINK_LINK) {
+                               phy_qcfg_resp.link =
+                                       PORT_PHY_QCFG_RESP_LINK_LINK;
+                               phy_qcfg_resp.link_speed = cpu_to_le16(
+                                       PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
+                               phy_qcfg_resp.duplex_cfg =
+                                       PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
+                               phy_qcfg_resp.duplex_state =
+                                       PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
+                               phy_qcfg_resp.pause =
+                                       (PORT_PHY_QCFG_RESP_PAUSE_TX |
+                                        PORT_PHY_QCFG_RESP_PAUSE_RX);
+                       }
+               } else {
+                       /* force link down */
+                       phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
+                       phy_qcfg_resp.link_speed = 0;
+                       phy_qcfg_resp.duplex_state =
+                               PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
+                       phy_qcfg_resp.pause = 0;
+               }
+               rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
+                                       phy_qcfg_req->resp_addr,
+                                       phy_qcfg_req->cmpl_ring,
+                                       sizeof(phy_qcfg_resp));
+       }
+       return rc;
+}
+
+/* Dispatch one forwarded VF HWRM request by request type.  MAC filter
+ * allocations are validated against the PF-assigned MAC, PHY queries go
+ * through the forced-link logic, FUNC_CFG is currently forwarded as-is.
+ * Unrecognized request types are silently dropped (rc stays 0); the VF
+ * will time out waiting for a response in that case.
+ */
+static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+       int rc = 0;
+       struct input *encap_req = vf->hwrm_cmd_req_addr;
+       u32 req_type = le16_to_cpu(encap_req->req_type);
+
+       switch (req_type) {
+       case HWRM_CFA_L2_FILTER_ALLOC:
+               rc = bnxt_vf_validate_set_mac(bp, vf);
+               break;
+       case HWRM_FUNC_CFG:
+               /* TODO Validate if VF is allowed to change mac address,
+                * mtu, num of rings etc
+                */
+               rc = bnxt_hwrm_exec_fwd_resp(
+                       bp, vf, sizeof(struct hwrm_func_cfg_input));
+               break;
+       case HWRM_PORT_PHY_QCFG:
+               rc = bnxt_vf_set_link(bp, vf);
+               break;
+       default:
+               break;
+       }
+       return rc;
+}
+
+/* Process all pending VF command events.  For each bit set in
+ * pf.vf_event_bmap (one bit per active VF), clear the bit and
+ * validate/forward that VF's pending HWRM request.
+ */
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+       u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
+
+       /* Scan through VF's and process commands */
+       while (1) {
+               vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
+               if (vf_id >= active_vfs)
+                       break;
+
+               clear_bit(vf_id, bp->pf.vf_event_bmap);
+               bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
+               i = vf_id + 1;
+       }
+}
+
+/* VF only: query the firmware (HWRM_FUNC_QCAPS with fid 0xffff, i.e. this
+ * function) for the PF-assigned MAC.  The stored vf.mac_addr always tracks
+ * the firmware value; the netdev dev_addr is overwritten only when that
+ * value is a valid unicast address.
+ */
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+       struct hwrm_func_qcaps_input req = {0};
+       struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+       req.fid = cpu_to_le16(0xffff);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+               goto update_vf_mac_exit;
+
+       /* Store MAC address from the firmware.  There are 2 cases:
+        * 1. MAC address is valid.  It is assigned from the PF and we
+        *    need to override the current VF MAC address with it.
+        * 2. MAC address is zero.  The VF will use a random MAC address by
+        *    default but the stored zero MAC will allow the VF user to change
+        *    the random MAC address using ndo_set_mac_address() if he wants.
+        */
+       if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
+               memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
+
+       /* overwrite netdev dev_addr with admin VF MAC */
+       if (is_valid_ether_addr(bp->vf.mac_addr))
+               memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+update_vf_mac_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+/* VF only: ask the PF, via HWRM_FUNC_VF_CFG, to approve @mac as this VF's
+ * default MAC address.  Pre-1.2.2 firmware (hwrm_spec_code < 0x10202)
+ * cannot carry the request, so the change is allowed only when the PF has
+ * not already assigned a MAC.  Returns 0 if approved (or when called on a
+ * PF, where approval is not needed), -EADDRNOTAVAIL on rejection.
+ */
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+       struct hwrm_func_vf_cfg_input req = {0};
+       int rc = 0;
+
+       if (!BNXT_VF(bp))
+               return 0;
+
+       if (bp->hwrm_spec_code < 0x10202) {
+               if (is_valid_ether_addr(bp->vf.mac_addr))
+                       rc = -EADDRNOTAVAIL;
+               goto mac_done;
+       }
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+       req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+       memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mac_done:
+       if (rc) {
+               rc = -EADDRNOTAVAIL;
+               netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+                           mac);
+       }
+       return rc;
+}
+#else
+
+/* SR-IOV support compiled out: provide no-op stubs so the core driver
+ * can call these entry points unconditionally.
+ */
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+       /* A VF event with SR-IOV compiled out indicates a firmware or
+        * driver inconsistency worth logging.
+        */
+       netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+}
+
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+       return 0;
+}
+#endif
diff --git a/ubuntu/bnxt/bnxt_sriov.h b/ubuntu/bnxt/bnxt_sriov.h
new file mode 100644 (file)
index 0000000..ae56212
--- /dev/null
@@ -0,0 +1,44 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_SRIOV_H
+#define BNXT_SRIOV_H
+
+/* ndo_* SR-IOV VF configuration hooks; prototypes vary with the kernel
+ * features detected by bnxt_compat.h.
+ */
+#ifdef HAVE_NDO_GET_VF_CONFIG
+int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+#ifdef NEW_NDO_SET_VF_VLAN
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
+#else
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+#endif
+#ifdef HAVE_IFLA_TX_RATE
+int bnxt_set_vf_bw(struct net_device *, int, int, int);
+#else
+int bnxt_set_vf_bw(struct net_device *, int, int);
+#endif
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+int bnxt_set_vf_link_state(struct net_device *, int, int);
+#endif
+#ifdef HAVE_VF_SPOOFCHK
+int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+#endif
+#endif
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+/* Fallback module-level SR-IOV control for kernels without
+ * pci_driver->sriov_configure.
+ */
+#ifndef PCIE_SRIOV_CONFIGURE
+void bnxt_start_sriov(struct bnxt *, int);
+void bnxt_sriov_init(unsigned int);
+void bnxt_sriov_exit(void);
+#endif
+void bnxt_sriov_disable(struct bnxt *);
+void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+void bnxt_update_vf_mac(struct bnxt *);
+int bnxt_approve_mac(struct bnxt *, u8 *);
+#endif
diff --git a/ubuntu/bnxt/bnxt_ulp.c b/ubuntu/bnxt/bnxt_ulp.c
new file mode 100644 (file)
index 0000000..6c8eb79
--- /dev/null
@@ -0,0 +1,348 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <asm/byteorder.h>
+#include <linux/bitmap.h>
+
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+
+/* Register ULP @ulp_id (e.g. the RoCE driver) with @edev.  For the RoCE
+ * ULP, BNXT_MIN_ROCE_STAT_CTXS stat contexts are carved out of the
+ * function's maximum, and the default VNIC is reconfigured if the device
+ * is already open.  Caller must hold rtnl_lock.  Returns 0, -EINVAL on a
+ * bad id, -EBUSY if the slot is taken, or -ENOMEM if no stat contexts can
+ * be spared.
+ */
+static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
+                            struct bnxt_ulp_ops *ulp_ops, void *handle)
+{
+       struct net_device *dev = edev->net;
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_ulp *ulp;
+
+       ASSERT_RTNL();
+       if (ulp_id >= BNXT_MAX_ULP)
+               return -EINVAL;
+
+       ulp = &edev->ulp_tbl[ulp_id];
+       if (rcu_access_pointer(ulp->ulp_ops)) {
+               netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
+               return -EBUSY;
+       }
+       if (ulp_id == BNXT_ROCE_ULP) {
+               unsigned int max_stat_ctxs;
+
+               max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+               if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+                   bp->num_stat_ctxs == max_stat_ctxs)
+                       return -ENOMEM;
+               bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
+                                           BNXT_MIN_ROCE_STAT_CTXS);
+       }
+
+       atomic_set(&ulp->ref_count, 0);
+       ulp->handle = handle;
+       /* Publish ulp_ops last so RCU readers see fully-initialized state */
+       rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
+
+       if (ulp_id == BNXT_ROCE_ULP) {
+               if (test_bit(BNXT_STATE_OPEN, &bp->state))
+                       bnxt_hwrm_vnic_cfg(bp, 0);
+       }
+
+       return 0;
+}
+
+/* Unregister ULP @ulp_id from @edev.  Returns the RoCE stat contexts
+ * reserved by bnxt_register_dev(), deregisters any async events, then
+ * tears down the ops pointer under RCU and polls (up to ~1s) for
+ * outstanding callback references to drain.  Caller must hold rtnl_lock.
+ */
+static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
+{
+       struct net_device *dev = edev->net;
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_ulp *ulp;
+       int i = 0;
+
+       ASSERT_RTNL();
+       if (ulp_id >= BNXT_MAX_ULP)
+               return -EINVAL;
+
+       ulp = &edev->ulp_tbl[ulp_id];
+       if (!rcu_access_pointer(ulp->ulp_ops)) {
+               netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
+               return -EINVAL;
+       }
+       if (ulp_id == BNXT_ROCE_ULP) {
+               unsigned int max_stat_ctxs;
+
+               /* Give back exactly what bnxt_register_dev() reserved */
+               max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+               bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs +
+                                           BNXT_MIN_ROCE_STAT_CTXS);
+       }
+       if (ulp->max_async_event_id)
+               bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+
+       RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+       synchronize_rcu();
+       ulp->max_async_event_id = 0;
+       ulp->async_events_bmap = NULL;
+       /* Wait for in-flight callbacks holding ref_count to complete */
+       while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+               msleep(100);
+               i++;
+       }
+       return 0;
+}
+
+/* Hand up to @num_msix MSI-X vectors to the RoCE ULP.  Vectors are taken
+ * from the top of the L2 driver's irq table (those not used by the
+ * cp_nr_rings completion rings), and the function's max IRQ and
+ * completion-ring budgets are reduced accordingly.  Caller must hold
+ * rtnl_lock.  Returns the number of vectors granted, or a negative errno.
+ */
+static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+                             struct bnxt_msix_entry *ent, int num_msix)
+{
+       struct net_device *dev = edev->net;
+       struct bnxt *bp = netdev_priv(dev);
+       int max_idx, max_cp_rings;
+       int avail_msix, i, idx;
+
+       ASSERT_RTNL();
+       if (ulp_id != BNXT_ROCE_ULP)
+               return -EINVAL;
+
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+               return -ENODEV;
+
+       max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+       max_idx = min_t(int, bp->total_irqs, max_cp_rings);
+       avail_msix = max_idx - bp->cp_nr_rings;
+       if (!avail_msix)
+               return -ENOMEM;
+       if (avail_msix > num_msix)
+               avail_msix = num_msix;
+
+       idx = max_idx - avail_msix;
+       for (i = 0; i < avail_msix; i++) {
+               ent[i].vector = bp->irq_tbl[idx + i].vector;
+               ent[i].ring_idx = idx + i;
+               /* 0x80 is the per-ring doorbell stride on this hardware */
+               ent[i].db_offset = (idx + i) * 0x80;
+       }
+       bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
+       bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+       edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+       return avail_msix;
+}
+
+/* Return the MSI-X vectors previously granted to the RoCE ULP: restore
+ * the completion-ring and IRQ budgets and clear the bookkeeping.  Caller
+ * must hold rtnl_lock.
+ */
+static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+{
+       struct net_device *dev = edev->net;
+       struct bnxt *bp = netdev_priv(dev);
+       int max_cp_rings, msix_requested;
+
+       ASSERT_RTNL();
+       if (ulp_id != BNXT_ROCE_ULP)
+               return -EINVAL;
+
+       max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+       msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
+       bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
+       edev->ulp_tbl[ulp_id].msix_requested = 0;
+       bnxt_set_max_func_irqs(bp, bp->total_irqs);
+       return 0;
+}
+
+/* Deduct the resources held by a registered ULP (its granted MSI-X
+ * vectors and one stat context) from the function's completion-ring and
+ * stat-context budgets.  Caller must hold rtnl_lock.
+ * NOTE(review): the bare 1 presumably mirrors BNXT_MIN_ROCE_STAT_CTXS
+ * reserved in bnxt_register_dev() — confirm before changing either.
+ */
+void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
+{
+       ASSERT_RTNL();
+       if (bnxt_ulp_registered(bp->edev, ulp_id)) {
+               struct bnxt_en_dev *edev = bp->edev;
+               unsigned int msix_req, max;
+
+               msix_req = edev->ulp_tbl[ulp_id].msix_requested;
+               max = bnxt_get_max_func_cp_rings(bp);
+               bnxt_set_max_func_cp_rings(bp, max - msix_req);
+               max = bnxt_get_max_func_stat_ctxs(bp);
+               bnxt_set_max_func_stat_ctxs(bp, max - 1);
+       }
+}
+
+/* Send a ULP-built HWRM message to the firmware on the ULP's behalf.
+ * The message's response address is rewritten to the L2 driver's shared
+ * response buffer; on success the response is copied back into
+ * fw_msg->resp, truncated to resp_max_len.  May sleep (takes
+ * hwrm_cmd_lock).  Returns the firmware status.
+ */
+static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+                        struct bnxt_fw_msg *fw_msg)
+{
+       struct net_device *dev = edev->net;
+       struct bnxt *bp = netdev_priv(dev);
+       struct input *req;
+       int rc;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       req = fw_msg->msg;
+       req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+       rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
+                               fw_msg->timeout);
+       if (!rc) {
+               struct output *resp = bp->hwrm_cmd_resp_addr;
+               u32 len = le16_to_cpu(resp->resp_len);
+
+               if (fw_msg->resp_max_len < len)
+                       len = fw_msg->resp_max_len;
+
+               memcpy(fw_msg->resp, resp, len);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+/* Pin a ULP so bnxt_unregister_dev() waits for in-flight callbacks */
+static void bnxt_ulp_get(struct bnxt_ulp *ulp)
+{
+       atomic_inc(&ulp->ref_count);
+}
+
+/* Release a reference taken by bnxt_ulp_get() */
+static void bnxt_ulp_put(struct bnxt_ulp *ulp)
+{
+       atomic_dec(&ulp->ref_count);
+}
+
+/* Notify every registered ULP that the L2 device is stopping (e.g.
+ * before a reset or close).  Caller must hold rtnl_lock, which is why
+ * rtnl_dereference() is sufficient to read ulp_ops.
+ */
+void bnxt_ulp_stop(struct bnxt *bp)
+{
+       struct bnxt_en_dev *edev = bp->edev;
+       struct bnxt_ulp_ops *ops;
+       int i;
+
+       if (!edev)
+               return;
+
+       for (i = 0; i < BNXT_MAX_ULP; i++) {
+               struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+               ops = rtnl_dereference(ulp->ulp_ops);
+               if (!ops || !ops->ulp_stop)
+                       continue;
+               ops->ulp_stop(ulp->handle);
+       }
+}
+
+/* Notify every registered ULP that the L2 device is (re)starting.
+ * Counterpart of bnxt_ulp_stop(); caller must hold rtnl_lock.
+ */
+void bnxt_ulp_start(struct bnxt *bp)
+{
+       struct bnxt_en_dev *edev = bp->edev;
+       struct bnxt_ulp_ops *ops;
+       int i;
+
+       if (!edev)
+               return;
+
+       for (i = 0; i < BNXT_MAX_ULP; i++) {
+               struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+               ops = rtnl_dereference(ulp->ulp_ops);
+               if (!ops || !ops->ulp_start)
+                       continue;
+               ops->ulp_start(ulp->handle);
+       }
+}
+
+/* Notify every registered ULP of an SR-IOV reconfiguration (@num_vfs VFs
+ * enabled, 0 on disable).  The callback may sleep, so it cannot run under
+ * rcu_read_lock(): a reference is taken while the ops pointer is still
+ * protected, the RCU lock is dropped, and the reference keeps
+ * bnxt_unregister_dev() from completing until the callback returns.
+ */
+void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
+{
+       struct bnxt_en_dev *edev = bp->edev;
+       struct bnxt_ulp_ops *ops;
+       int i;
+
+       if (!edev)
+               return;
+
+       for (i = 0; i < BNXT_MAX_ULP; i++) {
+               struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+               rcu_read_lock();
+               ops = rcu_dereference(ulp->ulp_ops);
+               if (!ops || !ops->ulp_sriov_config) {
+                       rcu_read_unlock();
+                       continue;
+               }
+               bnxt_ulp_get(ulp);
+               rcu_read_unlock();
+               ops->ulp_sriov_config(ulp->handle, num_vfs);
+               bnxt_ulp_put(ulp);
+       }
+}
+
+/* Fan out a firmware async event completion to every ULP that subscribed
+ * to that event id.  Runs entirely under rcu_read_lock(); the notifier
+ * callback must not sleep (BH context).
+ */
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+       u16 event_id = le16_to_cpu(cmpl->event_id);
+       struct bnxt_en_dev *edev = bp->edev;
+       struct bnxt_ulp_ops *ops;
+       int i;
+
+       if (!edev)
+               return;
+
+       rcu_read_lock();
+       for (i = 0; i < BNXT_MAX_ULP; i++) {
+               struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+               ops = rcu_dereference(ulp->ulp_ops);
+               if (!ops || !ops->ulp_async_notifier)
+                       continue;
+               if (!ulp->async_events_bmap ||
+                   event_id > ulp->max_async_event_id)
+                       continue;
+
+               /* Read max_async_event_id first before testing the bitmap.
+                * Pairs with the smp_wmb() in bnxt_register_async_events().
+                */
+               smp_rmb();
+               if (test_bit(event_id, ulp->async_events_bmap))
+                       ops->ulp_async_notifier(ulp->handle, cmpl);
+       }
+       rcu_read_unlock();
+}
+
+/* Subscribe a ULP to firmware async events.  @events_bmap marks the
+ * event ids of interest (bits 0..max_id); the bitmap must be published
+ * before max_id so that a concurrent bnxt_ulp_async_events() never tests
+ * an unset bitmap pointer — hence the smp_wmb(), which pairs with its
+ * smp_rmb().  The combined event set is then registered with firmware.
+ */
+static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
+                                     unsigned long *events_bmap, u16 max_id)
+{
+       struct net_device *dev = edev->net;
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_ulp *ulp;
+
+       if (ulp_id >= BNXT_MAX_ULP)
+               return -EINVAL;
+
+       ulp = &edev->ulp_tbl[ulp_id];
+       ulp->async_events_bmap = events_bmap;
+       /* Make sure bnxt_ulp_async_events() sees this order */
+       smp_wmb();
+       ulp->max_async_event_id = max_id;
+       bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
+       return 0;
+}
+
+/* Ops table handed to ULPs via bnxt_ulp_probe()/struct bnxt_en_dev */
+static const struct bnxt_en_ops bnxt_en_ops_tbl = {
+       .bnxt_register_device   = bnxt_register_dev,
+       .bnxt_unregister_device = bnxt_unregister_dev,
+       .bnxt_request_msix      = bnxt_req_msix_vecs,
+       .bnxt_free_msix         = bnxt_free_msix_vecs,
+       .bnxt_send_fw_msg       = bnxt_send_msg,
+       .bnxt_register_fw_async_events  = bnxt_register_async_events,
+};
+
+/* Entry point for ULP drivers: return (lazily allocating on first call)
+ * the bnxt_en_dev handle for @dev, with RoCE capability flags copied from
+ * the L2 driver.  Returns ERR_PTR(-ENOMEM) if allocation fails.  The
+ * allocation is cached in bp->edev for the device's lifetime.
+ */
+struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_en_dev *edev;
+
+       edev = bp->edev;
+       if (!edev) {
+               edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+               if (!edev)
+                       return ERR_PTR(-ENOMEM);
+               edev->en_ops = &bnxt_en_ops_tbl;
+               if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+                       edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+               if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+                       edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
+               edev->net = dev;
+               edev->pdev = bp->pdev;
+               bp->edev = edev;
+       }
+       return bp->edev;
+}
diff --git a/ubuntu/bnxt/bnxt_ulp.h b/ubuntu/bnxt/bnxt_ulp.h
new file mode 100644 (file)
index 0000000..ff3474a
--- /dev/null
@@ -0,0 +1,94 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ULP_H
+#define BNXT_ULP_H
+
+#include <linux/version.h>
+
+/* ULP slot ids within bnxt_en_dev::ulp_tbl */
+#define BNXT_ROCE_ULP  0
+#define BNXT_OTHER_ULP 1
+#define BNXT_MAX_ULP   2
+
+/* Minimum resources carved out for the RoCE ULP at registration */
+#define BNXT_MIN_ROCE_CP_RINGS 2
+#define BNXT_MIN_ROCE_STAT_CTXS        1
+
+struct hwrm_async_event_cmpl;
+struct bnxt;
+
+/* Callbacks a ULP provides at registration; all take the ULP's opaque
+ * handle as the first argument.
+ */
+struct bnxt_ulp_ops {
+       /* async_notifier() cannot sleep (in BH context) */
+       void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+       void (*ulp_stop)(void *);
+       void (*ulp_start)(void *);
+       void (*ulp_sriov_config)(void *, int);
+};
+
+/* One MSI-X vector granted to a ULP */
+struct bnxt_msix_entry {
+       u32     vector;         /* Linux IRQ vector */
+       u32     ring_idx;       /* completion ring index */
+       u32     db_offset;      /* doorbell offset for this ring */
+};
+
+/* A raw HWRM message a ULP asks the L2 driver to send on its behalf */
+struct bnxt_fw_msg {
+       void    *msg;           /* request buffer */
+       int     msg_len;
+       void    *resp;          /* caller's response buffer */
+       int     resp_max_len;   /* response copy is truncated to this */
+       int     timeout;
+};
+
+/* Per-ULP registration state */
+struct bnxt_ulp {
+       void            *handle;        /* opaque, passed to all callbacks */
+       struct bnxt_ulp_ops __rcu *ulp_ops;
+       unsigned long   *async_events_bmap;     /* subscribed event ids */
+       u16             max_async_event_id;     /* highest valid bit above */
+       u16             msix_requested;         /* vectors granted to ULP */
+       atomic_t        ref_count;      /* in-flight callback pin count */
+};
+
+/* Handle shared between the L2 driver and its ULPs */
+struct bnxt_en_dev {
+       struct net_device *net;
+       struct pci_dev *pdev;
+       u32 flags;
+       #define BNXT_EN_FLAG_ROCEV1_CAP         0x1
+       #define BNXT_EN_FLAG_ROCEV2_CAP         0x2
+       #define BNXT_EN_FLAG_ROCE_CAP           (BNXT_EN_FLAG_ROCEV1_CAP | \
+                                                BNXT_EN_FLAG_ROCEV2_CAP)
+       const struct bnxt_en_ops        *en_ops;
+       struct bnxt_ulp                 ulp_tbl[BNXT_MAX_ULP];
+};
+
+/* Services the L2 driver exposes to ULPs (see bnxt_en_ops_tbl) */
+struct bnxt_en_ops {
+       int (*bnxt_register_device)(struct bnxt_en_dev *, int,
+                                   struct bnxt_ulp_ops *, void *);
+       int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
+       int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
+                                struct bnxt_msix_entry *, int);
+       int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
+       int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
+                               struct bnxt_fw_msg *);
+       int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
+                                            unsigned long *, u16);
+};
+
+/* True if ULP slot @ulp_id currently has ops registered */
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+{
+       if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+               return true;
+       return false;
+}
+
+void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
+void bnxt_ulp_stop(struct bnxt *bp);
+void bnxt_ulp_start(struct bnxt *bp);
+void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
+struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
+#endif
diff --git a/ubuntu/bnxt/bnxt_xdp.c b/ubuntu/bnxt/bnxt_xdp.c
new file mode 100644 (file)
index 0000000..9a9d80f
--- /dev/null
@@ -0,0 +1,252 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#ifdef HAVE_NDO_XDP
+#include <linux/bpf.h>
+#ifdef HAVE_BPF_TRACE
+#include <linux/bpf_trace.h>
+#endif
+#include <linux/filter.h>
+#endif
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_xdp.h"
+
+/* Queue one XDP_TX packet (len bytes at DMA address mapping) on @txr.
+ * rx_prod is stashed in the tx buffer so bnxt_tx_int_xdp() can replay
+ * the RX producer after the transmit completes.  Only the BD is written;
+ * the caller is responsible for ringing the TX doorbell.
+ */
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+                  dma_addr_t mapping, u32 len, u16 rx_prod)
+{
+       struct bnxt_sw_tx_bd *tx_buf;
+       struct tx_bd *txbd;
+       u32 flags;
+       u16 prod;
+
+       prod = txr->tx_prod;
+       tx_buf = &txr->tx_buf_ring[prod];
+       tx_buf->rx_prod = rx_prod;
+
+       txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+       /* Single-BD packet: set length, BD count of 1, end-of-packet, and
+        * the length hint bucket (len >> 9, i.e. 512-byte granularity).
+        */
+       flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
+               TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+       txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+       txbd->tx_bd_opaque = prod;
+       txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+       prod = NEXT_TX(prod);
+       txr->tx_prod = prod;
+}
+
+#ifdef HAVE_NDO_XDP
+/* Reclaim @nr_pkts completed XDP_TX descriptors, then advance the RX
+ * producer: if the TX ring has fully drained, the current rx_prod is
+ * safe; otherwise replay the rx_prod recorded with the last completed
+ * TX buffer so RX buffers still queued for transmit are not recycled.
+ */
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+       struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       struct bnxt_sw_tx_bd *tx_buf;
+       u16 tx_cons = txr->tx_cons;
+       u16 last_tx_cons = tx_cons;
+       u16 rx_prod;
+       int i;
+
+       for (i = 0; i < nr_pkts; i++) {
+               last_tx_cons = tx_cons;
+               tx_cons = NEXT_TX(tx_cons);
+       }
+       txr->tx_cons = tx_cons;
+       if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
+               rx_prod = rxr->rx_prod;
+       } else {
+               tx_buf = &txr->tx_buf_ring[last_tx_cons];
+               rx_prod = tx_buf->rx_prod;
+       }
+       writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
+}
+
+/* Run the attached XDP program on one received packet.
+ *
+ * returns the following:
+ * true    - packet consumed by XDP and new buffer is allocated.
+ * false   - packet should be passed to the stack.
+ *
+ * On XDP_PASS, *data_ptr/*len may have been adjusted if the program
+ * moved xdp.data.  On XDP_TX the page is handed to the TX ring and the
+ * RX slot is refilled via bnxt_reuse_rx_data().
+ */
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+                struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+{
+       struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+       struct bnxt_tx_ring_info *txr;
+       struct bnxt_sw_rx_bd *rx_buf;
+       struct pci_dev *pdev;
+       struct xdp_buff xdp;
+       dma_addr_t mapping;
+       void *orig_data;
+       u32 tx_avail;
+       u32 offset;
+       u32 act;
+
+       if (!xdp_prog)
+               return false;
+
+       pdev = bp->pdev;
+       txr = rxr->bnapi->tx_ring;
+       rx_buf = &rxr->rx_buf_ring[cons];
+       offset = bp->rx_offset;
+
+       /* data_hard_start only exists on kernels with XDP headroom */
+#if XDP_PACKET_HEADROOM
+       xdp.data_hard_start = *data_ptr - offset;
+#endif
+       xdp.data = *data_ptr;
+       xdp.data_end = *data_ptr + *len;
+       orig_data = xdp.data;
+       mapping = rx_buf->mapping - bp->rx_dma_offset;
+
+       dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+       rcu_read_lock();
+       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+       rcu_read_unlock();
+
+       tx_avail = bnxt_tx_avail(bp, txr);
+       /* If the tx ring is not full, we must not update the rx producer yet
+        * because we may still be transmitting on some BDs.
+        */
+       if (tx_avail != bp->tx_ring_size)
+               *event &= ~BNXT_RX_EVENT;
+
+       /* Program adjusted the packet head; propagate the new bounds */
+       if (orig_data != xdp.data) {
+#if XDP_PACKET_HEADROOM
+               offset = xdp.data - xdp.data_hard_start;
+               *data_ptr = xdp.data_hard_start + offset;
+               *len = xdp.data_end - xdp.data;
+#endif
+       }
+       switch (act) {
+       case XDP_PASS:
+               return false;
+
+       case XDP_TX:
+               if (tx_avail < 1) {
+                       trace_xdp_exception(bp->dev, xdp_prog, act);
+                       bnxt_reuse_rx_data(rxr, cons, page);
+                       return true;
+               }
+
+               *event = BNXT_TX_EVENT;
+               dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
+                                          bp->rx_dir);
+               bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
+                             NEXT_RX(rxr->rx_prod));
+               bnxt_reuse_rx_data(rxr, cons, page);
+               return true;
+       default:
+               bpf_warn_invalid_xdp_action(act);
+               /* Fall thru */
+       case XDP_ABORTED:
+               trace_xdp_exception(bp->dev, xdp_prog, act);
+               /* Fall thru */
+       case XDP_DROP:
+               bnxt_reuse_rx_data(rxr, cons, page);
+               break;
+       }
+       return true;
+}
+
+/* Under rtnl_lock */
+/* Attach (@prog non-NULL) or detach an XDP program.  Requires page-mode
+ * MTU and combined rx/tx channels; reserves one extra TX ring per RX
+ * ring for XDP_TX.  The NIC is closed and reopened around the swap when
+ * it is running.  Returns 0 or a negative errno.
+ */
+static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
+{
+       struct net_device *dev = bp->dev;
+       int tx_xdp = 0, rc, tc;
+       struct bpf_prog *old;
+
+       if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+               netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+                           bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
+               return -EOPNOTSUPP;
+       }
+       if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
+               netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
+               return -EOPNOTSUPP;
+       }
+       if (prog)
+               tx_xdp = bp->rx_nr_rings;
+
+       tc = netdev_get_num_tc(dev);
+       if (!tc)
+               tc = 1;
+       /* Verify ring resources before committing to the new program */
+       rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+                               true, tc, tx_xdp);
+       if (rc) {
+               netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
+               return rc;
+       }
+       if (netif_running(dev))
+               bnxt_close_nic(bp, true, false);
+
+       old = xchg(&bp->xdp_prog, prog);
+       if (old)
+               bpf_prog_put(old);
+
+       if (prog) {
+               /* XDP requires single-page RX buffers (disables aggregation) */
+               bnxt_set_rx_skb_mode(bp, true);
+       } else {
+               int rx, tx;
+
+               bnxt_set_rx_skb_mode(bp, false);
+               bnxt_get_max_rings(bp, &rx, &tx, true);
+               if (rx > 1) {
+                       bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+                       bp->dev->hw_features |= NETIF_F_LRO;
+               }
+       }
+       bp->tx_nr_rings_xdp = tx_xdp;
+       bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
+       bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+       bp->num_stat_ctxs = bp->cp_nr_rings;
+       bnxt_set_tpa_flags(bp);
+       bnxt_set_ring_params(bp);
+
+       if (netif_running(dev))
+               return bnxt_open_nic(bp, true, false);
+
+       return 0;
+}
+
+/* ndo_xdp entry point: dispatch XDP setup and query commands */
+int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               rc = bnxt_xdp_set(bp, xdp->prog);
+               break;
+       case XDP_QUERY_PROG:
+               xdp->prog_attached = !!bp->xdp_prog;
+#ifdef HAVE_IFLA_XDP_PROG_ID
+               xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
+#endif
+               rc = 0;
+               break;
+       default:
+               rc = -EINVAL;
+               break;
+       }
+       return rc;
+}
+#else
+/* Kernel lacks ndo_xdp: no-op stubs so the fast path links unchanged */
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+}
+
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+                void *page, u8 **data_ptr, unsigned int *len, u8 *event)
+{
+       return false;
+}
+#endif
diff --git a/ubuntu/bnxt/bnxt_xdp.h b/ubuntu/bnxt/bnxt_xdp.h
new file mode 100644 (file)
index 0000000..affdbe5
--- /dev/null
@@ -0,0 +1,27 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_XDP_H
+#define BNXT_XDP_H
+
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+                  dma_addr_t mapping, u32 len, u16 rx_prod);
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
+/* bnxt_rx_xdp's page argument is typed only when the kernel has ndo_xdp
+ * (and thus struct page in play); otherwise it degrades to void *.
+ */
+#ifdef HAVE_NDO_XDP
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+                struct page *page, u8 **data_ptr, unsigned int *len,
+                u8 *event);
+#else
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+                void *page, u8 **data_ptr, unsigned int *len,
+                u8 *event);
+#endif
+int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+
+#endif