/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

static struct workqueue_struct *mlxsw_wq;

struct mlxsw_core_pcpu_stats {
	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync	syncp;
	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32			trap_rx_invalid;
	u32			port_rx_invalid;
};

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_resources resources;
	struct mlxsw_hwmon *hwmon;
	unsigned long driver_priv[0];
	/* driver_priv must always be the last item */
};

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

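/* EMADs (Ethernet Management Datagrams) carry register access requests
 * to the device as ordinary Ethernet frames trapped to the CPU port.
 * The items below describe the frame layout that is assembled further
 * down: Ethernet header, operation TLV, register TLV holding the
 * register payload, and a terminating end TLV.
 */
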
/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

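/* The frame is built back to front: every skb_push() below prepends the
 * next part in front of what is already there, so the end TLV goes in
 * first and the Ethernet header last.
 */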
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS 200

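/* Each transmitted EMAD arms a delayed work with the timeout above. If
 * the response does not win the race on trans->active, the transaction
 * is retransmitted, up to MLXSW_EMAD_MAX_RETRY times, before it is
 * failed with -EIO.
 */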
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	u64 tid;
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_core->emad.use_emad = false;
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}

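/* Allocate an skb big enough for the register payload plus all EMAD
 * headers, and reserve the full length up front; the frame is then
 * filled backwards by mlxsw_emad_construct() using skb_push().
 */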
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

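/* debugfs "rx_stats" reader: aggregate the per-CPU RX counters per trap
 * ID and per local port, using the u64_stats seqcount to obtain
 * consistent 64-bit snapshots also on 32-bit hosts.
 */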
static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);

	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}

	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}

static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

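/* devlink glue: each callback below validates its arguments and then
 * forwards to the corresponding optional driver hook, returning
 * -EOPNOTSUPP when the underlying driver does not implement it.
 */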
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= MLXSW_PORT_MAX_PORTS)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= MLXSW_PORT_MAX_PORTS)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};

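/* Bring-up order matters below: the bus must be initialized before EMAD
 * (EMAD setup goes through the command interface), and EMAD before the
 * driver init (which already uses EMAD-based register access). The
 * error path unwinds in exact reverse order.
 */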
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	if (mlxsw_driver->profile->used_max_lag &&
	    mlxsw_driver->profile->used_max_port_per_lag) {
		alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
			     mlxsw_driver->profile->max_port_per_lag;
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->resources);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
err_hwmon_init:
	devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	kfree(mlxsw_core->lag.mapping);
	free_percpu(mlxsw_core->pcpu_stats);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

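/* Transaction IDs combine a random upper half (chosen once in
 * mlxsw_emad_init()) with a monotonically increasing lower half, so
 * responses can be paired with requests and EMADs addressed to other
 * hosts can be told apart.
 */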
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);

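/* A rough usage sketch of the transaction API from a driver's point of
 * view (register name and callback here are made up for illustration):
 *
 *	LIST_HEAD(bulk_list);
 *	char foo_pl[MLXSW_REG_FOO_LEN];
 *	int err;
 *
 *	mlxsw_reg_foo_pack(foo_pl, ...);
 *	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(foo), foo_pl,
 *				    &bulk_list, foo_cb, (unsigned long) priv);
 *	if (err)
 *		return err;
 *	...queue up more transactions on the same bulk_list...
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 *
 * Queries and writes are issued asynchronously; the single bulk_wait
 * call then blocks until every queued EMAD has completed and returns
 * the first error seen, if any.
 */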
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

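/* RX fast path: resolve the ingress local port (directly, or through
 * the LAG mapping for packets received over a LAG), bump the per-CPU
 * counters and hand the skb to the first matching registered listener;
 * unmatched or out-of-range packets are counted and dropped.
 */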
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from a LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

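/* The LAG mapping is a flat max_lag x max_port_per_lag array indexed by
 * (lag_id, port_index) and holding the member's local port; a zero
 * entry means the slot is unused (the array starts out kzalloc'ed).
 */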
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
{
	return &mlxsw_core->resources;
}
EXPORT_SYMBOL(mlxsw_core_resources_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
			 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
			 struct net_device *dev, bool split, u32 split_group)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
	return devlink_port_register(devlink, devlink_port, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
{
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

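/* Dump a mailbox as 32-bit big-endian words for debugging, trimming
 * trailing all-zero words so the log stays short.
 */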
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root) {
		err = -ENOMEM;
		goto err_debugfs_create_dir;
	}
	return 0;

err_debugfs_create_dir:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");