ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
- if (dp && dp->lag_dev) {
+ if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
* the special "LAG device" in the PVT, using
* the LAG ID (one-based) as the port number
* (zero-based).
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
- port = dsa_lag_id(dst, dp->lag_dev) - 1;
+ port = dsa_port_lag_id_get(dp) - 1;
}
}
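
To make the translation above concrete, here is a minimal sketch (not the driver's code; it assumes only the helpers introduced by this patch) of mapping a (dev, port) pair into the PVT's address space for a LAG member:

/* Sketch: LAG members are addressed in the PVT via the special
 * trunk device, using the one-based DSA LAG ID minus one as the
 * zero-based port number.
 */
static void pvt_addr_for_port(struct dsa_port *dp, int *dev, int *port)
{
	if (dp && dp->lag) {
		*dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
		*port = dsa_port_lag_id_get(dp) - 1;
	}
}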
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (dsa_to_port(ds, port)->lag_dev)
+ if (dsa_to_port(ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
*/
return;
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag_dev,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
if (!mv88e6xxx_has_lag(chip))
return false;
- id = dsa_lag_id(ds->dst, lag_dev);
- if (id <= 0 || id > ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag_dev)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
return true;
}
-static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds,
- struct net_device *lag_dev)
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
u16 map = 0;
int id;
/* DSA LAG IDs are one-based, hardware is zero-based */
- id = dsa_lag_id(ds->dst, lag_dev) - 1;
+ id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
- dsa_lag_foreach_port(dp, ds->dst, lag_dev)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
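
For intuition, a sketch with hypothetical topology: if a member port lives on remote switch 1, port 0, and this chip reaches switch 1 through its local DSA port 9, then dsa_towards_port() resolves to 9 and it is the uplink's bit that lands in the trunk map:

/* Hypothetical values: remote member (sw 1, port 0) reached via
 * local port 9; a second, local member on port 2.
 */
u16 map = 0;

map |= BIT(dsa_towards_port(ds, 1, 0));		/* BIT(9): DSA uplink */
map |= BIT(dsa_towards_port(ds, ds->index, 2));	/* BIT(2): local user port */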
static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct net_device *lag_dev;
unsigned int id, num_tx;
struct dsa_port *dp;
+ struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
/* Disable all masks for ports that _are_ members of a LAG. */
dsa_switch_for_each_port(dp, ds) {
- if (!dp->lag_dev)
+ if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
- lag_dev = dsa_lag_dev(ds->dst, id);
- if (!lag_dev)
+ lag = dsa_lag_by_id(ds->dst, id);
+ if (!lag)
continue;
num_tx = 0;
- dsa_lag_foreach_port(dp, ds->dst, lag_dev) {
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
if (dp->lag_tx_enabled)
num_tx++;
}
if (!num_tx)
	continue;
nth = 0;
- dsa_lag_foreach_port(dp, ds->dst, lag_dev) {
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
if (!dp->lag_tx_enabled)
continue;
}
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
- struct net_device *lag_dev)
+ struct dsa_lag lag)
{
int err;
err = mv88e6xxx_lag_sync_masks(ds);
if (!err)
- err = mv88e6xxx_lag_sync_map(ds, lag_dev);
+ err = mv88e6xxx_lag_sync_map(ds, lag);
return err;
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag_dev,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
- if (!mv88e6xxx_lag_can_offload(ds, lag_dev, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
/* DSA LAG IDs are one-based */
- id = dsa_lag_id(ds->dst, lag_dev) - 1;
+ id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_trunk(chip, port, true, id);
if (err)
goto err_unlock;
- err = mv88e6xxx_lag_sync_masks_map(ds, lag_dev);
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
if (err)
goto err_clear_trunk;
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag_dev)
+ struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
mv88e6xxx_reg_lock(chip);
- err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag_dev);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
mv88e6xxx_reg_unlock(chip);
return err_sync ? : err_trunk;
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag_dev,
+ int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_lag_can_offload(ds, lag_dev, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_lag_sync_masks_map(ds, lag_dev);
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
if (err)
goto unlock;
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag_dev)
+ int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
mv88e6xxx_reg_lock(chip);
- err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag_dev);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
mv88e6xxx_reg_unlock(chip);
return err_sync ? : err_pvt;
}
static int felix_lag_join(struct dsa_switch *ds, int port,
- struct net_device *bond,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_port_lag_join(ocelot, port, bond, info);
+ return ocelot_port_lag_join(ocelot, port, lag.dev, info);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *bond)
+ struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_lag_leave(ocelot, port, bond);
+ ocelot_port_lag_leave(ocelot, port, lag.dev);
return 0;
}
}
static bool
-qca8k_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag_dev,
+qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
- id = dsa_lag_id(ds->dst, lag_dev);
- if (id <= 0 || id > ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag_dev)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
}
static int
-qca8k_lag_setup_hash(struct dsa_switch *ds,
- struct net_device *lag_dev,
+qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
+ struct net_device *lag_dev = lag.dev;
struct qca8k_priv *priv = ds->priv;
bool unique_lag = true;
+ unsigned int i;
u32 hash = 0;
- int i, id;
-
- id = dsa_lag_id(ds->dst, lag_dev);
switch (info->hash_type) {
case NETDEV_LAG_HASH_L23:
/* Check if we are the unique configured LAG */
dsa_lags_foreach_id(i, ds->dst)
- if (i != id && dsa_lag_dev(ds->dst, i)) {
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
unique_lag = false;
break;
}
static int
qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
- struct net_device *lag_dev, bool delete)
+ struct dsa_lag lag, bool delete)
{
struct qca8k_priv *priv = ds->priv;
int ret, id, i;
u32 val;
/* DSA LAG IDs are one-based, hardware is zero-based */
- id = dsa_lag_id(ds->dst, lag_dev) - 1;
+ id = lag.id - 1;
/* Read current port member */
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
}
static int
-qca8k_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag_dev,
+qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
int ret;
- if (!qca8k_lag_can_offload(ds, lag_dev, info))
+ if (!qca8k_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
- ret = qca8k_lag_setup_hash(ds, lag_dev, info);
+ ret = qca8k_lag_setup_hash(ds, lag, info);
if (ret)
return ret;
- return qca8k_lag_refresh_portmap(ds, port, lag_dev, false);
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
}
static int
qca8k_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag_dev)
+ struct dsa_lag lag)
{
- return qca8k_lag_refresh_portmap(ds, port, lag_dev, true);
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
static void
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
+struct dsa_lag {
+ struct net_device *dev;
+ unsigned int id;
+ refcount_t refcount;
+};
+
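
A reading aid for the new structure, with semantics inferred from the rest of this patch: exactly one instance exists per offloaded bond/team device in a tree, shared by all member ports.

/* Sketch of the invariants (not part of the patch itself):
 *   lag->dev      - the bonding/team netdev being offloaded
 *   lag->id       - one-based index into dst->lags; 0 when unmapped
 *   lag->refcount - number of dsa_ports whose dp->lag points here
 */
struct dsa_lag *lag = dp->lag;	/* NULL on ports outside any LAG */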
struct dsa_switch_tree {
struct list_head list;
/* Maps offloaded LAG netdevs to a zero-based linear ID for
* drivers that need it.
*/
- struct net_device **lags;
+ struct dsa_lag **lags;
/* Tagging protocol operations */
const struct dsa_device_ops *tag_ops;
#define dsa_lag_foreach_port(_dp, _dst, _lag) \
list_for_each_entry((_dp), &(_dst)->ports, list) \
- if ((_dp)->lag_dev == (_lag))
+ if (dsa_port_offloads_lag((_dp), (_lag)))
#define dsa_hsr_foreach_port(_dp, _ds, _hsr) \
list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
-static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
- unsigned int id)
+static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
+ unsigned int id)
{
/* DSA LAG IDs are one-based, dst->lags is zero-based */
return dst->lags[id - 1];
}

static inline int dsa_lag_id(struct dsa_switch_tree *dst,
			     struct net_device *lag_dev)
{
	unsigned int id;
dsa_lags_foreach_id(id, dst) {
- if (dsa_lag_dev(dst, id) == lag_dev)
- return id;
+ struct dsa_lag *lag = dsa_lag_by_id(dst, id);
+
+ if (lag->dev == lag_dev)
+ return lag->id;
}
return -ENODEV;
}
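
Usage sketch for the rewritten lookup, where bond_dev is a hypothetical offloaded netdev:

int id = dsa_lag_id(dst, bond_dev);	/* one-based ID, or -ENODEV */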
struct devlink_port devlink_port;
struct phylink *pl;
struct phylink_config pl_config;
- struct net_device *lag_dev;
+ struct dsa_lag *lag;
struct net_device *hsr_dev;
struct list_head list;
return dp->vlan_filtering;
}
+static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
+{
+ return dp->lag ? dp->lag->id : 0;
+}
+
+static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
+{
+ return dp->lag ? dp->lag->dev : NULL;
+}
+
+static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
+ const struct dsa_lag *lag)
+{
+ return dsa_port_lag_dev_get(dp) == lag->dev;
+}
+
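
A sketch of typical driver usage for these accessors, assuming a hypothetical helper name: hardware trunk IDs are zero-based, while dsa_port_lag_id_get() returns the one-based DSA ID, or 0 for ports outside any LAG:

static inline int example_hw_trunk_id(struct dsa_port *dp)
{
	unsigned int id = dsa_port_lag_id_get(dp);

	return id ? (int)(id - 1) : -ENOENT;
}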
static inline
struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
{
if (!dp->bridge)
return NULL;
- if (dp->lag_dev)
- return dp->lag_dev;
+ if (dp->lag)
+ return dp->lag->dev;
else if (dp->hsr_dev)
return dp->hsr_dev;
int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
int port);
int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag_dev,
+ int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info);
int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag_dev);
+ int port, struct dsa_lag lag);
/*
* PTP functionality
*/
int (*port_lag_change)(struct dsa_switch *ds, int port);
int (*port_lag_join)(struct dsa_switch *ds, int port,
- struct net_device *lag_dev,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info);
int (*port_lag_leave)(struct dsa_switch *ds, int port,
- struct net_device *lag_dev);
+ struct dsa_lag lag);
/*
* HSR integration
}
/**
- * dsa_lag_map() - Map LAG netdev to a linear LAG ID
+ * dsa_lag_map() - Map LAG structure to a linear LAG array
* @dst: Tree in which to record the mapping.
- * @lag_dev: Netdev that is to be mapped to an ID.
+ * @lag: LAG structure that is to be mapped to the tree's array.
*
- * dsa_lag_id/dsa_lag_dev can then be used to translate between the
+ * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
* two spaces. The size of the mapping space is determined by the
* driver by setting ds->num_lag_ids. It is perfectly legal to leave
* it unset if it is not needed, in which case these functions become
* no-ops.
*/
-void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag_dev)
+void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
unsigned int id;
- if (dsa_lag_id(dst, lag_dev) > 0)
- /* Already mapped */
- return;
-
for (id = 1; id <= dst->lags_len; id++) {
- if (!dsa_lag_dev(dst, id)) {
- dst->lags[id - 1] = lag_dev;
+ if (!dsa_lag_by_id(dst, id)) {
+ dst->lags[id - 1] = lag;
+ lag->id = id;
return;
}
}
/**
* dsa_lag_unmap() - Remove a LAG ID mapping
* @dst: Tree in which the mapping is recorded.
- * @lag_dev: Netdev that was mapped.
+ * @lag: LAG structure that was mapped.
*
* As there may be multiple users of the mapping, it is only removed
* if there are no other references to it.
*/
-void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag_dev)
+void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
- struct dsa_port *dp;
unsigned int id;
- dsa_lag_foreach_port(dp, dst, lag_dev)
- /* There are remaining users of this mapping */
- return;
-
dsa_lags_foreach_id(id, dst) {
- if (dsa_lag_dev(dst, id) == lag_dev) {
+ if (dsa_lag_by_id(dst, id) == lag) {
dst->lags[id - 1] = NULL;
+ lag->id = 0;
break;
}
}
}
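
Taken together, map/unmap now behave as in this sketch (assuming dst->lags_len >= 1 and a hypothetical bond_dev; error handling elided):

struct dsa_lag *lag = kzalloc(sizeof(*lag), GFP_KERNEL);

lag->dev = bond_dev;
dsa_lag_map(dst, lag);		/* first free slot: lag->id becomes >= 1 */
WARN_ON(dsa_lag_by_id(dst, lag->id) != lag);
dsa_lag_unmap(dst, lag);	/* slot cleared, lag->id reset to 0 */
kfree(lag);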
+struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
+ const struct net_device *lag_dev)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_lag_dev_get(dp) == lag_dev)
+ return dp->lag;
+
+ return NULL;
+}
+
struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
const struct net_device *br)
{
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
- struct net_device *lag_dev;
+ struct dsa_lag lag;
int sw_index;
int port;
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
/* dsa2.c */
-void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag_dev);
-void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag_dev);
+void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag);
+void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag);
+struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
+ const struct net_device *lag_dev);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
};
bool tx_enabled;
- if (!dp->lag_dev)
+ if (!dp->lag)
return 0;
/* On statically configured aggregates (e.g. loadbalance
return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
+static int dsa_port_lag_create(struct dsa_port *dp,
+ struct net_device *lag_dev)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_lag *lag;
+
+ lag = dsa_tree_lag_find(ds->dst, lag_dev);
+ if (lag) {
+ refcount_inc(&lag->refcount);
+ dp->lag = lag;
+ return 0;
+ }
+
+ lag = kzalloc(sizeof(*lag), GFP_KERNEL);
+ if (!lag)
+ return -ENOMEM;
+
+ refcount_set(&lag->refcount, 1);
+ lag->dev = lag_dev;
+ dsa_lag_map(ds->dst, lag);
+ dp->lag = lag;
+
+ return 0;
+}
+
+static void dsa_port_lag_destroy(struct dsa_port *dp)
+{
+ struct dsa_lag *lag = dp->lag;
+
+ dp->lag = NULL;
+ dp->lag_tx_enabled = false;
+
+ if (!refcount_dec_and_test(&lag->refcount))
+ return;
+
+ dsa_lag_unmap(dp->ds->dst, lag);
+ kfree(lag);
+}
+
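
A sketch of the resulting lifecycle when two user ports (hypothetical dp1 and dp2 in the same tree) join the same bond:

dsa_port_lag_create(dp1, bond_dev);	/* no match in tree: alloc + map, refcount 1 */
dsa_port_lag_create(dp2, bond_dev);	/* dsa_tree_lag_find() hit, refcount 2 */
dsa_port_lag_destroy(dp2);		/* refcount 1, mapping kept */
dsa_port_lag_destroy(dp1);		/* refcount 0: dsa_lag_unmap + kfree */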
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack)
{
struct dsa_notifier_lag_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
- .lag_dev = lag_dev,
.info = uinfo,
};
struct net_device *bridge_dev;
int err;
- dsa_lag_map(dp->ds->dst, lag_dev);
- dp->lag_dev = lag_dev;
+ err = dsa_port_lag_create(dp, lag_dev);
+ if (err)
+ goto err_lag_create;
+ info.lag = *dp->lag;
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
if (err)
goto err_lag_join;
err_bridge_join:
dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
- dp->lag_dev = NULL;
- dsa_lag_unmap(dp->ds->dst, lag_dev);
+ dsa_port_lag_destroy(dp);
+err_lag_create:
return err;
}
struct dsa_notifier_lag_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
- .lag_dev = lag_dev,
};
int err;
- if (!dp->lag_dev)
+ if (!dp->lag)
return;
/* Port might have been part of a LAG that in turn was
 * attached to a bridge.
 */
if (br)
dsa_port_bridge_leave(dp, br);
- dp->lag_tx_enabled = false;
- dp->lag_dev = NULL;
+ info.lag = *dp->lag;
+
+ dsa_port_lag_destroy(dp);
err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
if (err)
dev_err(dp->ds->dev,
"port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n",
dp->index, ERR_PTR(err));
-
- dsa_lag_unmap(dp->ds->dst, lag_dev);
}
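
Note that the notifier payload carries the dsa_lag by value rather than by pointer: on leave, the final dsa_port_lag_destroy() may free the object before the other switches in the tree handle the event. The ordering above stays safe because of the snapshot, as this sketch highlights:

info.lag = *dp->lag;		/* snapshot of id and dev */
dsa_port_lag_destroy(dp);	/* may kfree(dp->lag) */
dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);	/* copy still valid */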
/* Must be called under rcu_read_lock() */
continue;
dp = dsa_slave_to_port(lower);
- if (!dp->lag_dev)
+ if (!dp->lag)
/* Software LAG */
continue;
continue;
dp = dsa_slave_to_port(lower);
- if (!dp->lag_dev)
+ if (!dp->lag)
/* Software LAG */
continue;
struct dsa_notifier_lag_info *info)
{
if (ds->index == info->sw_index && ds->ops->port_lag_join)
- return ds->ops->port_lag_join(ds, info->port, info->lag_dev,
+ return ds->ops->port_lag_join(ds, info->port, info->lag,
info->info);
if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
return ds->ops->crosschip_lag_join(ds, info->sw_index,
- info->port, info->lag_dev,
+ info->port, info->lag,
info->info);
return -EOPNOTSUPP;
struct dsa_notifier_lag_info *info)
{
if (ds->index == info->sw_index && ds->ops->port_lag_leave)
- return ds->ops->port_lag_leave(ds, info->port, info->lag_dev);
+ return ds->ops->port_lag_leave(ds, info->port, info->lag);
if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
return ds->ops->crosschip_lag_leave(ds, info->sw_index,
- info->port, info->lag_dev);
+ info->port, info->lag);
return -EOPNOTSUPP;
}
if (trunk) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_lag *lag;
/* The exact source port is not available in the tag,
* so we inject the frame directly on the upper
* team/bond.
*/
- skb->dev = dsa_lag_dev(cpu_dp->dst, source_port + 1);
+ lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
+ skb->dev = lag ? lag->dev : NULL;
} else {
skb->dev = dsa_master_find_slave(dev, source_device,
source_port);
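
If the trunk ID in the tag does not correspond to an offloaded LAG, skb->dev is left NULL here; by the usual tagger convention (a sketch, assuming the standard rcv bail-out that follows this branch) the frame is then dropped:

if (!skb->dev)
	return NULL;	/* no resolvable netdev: caller drops the frame */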