struct net_device;
struct nfp_app;
+#define NFP_FL_STATS_ENTRY_RS BIT(20)
+#define NFP_FL_STATS_ELEM_RS 4
+#define NFP_FL_REPEATED_HASH_MAX BIT(17)
#define NFP_FLOWER_HASH_BITS 19
#define NFP_FLOWER_MASK_ENTRY_RS 256
#define NFP_FLOWER_MASK_ELEMENT_RS 1
u8 init_unallocated;
};
+struct nfp_fl_stats_id {
+ struct circ_buf free_list; /* ring of recycled stats ids */
+ u32 init_unalloc; /* ids not yet handed out */
+ u8 repeated_em_count;
+};
+
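For scale (not part of the patch): the free list stores up to BIT(20) stats
ids of NFP_FL_STATS_ELEM_RS (4) bytes each, a 4 MiB ring that
nfp_flower_metadata_init() below allocates with vmalloc(), and the first
NFP_FL_REPEATED_HASH_MAX (BIT(17)) ids are handed out lazily through
init_unalloc before the ring is ever consulted. A standalone sketch that
only makes those constants concrete, with BIT() expanded by hand since this
is not kernel code:

#include <stdio.h>

#define NFP_FL_STATS_ENTRY_RS      (1u << 20)   /* BIT(20) */
#define NFP_FL_STATS_ELEM_RS       4
#define NFP_FL_REPEATED_HASH_MAX   (1u << 17)   /* BIT(17) */

int main(void)
{
        /* 1048576 * 4 = 4194304 bytes, i.e. a 4 MiB ring. */
        printf("ring bytes: %u\n",
               NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
        /* 131072 ids served from init_unalloc before recycling starts. */
        printf("lazy ids:   %u\n", NFP_FL_REPEATED_HASH_MAX);
        return 0;
}
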
/**
* struct nfp_flower_priv - Flower APP per-vNIC priv data
* @nn: Pointer to vNIC
* @mask_id_seed: Seed used for mask hash table
* @flower_version: HW version of flower
+ * @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
* @flow_table: Hash table used to store flower rules
struct nfp_net *nn;
u32 mask_id_seed;
u64 flower_version;
+ struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
__be32 shortcut;
};
+struct nfp_fl_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 used; /* jiffies of last hardware update */
+};
+
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
struct hlist_node link;
struct rcu_head rcu;
+ spinlock_t lock; /* lock stats */
+ struct nfp_fl_stats stats;
char *unmasked_data;
char *mask_data;
char *action_data;
};
+/* One per-flow stats record carried in a stats control message; all
+ * fields are big-endian on the wire.
+ */
+struct nfp_fl_stats_frame {
+ __be32 stats_con_id;
+ __be32 pkt_count;
+ __be64 byte_count;
+ __be64 stats_cookie;
+};
+
int nfp_flower_metadata_init(struct nfp_app *app);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
+void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
+
#endif
u8 mask_id;
};
+static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct circ_buf *ring;
+
+ ring = &priv->stats_ids.free_list;
+ /* Check if buffer is full. */
+ if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
+ NFP_FL_STATS_ELEM_RS -
+ NFP_FL_STATS_ELEM_RS + 1))
+ return -ENOBUFS;
+
+ memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
+ ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
+ (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+
+ return 0;
+}
+
+static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ u32 freed_stats_id, temp_stats_id;
+ struct circ_buf *ring;
+
+ ring = &priv->stats_ids.free_list;
+ freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+ /* Check for unallocated entries first. */
+ if (priv->stats_ids.init_unalloc > 0) {
+ *stats_context_id = priv->stats_ids.init_unalloc - 1;
+ priv->stats_ids.init_unalloc--;
+ return 0;
+ }
+
+ /* Check if buffer is empty. */
+ if (ring->head == ring->tail) {
+ *stats_context_id = freed_stats_id;
+ return -ENOENT;
+ }
+
+ memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
+ *stats_context_id = temp_stats_id;
+ memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
+ ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
+ (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+
+ return 0;
+}
+
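Taken together, the two helpers above form a simple id allocator:
nfp_get_stats_entry() drains the init_unalloc pool first and only then
recycles ids from the ring's tail, while nfp_release_stats_entry() pushes
freed ids at the head. A minimal userspace model of that round trip, using
plain modulo arithmetic in place of the kernel's circ_buf/CIRC_SPACE
helpers (the names and the tiny ring size are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ELEM 4                          /* bytes per stored u32 id */
#define RING 8                          /* entries; the driver uses BIT(20) */

static char buf[RING * ELEM];
static unsigned int head, tail;         /* byte offsets, multiples of ELEM */
static uint32_t unalloc = 3;            /* models stats_ids.init_unalloc */

static int put_id(uint32_t id)
{
        /* Full when advancing head would land on tail. */
        if ((head + ELEM) % (RING * ELEM) == tail)
                return -1;
        memcpy(&buf[head], &id, ELEM);
        head = (head + ELEM) % (RING * ELEM);
        return 0;
}

static int get_id(uint32_t *id)
{
        if (unalloc > 0) {              /* fresh ids are handed out first */
                *id = --unalloc;
                return 0;
        }
        if (head == tail)               /* ring empty */
                return -1;
        memcpy(id, &buf[tail], ELEM);
        tail = (tail + ELEM) % (RING * ELEM);
        return 0;
}

int main(void)
{
        uint32_t id;

        get_id(&id);                    /* id = 2, from the unallocated pool */
        put_id(id);                     /* recycle id 2 into the ring */
        get_id(&id);                    /* id = 1: the pool drains first */
        printf("got id %u\n", id);
        return 0;
}
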
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
return NULL;
}
+static void
+nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
+{
+ struct nfp_fl_payload *nfp_flow;
+ unsigned long flower_cookie;
+
+ flower_cookie = be64_to_cpu(stats->stats_cookie);
+
+ rcu_read_lock();
+ nfp_flow = nfp_flower_search_fl_table(app, flower_cookie);
+ if (!nfp_flow)
+ goto exit_rcu_unlock;
+
+ if (nfp_flow->meta.host_ctx_id != stats->stats_con_id)
+ goto exit_rcu_unlock;
+
+ spin_lock(&nfp_flow->lock);
+ nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
+ nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
+ nfp_flow->stats.used = jiffies;
+ spin_unlock(&nfp_flow->lock);
+
+exit_rcu_unlock:
+ rcu_read_unlock();
+}
+
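The spinlock added to struct nfp_fl_payload is what lets a stats consumer
snapshot and reset these counters without racing the update above. A
hypothetical reader, sketched for illustration only (the function name and
the reset-on-read policy are assumptions, not part of this patch):

/* Hypothetical consumer: snapshot the counters under nfp_flow->lock and
 * zero them so the next read reports only new traffic; "used" is returned
 * as the jiffies stamp of the last hardware update.
 */
static void nfp_flower_read_stats(struct nfp_fl_payload *nfp_flow,
                                  u64 *pkts, u64 *bytes, u64 *last_used)
{
        spin_lock(&nfp_flow->lock);
        *pkts = nfp_flow->stats.pkts;
        *bytes = nfp_flow->stats.bytes;
        *last_used = nfp_flow->stats.used;
        nfp_flow->stats.pkts = 0;
        nfp_flow->stats.bytes = 0;
        spin_unlock(&nfp_flow->lock);
}
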
+void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
+{
+ unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN;
+ struct nfp_fl_stats_frame *stats_frame;
+ unsigned char *msg;
+ int i;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+
+ stats_frame = (struct nfp_fl_stats_frame *)msg;
+ for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
+ nfp_flower_update_stats(app, stats_frame + i);
+}
+
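Each frame is a fixed 24-byte big-endian record (4 + 4 + 8 + 8 bytes), so
the loop above processes msg_len / 24 records and silently ignores any
trailing partial frame. A standalone userspace decode of the same layout
(glibc endian.h helpers; the sample bytes are fabricated):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stats_frame {                    /* mirrors nfp_fl_stats_frame; */
        uint32_t stats_con_id;          /* natural alignment gives 24  */
        uint32_t pkt_count;             /* bytes, no padding, on the   */
        uint64_t byte_count;            /* common ABIs                 */
        uint64_t stats_cookie;
};

int main(void)
{
        /* One fabricated frame: ctx 1, 2 pkts, 128 bytes, cookie 7. */
        const uint8_t msg[24] = {
                0, 0, 0, 1,  0, 0, 0, 2,
                0, 0, 0, 0,  0, 0, 0, 0x80,
                0, 0, 0, 0,  0, 0, 0, 7,
        };
        size_t msg_len = sizeof(msg);
        struct stats_frame frame;
        size_t i;

        /* Same framing math as nfp_flower_rx_flow_stats(). */
        for (i = 0; i < msg_len / sizeof(frame); i++) {
                memcpy(&frame, msg + i * sizeof(frame), sizeof(frame));
                printf("ctx %u: +%u pkts, +%llu bytes, cookie %llu\n",
                       be32toh(frame.stats_con_id),
                       be32toh(frame.pkt_count),
                       (unsigned long long)be64toh(frame.byte_count),
                       (unsigned long long)be64toh(frame.stats_cookie));
        }
        return 0;
}
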
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
u8 new_mask_id;
+ u32 stats_cxt;
+
+ if (nfp_get_stats_entry(app, &stats_cxt))
+ return -ENOENT;
+
+ nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
+ nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
- &nfp_flow->meta.flags, &new_mask_id))
+ &nfp_flow->meta.flags, &new_mask_id)) {
+ if (nfp_release_stats_entry(app, stats_cxt))
+ return -EINVAL;
return -ENOENT;
+ }
nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
priv->flower_version++;
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
+ nfp_flow->stats.pkts = 0;
+ nfp_flow->stats.bytes = 0;
+ nfp_flow->stats.used = jiffies;
check_entry = nfp_flower_search_fl_table(app, flow->cookie);
if (check_entry) {
+ if (nfp_release_stats_entry(app, stats_cxt))
+ return -EINVAL;
+
if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
NULL, &new_mask_id))
{
struct nfp_flower_priv *priv = app->priv;
u8 new_mask_id = 0;
+ u32 temp_ctx_id;
nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
- return 0;
+ /* Release the stats ctx id. */
+ temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+ return nfp_release_stats_entry(app, temp_ctx_id);
}
int nfp_flower_metadata_init(struct nfp_app *app)
if (!priv->mask_ids.last_used)
goto err_free_mask_id;
+ /* Init ring buffer and unallocated stats_ids. */
+ priv->stats_ids.free_list.buf =
+ vmalloc(NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ if (!priv->stats_ids.free_list.buf)
+ goto err_free_last_used;
+
+ priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+
return 0;
+err_free_last_used:
+ kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
return -ENOMEM;
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
+ vfree(priv->stats_ids.free_list.buf);
}
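
One sizing note on the init/cleanup above: the free list is
NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS = 4 MiB, hence the
vmalloc()/vfree() pairing rather than kmalloc()/kfree(). If a build-time
guard were wanted (hypothetical, not in the patch), something like this
could sit at the top of nfp_flower_metadata_init():

        /* Hypothetical compile-time checks, not part of the patch: each
         * ring element must hold exactly one u32 stats id, and the lazily
         * allocated id range must fit in the ring's id space.
         */
        BUILD_BUG_ON(NFP_FL_STATS_ELEM_RS != sizeof(u32));
        BUILD_BUG_ON(NFP_FL_REPEATED_HASH_MAX > NFP_FL_STATS_ENTRY_RS);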