void ofputil_append_queue_stat(struct list *replies,
const struct ofputil_queue_stats *oqs);
+struct bucket_counter {
+ uint64_t packet_count; /* Number of packets processed by bucket. */
+ uint64_t byte_count; /* Number of bytes processed by bucket. */
+};
+
/* Bucket for use in groups. */
struct ofputil_bucket {
struct list list_node;
uint32_t watch_group; /* Group whose state affects whether this
* bucket is live. Only required for fast
* failover groups. */
struct ofpact *ofpacts; /* Series of "struct ofpact"s. */
size_t ofpacts_len; /* Length of ofpacts, in bytes. */
+
+ struct bucket_counter stats; /* Bucket statistics. */
};
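
With the counters embedded directly in each bucket, per-bucket statistics no longer need a separately allocated array; they live and die with the bucket itself. A minimal sketch of resetting them (bucket_stats_reset() is a hypothetical helper; group_construct_stats() further below performs the same zeroing inline):

    static void
    bucket_stats_reset(struct ofputil_bucket *bucket)
    {
        /* Zero the embedded counters; nothing to allocate or free. */
        bucket->stats.packet_count = 0;
        bucket->stats.byte_count = 0;
    }
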
/* Protocol-independent group_mod. */
struct list buckets; /* Contains "struct ofputil_bucket"s. */
};
-struct bucket_counter {
- uint64_t packet_count; /* Number of packets processed by bucket. */
- uint64_t byte_count; /* Number of bytes processed by bucket. */
-};
-
/* Group stats reply, independent of protocol. */
struct ofputil_group_stats {
uint32_t group_id; /* Group identifier. */
XC_LEARN,
XC_NORMAL,
XC_FIN_TIMEOUT,
+ XC_GROUP,
};
/* xlate_cache entries hold enough information to perform the side effects of
uint16_t idle;
uint16_t hard;
} fin;
+ struct {
+ struct group_dpif *group; /* Group whose stats to credit. */
+ struct ofputil_bucket *bucket; /* Bucket credited, or NULL for all. */
+ } group;
} u;
};
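
The XC_GROUP entry records which group (and which bucket, possibly NULL) was hit during translation so the stats credit can be replayed from the cache later. A sketch of the entry's life cycle, stitched together from the additions in this patch (xlate_cache_add_entry() is assumed to return a zeroed entry of the requested type):

    /* Creation, in xlate_group_stats(): take a group reference so the
     * group outlives the cache entry. */
    entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
    entry->u.group.group = group_dpif_ref(group);
    entry->u.group.bucket = bucket;

    /* Replay, in xlate_push_stats(): credit the saved group/bucket. */
    group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
                            stats);

    /* Teardown, in xlate_cache_clear(): drop the reference. */
    group_dpif_unref(entry->u.group.group);
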
return true;
}
-static const struct ofputil_bucket *
+static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
int depth);
static bool
bucket_is_alive(const struct xlate_ctx *ctx,
- const struct ofputil_bucket *bucket, int depth)
+ struct ofputil_bucket *bucket, int depth)
{
if (depth >= MAX_LIVENESS_RECURSION) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
group_is_alive(ctx, bucket->watch_group, depth + 1));
}
-static const struct ofputil_bucket *
+static struct ofputil_bucket *
group_first_live_bucket(const struct xlate_ctx *ctx,
const struct group_dpif *group, int depth)
{
return NULL;
}
-static const struct ofputil_bucket *
+static struct ofputil_bucket *
group_best_live_bucket(const struct xlate_ctx *ctx,
const struct group_dpif *group,
uint32_t basis)
{
- const struct ofputil_bucket *best_bucket = NULL;
+ struct ofputil_bucket *best_bucket = NULL;
uint32_t best_score = 0;
int i = 0;
- const struct ofputil_bucket *bucket;
+ struct ofputil_bucket *bucket;
const struct list *buckets;
group_dpif_get_buckets(group, &buckets);
}
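
The selection loop of group_best_live_bucket() is elided above; a hedged sketch of the weighted hash selection it performs, assuming hash_int() from OVS's hash library and the 'weight' field of struct ofputil_bucket (the locals match the declarations shown above):

    /* For each live bucket, derive a score from the flow hash ('basis')
     * and the bucket's index, scaled by the bucket's weight; the
     * highest-scoring live bucket wins. */
    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket_is_alive(ctx, bucket, 0)) {
            uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
            if (score >= best_score) {
                best_bucket = bucket;
                best_score = score;
            }
        }
        i++;
    }
    return best_bucket;
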
static void
-xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
+xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
+ struct ofputil_bucket *bucket)
+{
+ if (ctx->xin->resubmit_stats) {
+ group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
+ }
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
+ entry->u.group.group = group_dpif_ref(group);
+ entry->u.group.bucket = bucket;
+ }
+}
+
+static void
+xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket)
{
uint64_t action_list_stub[1024 / 8];
struct ofpbuf action_list, action_set;
static void
xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
- const struct ofputil_bucket *bucket;
+ struct ofputil_bucket *bucket;
const struct list *buckets;
struct flow old_flow = ctx->xin->flow;
* just before applying the all or indirect group. */
ctx->xin->flow = old_flow;
}
+ xlate_group_stats(ctx, group, NULL);
}
static void
xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
- const struct ofputil_bucket *bucket;
+ struct ofputil_bucket *bucket;
bucket = group_first_live_bucket(ctx, group, 0);
if (bucket) {
xlate_group_bucket(ctx, bucket);
+ xlate_group_stats(ctx, group, bucket);
}
}
xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
struct flow_wildcards *wc = &ctx->xout->wc;
- const struct ofputil_bucket *bucket;
+ struct ofputil_bucket *bucket;
uint32_t basis;
basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
if (bucket) {
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
xlate_group_bucket(ctx, bucket);
+ xlate_group_stats(ctx, group, bucket);
}
}
xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
entry->u.fin.idle, entry->u.fin.hard);
break;
+ case XC_GROUP:
+ group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
+ stats);
+ break;
default:
OVS_NOT_REACHED();
}
/* 'u.fin.rule' is always already held as an XC_RULE, which
* has already released its reference above. */
break;
+ case XC_GROUP:
+ group_dpif_unref(entry->u.group.group);
+ break;
default:
OVS_NOT_REACHED();
}
struct ovs_mutex stats_mutex;
uint64_t packet_count OVS_GUARDED; /* Number of packets received. */
uint64_t byte_count OVS_GUARDED; /* Number of bytes received. */
- struct bucket_counter *bucket_stats OVS_GUARDED; /* Bucket statistics. */
};
struct ofbundle {
group_construct_stats(struct group_dpif *group)
OVS_REQUIRES(group->stats_mutex)
{
+ struct ofputil_bucket *bucket;
+ const struct list *buckets;
+
group->packet_count = 0;
group->byte_count = 0;
- if (!group->bucket_stats) {
- group->bucket_stats = xcalloc(group->up.n_buckets,
- sizeof *group->bucket_stats);
- } else {
- memset(group->bucket_stats, 0, group->up.n_buckets *
- sizeof *group->bucket_stats);
+
+ group_dpif_get_buckets(group, &buckets);
+ LIST_FOR_EACH (bucket, list_node, buckets) {
+ bucket->stats.packet_count = 0;
+ bucket->stats.byte_count = 0;
+ }
+}
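
group_construct_stats() carries an OVS_REQUIRES(group->stats_mutex) annotation, so callers must hold the mutex. A sketch of the expected calling pattern, e.g. from group_construct() after the mutex has been initialized:

    ovs_mutex_lock(&group->stats_mutex);
    group_construct_stats(group);
    ovs_mutex_unlock(&group->stats_mutex);
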
+
+void
+group_dpif_credit_stats(struct group_dpif *group,
+ struct ofputil_bucket *bucket,
+ const struct dpif_flow_stats *stats)
+{
+ ovs_mutex_lock(&group->stats_mutex);
+ group->packet_count += stats->n_packets;
+ group->byte_count += stats->n_bytes;
+ if (bucket) {
+ bucket->stats.packet_count += stats->n_packets;
+ bucket->stats.byte_count += stats->n_bytes;
+ } else { /* Credit to all buckets. */
+ const struct list *buckets;
+
+ group_dpif_get_buckets(group, &buckets);
+ LIST_FOR_EACH (bucket, list_node, buckets) {
+ bucket->stats.packet_count += stats->n_packets;
+ bucket->stats.byte_count += stats->n_bytes;
+ }
}
+ ovs_mutex_unlock(&group->stats_mutex);
}
static enum ofperr
return 0;
}
-static void
-group_destruct__(struct group_dpif *group)
- OVS_REQUIRES(group->stats_mutex)
-{
- free(group->bucket_stats);
- group->bucket_stats = NULL;
-}
-
static void
group_destruct(struct ofgroup *group_)
{
struct group_dpif *group = group_dpif_cast(group_);
- ovs_mutex_lock(&group->stats_mutex);
- group_destruct__(group);
- ovs_mutex_unlock(&group->stats_mutex);
ovs_mutex_destroy(&group->stats_mutex);
}
group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
{
struct group_dpif *group = group_dpif_cast(group_);
+ struct ofputil_bucket *bucket;
+ const struct list *buckets;
+ struct bucket_counter *bucket_stats;
ovs_mutex_lock(&group->stats_mutex);
ogs->packet_count = group->packet_count;
ogs->byte_count = group->byte_count;
- memcpy(ogs->bucket_stats, group->bucket_stats,
- group->up.n_buckets * sizeof *group->bucket_stats);
+
+ group_dpif_get_buckets(group, &buckets);
+ bucket_stats = ogs->bucket_stats;
+ LIST_FOR_EACH (bucket, list_node, buckets) {
+ bucket_stats->packet_count = bucket->stats.packet_count;
+ bucket_stats->byte_count = bucket->stats.byte_count;
+ bucket_stats++;
+ }
ovs_mutex_unlock(&group->stats_mutex);
return 0;
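
Because the copy loop writes one bucket_counter per bucket in list order, the caller is assumed to have sized ogs->bucket_stats for the group's full bucket count before calling, e.g.:

    /* Hypothetical caller-side allocation; n_buckets is the group's
     * bucket count (group->up.n_buckets at the dpif layer). */
    ogs->bucket_stats = xmalloc(n_buckets * sizeof *ogs->bucket_stats);
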
struct rule_dpif *no_packet_in_rule,
struct rule_dpif **rule, bool take_ref);
+void group_dpif_credit_stats(struct group_dpif *,
+ struct ofputil_bucket *,
+ const struct dpif_flow_stats *);
bool group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
struct group_dpif **group);