net: sched: Merge Qdisc::bstats and Qdisc::cpu_bstats data types
author     Ahmed S. Darwish <a.darwish@linutronix.de>
           Sat, 16 Oct 2021 08:49:09 +0000 (10:49 +0200)
committer  David S. Miller <davem@davemloft.net>
           Mon, 18 Oct 2021 11:54:41 +0000 (12:54 +0100)
The only factor differentiating the per-CPU bstats data type (struct
gnet_stats_basic_cpu) from the packed non-per-CPU one (struct
gnet_stats_basic_packed) was a u64_stats sync point inside the former.
The two data types are now equivalent: earlier commits added a
u64_stats sync point to the latter as well.

Combine both data types into "struct gnet_stats_basic_sync". This
eliminates redundancy and simplifies the bstats read/write APIs.

Use u64_stats_t for the bstats "packets" and "bytes" fields. On 64-bit
architectures, u64_stats sync points do not use sequence counter
protection.
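
For orientation, here is a minimal sketch of the merged data type and the
low-level write/read pattern it enables. It is illustrative only (not part
of the patch) and condenses what _bstats_update() and
gnet_stats_add_basic_cpu() do in the hunks below; the example_* helper
names are hypothetical.

#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

struct gnet_stats_basic_sync {
        u64_stats_t bytes;
        u64_stats_t packets;
        struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

/* Writer: account one skb. On 64-bit the update_begin/end pair is a no-op.
 * Counting the skb as a single packet is a simplification; the real
 * bstats_update() also accounts for GSO segments. */
static void example_count(struct gnet_stats_basic_sync *b,
                          const struct sk_buff *skb)
{
        u64_stats_update_begin(&b->syncp);
        u64_stats_add(&b->bytes, skb->len);
        u64_stats_inc(&b->packets);
        u64_stats_update_end(&b->syncp);
}

/* Reader: retry until a consistent bytes/packets pair is observed
 * (the retry loop only ever spins on 32-bit, where the seqcount is real). */
static void example_read(struct gnet_stats_basic_sync *b,
                         u64 *bytes, u64 *packets)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&b->syncp);
                *bytes = u64_stats_read(&b->bytes);
                *packets = u64_stats_read(&b->packets);
        } while (u64_stats_fetch_retry_irq(&b->syncp, start));
}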

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
30 files changed:
drivers/net/ethernet/netronome/nfp/abm/qdisc.c
include/net/act_api.h
include/net/gen_stats.h
include/net/netfilter/xt_rateest.h
include/net/pkt_cls.h
include/net/sch_generic.h
net/core/gen_estimator.c
net/core/gen_stats.c
net/netfilter/xt_RATEEST.c
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_ife.c
net/sched/act_mpls.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_drr.c
net/sched/sch_ets.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_qfq.c

diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
index 2473fb5f75e5e5d4cac8ffd92e1d3784243c1bbb..2a5cc64227e9f0e42b289eb365aa5a11add1c4fe 100644 (file)
@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
 static void
 nfp_abm_stats_calculate(struct nfp_alink_stats *new,
                        struct nfp_alink_stats *old,
-                       struct gnet_stats_basic_packed *bstats,
+                       struct gnet_stats_basic_sync *bstats,
                        struct gnet_stats_queue *qstats)
 {
        _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index f19f7f4a463cd9f84d1d8c132a749544243bab2d..b5b624c7e488836b7031a4c948f75038371d5cec 100644 (file)
@@ -30,13 +30,13 @@ struct tc_action {
        atomic_t                        tcfa_bindcnt;
        int                             tcfa_action;
        struct tcf_t                    tcfa_tm;
-       struct gnet_stats_basic_packed  tcfa_bstats;
-       struct gnet_stats_basic_packed  tcfa_bstats_hw;
+       struct gnet_stats_basic_sync    tcfa_bstats;
+       struct gnet_stats_basic_sync    tcfa_bstats_hw;
        struct gnet_stats_queue         tcfa_qstats;
        struct net_rate_estimator __rcu *tcfa_rate_est;
        spinlock_t                      tcfa_lock;
-       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
-       struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
+       struct gnet_stats_basic_sync __percpu *cpu_bstats;
+       struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct tc_cookie        __rcu *act_cookie;
        struct tcf_chain        __rcu *goto_chain;
@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
                                            struct sk_buff *skb)
 {
        if (likely(a->cpu_bstats)) {
-               bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+               bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
                return;
        }
        spin_lock(&a->tcfa_lock);
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 304d792f797762e0f4d18c15760b6bb68ef79613..52b87588f467bd79e58d34dad4f147e868919f2f 100644 (file)
@@ -7,15 +7,17 @@
 #include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>
 
-/* Note: this used to be in include/uapi/linux/gen_stats.h */
-struct gnet_stats_basic_packed {
-       __u64   bytes;
-       __u64   packets;
-       struct u64_stats_sync syncp;
-};
-
-struct gnet_stats_basic_cpu {
-       struct gnet_stats_basic_packed bstats;
+/* Throughput stats.
+ * Must be initialized beforehand with gnet_stats_basic_sync_init().
+ *
+ * If no reads can ever occur parallel to writes (e.g. stack-allocated
+ * bstats), then the internal stat values can be written to and read
+ * from directly. Otherwise, use _bstats_set/update() for writes and
+ * gnet_stats_add_basic() for reads.
+ */
+struct gnet_stats_basic_sync {
+       u64_stats_t bytes;
+       u64_stats_t packets;
        struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));
 
@@ -35,7 +37,7 @@ struct gnet_dump {
        struct tc_stats   tc_stats;
 };
 
-void gnet_stats_basic_packed_init(struct gnet_stats_basic_packed *b);
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
 int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
                          struct gnet_dump *d, int padattr);
 
@@ -46,16 +48,16 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
 
 int gnet_stats_copy_basic(const seqcount_t *running,
                          struct gnet_dump *d,
-                         struct gnet_stats_basic_cpu __percpu *cpu,
-                         struct gnet_stats_basic_packed *b);
+                         struct gnet_stats_basic_sync __percpu *cpu,
+                         struct gnet_stats_basic_sync *b);
 void gnet_stats_add_basic(const seqcount_t *running,
-                         struct gnet_stats_basic_packed *bstats,
-                         struct gnet_stats_basic_cpu __percpu *cpu,
-                         struct gnet_stats_basic_packed *b);
+                         struct gnet_stats_basic_sync *bstats,
+                         struct gnet_stats_basic_sync __percpu *cpu,
+                         struct gnet_stats_basic_sync *b);
 int gnet_stats_copy_basic_hw(const seqcount_t *running,
                             struct gnet_dump *d,
-                            struct gnet_stats_basic_cpu __percpu *cpu,
-                            struct gnet_stats_basic_packed *b);
+                            struct gnet_stats_basic_sync __percpu *cpu,
+                            struct gnet_stats_basic_sync *b);
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
                             struct net_rate_estimator __rcu **ptr);
 int gnet_stats_copy_queue(struct gnet_dump *d,
@@ -68,14 +70,14 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 
 int gnet_stats_finish_copy(struct gnet_dump *d);
 
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-                     struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+                     struct gnet_stats_basic_sync __percpu *cpu_bstats,
                      struct net_rate_estimator __rcu **rate_est,
                      spinlock_t *lock,
                      seqcount_t *running, struct nlattr *opt);
 void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-                         struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+                         struct gnet_stats_basic_sync __percpu *cpu_bstats,
                          struct net_rate_estimator __rcu **ptr,
                          spinlock_t *lock,
                          seqcount_t *running, struct nlattr *opt);
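
Usage sketch for the API above (illustrative only, not part of the patch):
per the comment on struct gnet_stats_basic_sync, a stack-allocated
accumulator must be initialized with gnet_stats_basic_sync_init() and can
then be filled with gnet_stats_add_basic() and read directly. The
example_snapshot() name and the NULL 'running' seqcount are assumptions
made for this example; est_fetch_counters() in net/core/gen_estimator.c
below follows a similar pattern.

static void example_snapshot(struct gnet_stats_basic_sync __percpu *cpu_bstats,
                             struct gnet_stats_basic_sync *bstats,
                             u64 *bytes, u64 *packets)
{
        struct gnet_stats_basic_sync sum;

        /* Stack-allocated, so it must be initialized first. */
        gnet_stats_basic_sync_init(&sum);

        /* Sums the per-CPU counters when cpu_bstats is non-NULL, otherwise
         * the non-per-CPU fallback. No reader runs in parallel on 'sum',
         * so its fields may be read directly afterwards. */
        gnet_stats_add_basic(NULL, &sum, cpu_bstats, bstats);

        *bytes = u64_stats_read(&sum.bytes);
        *packets = u64_stats_read(&sum.packets);
}
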
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 832ab69efda57c619a8b1742179b6f98fcb1eef6..4c3809e141f4ffe09b7116aec322e5e76960b853 100644 (file)
@@ -6,7 +6,7 @@
 
 struct xt_rateest {
        /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
-       struct gnet_stats_basic_packed  bstats;
+       struct gnet_stats_basic_sync    bstats;
        spinlock_t                      lock;
 
 
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 83a6d07921806e9588c03e41c885ec2eed9aac72..4a5833108083f1c4cb0da0fea570c8ab516a9705 100644 (file)
@@ -765,7 +765,7 @@ struct tc_cookie {
 };
 
 struct tc_qopt_offload_stats {
-       struct gnet_stats_basic_packed *bstats;
+       struct gnet_stats_basic_sync *bstats;
        struct gnet_stats_queue *qstats;
 };
 
@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params {
 };
 
 struct tc_gred_qopt_offload_stats {
-       struct gnet_stats_basic_packed bstats[MAX_DPs];
+       struct gnet_stats_basic_sync bstats[MAX_DPs];
        struct gnet_stats_queue qstats[MAX_DPs];
        struct red_stats *xstats[MAX_DPs];
 };
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d7746aea3cecf1703422d8ca439b5f4b6396e838..7882e3aa6448222af4def4abd985fe4a86d8237c 100644 (file)
@@ -97,7 +97,7 @@ struct Qdisc {
        struct netdev_queue     *dev_queue;
 
        struct net_rate_estimator __rcu *rate_est;
-       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+       struct gnet_stats_basic_sync __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        int                     pad;
        refcount_t              refcnt;
@@ -107,7 +107,7 @@ struct Qdisc {
         */
        struct sk_buff_head     gso_skb ____cacheline_aligned_in_smp;
        struct qdisc_skb_head   q;
-       struct gnet_stats_basic_packed bstats;
+       struct gnet_stats_basic_sync bstats;
        seqcount_t              running;
        struct gnet_stats_queue qstats;
        unsigned long           state;
@@ -849,16 +849,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        return sch->enqueue(skb, sch, to_free);
 }
 
-static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
                                  __u64 bytes, __u32 packets)
 {
        u64_stats_update_begin(&bstats->syncp);
-       bstats->bytes += bytes;
-       bstats->packets += packets;
+       u64_stats_add(&bstats->bytes, bytes);
+       u64_stats_add(&bstats->packets, packets);
        u64_stats_update_end(&bstats->syncp);
 }
 
-static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
                                 const struct sk_buff *skb)
 {
        _bstats_update(bstats,
@@ -866,26 +866,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
 }
 
-static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
-                                     __u64 bytes, __u32 packets)
-{
-       u64_stats_update_begin(&bstats->syncp);
-       _bstats_update(&bstats->bstats, bytes, packets);
-       u64_stats_update_end(&bstats->syncp);
-}
-
-static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
-                                    const struct sk_buff *skb)
-{
-       u64_stats_update_begin(&bstats->syncp);
-       bstats_update(&bstats->bstats, skb);
-       u64_stats_update_end(&bstats->syncp);
-}
-
 static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
 {
-       bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
 }
 
 static inline void qdisc_bstats_update(struct Qdisc *sch,
@@ -1317,7 +1301,7 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
 struct mini_Qdisc {
        struct tcf_proto *filter_list;
        struct tcf_block *block;
-       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+       struct gnet_stats_basic_sync __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct rcu_head rcu;
 };
@@ -1325,7 +1309,7 @@ struct mini_Qdisc {
 static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
                                                const struct sk_buff *skb)
 {
-       bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
 }
 
 static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 64978e77368f4220f83fc9352a9f555883f3da00..a73ad0bf324c4dd69223fe27fa425294cd373df6 100644 (file)
  */
 
 struct net_rate_estimator {
-       struct gnet_stats_basic_packed  *bstats;
+       struct gnet_stats_basic_sync    *bstats;
        spinlock_t              *stats_lock;
        seqcount_t              *running;
-       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+       struct gnet_stats_basic_sync __percpu *cpu_bstats;
        u8                      ewma_log;
        u8                      intvl_log; /* period : (250ms << intvl_log) */
 
@@ -60,9 +60,9 @@ struct net_rate_estimator {
 };
 
 static void est_fetch_counters(struct net_rate_estimator *e,
-                              struct gnet_stats_basic_packed *b)
+                              struct gnet_stats_basic_sync *b)
 {
-       gnet_stats_basic_packed_init(b);
+       gnet_stats_basic_sync_init(b);
        if (e->stats_lock)
                spin_lock(e->stats_lock);
 
@@ -76,14 +76,18 @@ static void est_fetch_counters(struct net_rate_estimator *e,
 static void est_timer(struct timer_list *t)
 {
        struct net_rate_estimator *est = from_timer(est, t, timer);
-       struct gnet_stats_basic_packed b;
+       struct gnet_stats_basic_sync b;
+       u64 b_bytes, b_packets;
        u64 rate, brate;
 
        est_fetch_counters(est, &b);
-       brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+       b_bytes = u64_stats_read(&b.bytes);
+       b_packets = u64_stats_read(&b.packets);
+
+       brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log);
        brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
 
-       rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+       rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
        rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
 
        write_seqcount_begin(&est->seq);
@@ -91,8 +95,8 @@ static void est_timer(struct timer_list *t)
        est->avpps += rate;
        write_seqcount_end(&est->seq);
 
-       est->last_bytes = b.bytes;
-       est->last_packets = b.packets;
+       est->last_bytes = b_bytes;
+       est->last_packets = b_packets;
 
        est->next_jiffies += ((HZ/4) << est->intvl_log);
 
@@ -121,8 +125,8 @@ static void est_timer(struct timer_list *t)
  * Returns 0 on success or a negative error code.
  *
  */
-int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-                     struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
+                     struct gnet_stats_basic_sync __percpu *cpu_bstats,
                      struct net_rate_estimator __rcu **rate_est,
                      spinlock_t *lock,
                      seqcount_t *running,
@@ -130,7 +134,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 {
        struct gnet_estimator *parm = nla_data(opt);
        struct net_rate_estimator *old, *est;
-       struct gnet_stats_basic_packed b;
+       struct gnet_stats_basic_sync b;
        int intvl_log;
 
        if (nla_len(opt) < sizeof(*parm))
@@ -164,8 +168,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        est_fetch_counters(est, &b);
        if (lock)
                local_bh_enable();
-       est->last_bytes = b.bytes;
-       est->last_packets = b.packets;
+       est->last_bytes = u64_stats_read(&b.bytes);
+       est->last_packets = u64_stats_read(&b.packets);
 
        if (lock)
                spin_lock_bh(lock);
@@ -222,8 +226,8 @@ EXPORT_SYMBOL(gen_kill_estimator);
  *
  * Returns 0 on success or a negative error code.
  */
-int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-                         struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
+                         struct gnet_stats_basic_sync __percpu *cpu_bstats,
                          struct net_rate_estimator __rcu **rate_est,
                          spinlock_t *lock,
                          seqcount_t *running, struct nlattr *opt)
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 69576972a25f0b4752e08679f7f1991f3fec9266..5f57f761def69bdb76535d4ffbae302d7e89fea5 100644 (file)
@@ -115,29 +115,29 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
 EXPORT_SYMBOL(gnet_stats_start_copy);
 
 /* Must not be inlined, due to u64_stats seqcount_t lockdep key */
-void gnet_stats_basic_packed_init(struct gnet_stats_basic_packed *b)
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
 {
-       b->bytes = 0;
-       b->packets = 0;
+       u64_stats_set(&b->bytes, 0);
+       u64_stats_set(&b->packets, 0);
        u64_stats_init(&b->syncp);
 }
-EXPORT_SYMBOL(gnet_stats_basic_packed_init);
+EXPORT_SYMBOL(gnet_stats_basic_sync_init);
 
-static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
-                                    struct gnet_stats_basic_cpu __percpu *cpu)
+static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
+                                    struct gnet_stats_basic_sync __percpu *cpu)
 {
        u64 t_bytes = 0, t_packets = 0;
        int i;
 
        for_each_possible_cpu(i) {
-               struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
+               struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
                unsigned int start;
                u64 bytes, packets;
 
                do {
                        start = u64_stats_fetch_begin_irq(&bcpu->syncp);
-                       bytes = bcpu->bstats.bytes;
-                       packets = bcpu->bstats.packets;
+                       bytes = u64_stats_read(&bcpu->bytes);
+                       packets = u64_stats_read(&bcpu->packets);
                } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
 
                t_bytes += bytes;
@@ -147,9 +147,9 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
 }
 
 void gnet_stats_add_basic(const seqcount_t *running,
-                         struct gnet_stats_basic_packed *bstats,
-                         struct gnet_stats_basic_cpu __percpu *cpu,
-                         struct gnet_stats_basic_packed *b)
+                         struct gnet_stats_basic_sync *bstats,
+                         struct gnet_stats_basic_sync __percpu *cpu,
+                         struct gnet_stats_basic_sync *b)
 {
        unsigned int seq;
        u64 bytes = 0;
@@ -162,8 +162,8 @@ void gnet_stats_add_basic(const seqcount_t *running,
        do {
                if (running)
                        seq = read_seqcount_begin(running);
-               bytes = b->bytes;
-               packets = b->packets;
+               bytes = u64_stats_read(&b->bytes);
+               packets = u64_stats_read(&b->packets);
        } while (running && read_seqcount_retry(running, seq));
 
        _bstats_update(bstats, bytes, packets);
@@ -173,18 +173,22 @@ EXPORT_SYMBOL(gnet_stats_add_basic);
 static int
 ___gnet_stats_copy_basic(const seqcount_t *running,
                         struct gnet_dump *d,
-                        struct gnet_stats_basic_cpu __percpu *cpu,
-                        struct gnet_stats_basic_packed *b,
+                        struct gnet_stats_basic_sync __percpu *cpu,
+                        struct gnet_stats_basic_sync *b,
                         int type)
 {
-       struct gnet_stats_basic_packed bstats;
+       struct gnet_stats_basic_sync bstats;
+       u64 bstats_bytes, bstats_packets;
 
-       gnet_stats_basic_packed_init(&bstats);
+       gnet_stats_basic_sync_init(&bstats);
        gnet_stats_add_basic(running, &bstats, cpu, b);
 
+       bstats_bytes = u64_stats_read(&bstats.bytes);
+       bstats_packets = u64_stats_read(&bstats.packets);
+
        if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
-               d->tc_stats.bytes = bstats.bytes;
-               d->tc_stats.packets = bstats.packets;
+               d->tc_stats.bytes = bstats_bytes;
+               d->tc_stats.packets = bstats_packets;
        }
 
        if (d->tail) {
@@ -192,14 +196,14 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
                int res;
 
                memset(&sb, 0, sizeof(sb));
-               sb.bytes = bstats.bytes;
-               sb.packets = bstats.packets;
+               sb.bytes = bstats_bytes;
+               sb.packets = bstats_packets;
                res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
-               if (res < 0 || sb.packets == bstats.packets)
+               if (res < 0 || sb.packets == bstats_packets)
                        return res;
                /* emit 64bit stats only if needed */
-               return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats.packets,
-                                      sizeof(bstats.packets), TCA_STATS_PAD);
+               return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
+                                      sizeof(bstats_packets), TCA_STATS_PAD);
        }
        return 0;
 }
@@ -220,8 +224,8 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
 int
 gnet_stats_copy_basic(const seqcount_t *running,
                      struct gnet_dump *d,
-                     struct gnet_stats_basic_cpu __percpu *cpu,
-                     struct gnet_stats_basic_packed *b)
+                     struct gnet_stats_basic_sync __percpu *cpu,
+                     struct gnet_stats_basic_sync *b)
 {
        return ___gnet_stats_copy_basic(running, d, cpu, b,
                                        TCA_STATS_BASIC);
@@ -244,8 +248,8 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
 int
 gnet_stats_copy_basic_hw(const seqcount_t *running,
                         struct gnet_dump *d,
-                        struct gnet_stats_basic_cpu __percpu *cpu,
-                        struct gnet_stats_basic_packed *b)
+                        struct gnet_stats_basic_sync __percpu *cpu,
+                        struct gnet_stats_basic_sync *b)
 {
        return ___gnet_stats_copy_basic(running, d, cpu, b,
                                        TCA_STATS_BASIC_HW);
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index d5200725ca62c9dc3141e4cc91fbd44a55ee42fe..8aec1b529364ae1702f38a1ce4dd69c79cd6eb5e 100644 (file)
@@ -94,11 +94,11 @@ static unsigned int
 xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_rateest_target_info *info = par->targinfo;
-       struct gnet_stats_basic_packed *stats = &info->est->bstats;
+       struct gnet_stats_basic_sync *stats = &info->est->bstats;
 
        spin_lock_bh(&info->est->lock);
-       stats->bytes += skb->len;
-       stats->packets++;
+       u64_stats_add(&stats->bytes, skb->len);
+       u64_stats_inc(&stats->packets);
        spin_unlock_bh(&info->est->lock);
 
        return XT_CONTINUE;
@@ -143,7 +143,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
        if (!est)
                goto err1;
 
-       gnet_stats_basic_packed_init(&est->bstats);
+       gnet_stats_basic_sync_init(&est->bstats);
        strlcpy(est->name, info->name, sizeof(est->name));
        spin_lock_init(&est->lock);
        est->refcnt             = 1;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 0302dad42df143ef77e09c3d89b22f1f6c069204..585829ffa0c4c2bfeaf88daef53bc68e027bd046 100644 (file)
@@ -480,18 +480,18 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                atomic_set(&p->tcfa_bindcnt, 1);
 
        if (cpustats) {
-               p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+               p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
                if (!p->cpu_bstats)
                        goto err1;
-               p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+               p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
                if (!p->cpu_bstats_hw)
                        goto err2;
                p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
                if (!p->cpu_qstats)
                        goto err3;
        }
-       gnet_stats_basic_packed_init(&p->tcfa_bstats);
-       gnet_stats_basic_packed_init(&p->tcfa_bstats_hw);
+       gnet_stats_basic_sync_init(&p->tcfa_bstats);
+       gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
        spin_lock_init(&p->tcfa_lock);
        p->tcfa_index = index;
        p->tcfa_tm.install = jiffies;
@@ -1128,13 +1128,13 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
                             u64 drops, bool hw)
 {
        if (a->cpu_bstats) {
-               _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+               _bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
 
                this_cpu_ptr(a->cpu_qstats)->drops += drops;
 
                if (hw)
-                       _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
-                                          bytes, packets);
+                       _bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
+                                      bytes, packets);
                return;
        }
 
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 5c36013339e1138086d918479328f65c06925cff..f2bf896331a596a52366bd33a42041affc036c50 100644 (file)
@@ -41,7 +41,7 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
        int action, filter_res;
 
        tcf_lastuse_update(&prog->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
 
        filter = rcu_dereference(prog->filter);
        if (at_ingress) {
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 7064a365a1a9834244549b110771d89e1b2895d0..b757f90a2d5892eb23c0e2b8717911dc1bff5e58 100644 (file)
@@ -718,7 +718,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
        u8 *tlv_data;
        u16 metalen;
 
-       bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
        tcf_lastuse_update(&ife->tcf_tm);
 
        if (skb_at_tc_ingress(skb))
@@ -806,7 +806,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                        exceed_mtu = true;
        }
 
-       bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
        tcf_lastuse_update(&ife->tcf_tm);
 
        if (!metalen) {         /* no metadata to send */
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index e4529b428cf44a7c54c119d6ca37422a48e64120..8faa4c58305e3f3cf331acc4b6e67403cfe1932d 100644 (file)
@@ -59,7 +59,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
        int ret, mac_len;
 
        tcf_lastuse_update(&m->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(m->common.cpu_bstats), skb);
 
        /* Ensure 'data' points at mac_header prior calling mpls manipulating
         * functions.
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 832157a840fc36961345c9374039c49e1dedfd50..c9383805222dfc0c5adba6271afa4dbc881a735a 100644 (file)
@@ -248,7 +248,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
        int ret;
 
        tcf_lastuse_update(&police->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);
 
        ret = READ_ONCE(police->tcf_action);
        p = rcu_dereference_bh(police->params);
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 230501eb9e069fbcb8de3889161e0cf9d8326b6b..ce859b0e0deb9d39692b3f6846b875f8f14cfbce 100644 (file)
@@ -163,7 +163,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
        int retval;
 
        tcf_lastuse_update(&s->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(s->common.cpu_bstats), skb);
        retval = READ_ONCE(s->tcf_action);
 
        psample_group = rcu_dereference_bh(s->psample_group);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index cbbe1861d3a20c263fa6fe01a7113e1812cc7f7b..e617ab4505ca46b8118bd7edce12ccf8427ccdac 100644 (file)
@@ -36,7 +36,8 @@ static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a,
         * then it would look like "hello_3" (without quotes)
         */
        pr_info("simple: %s_%llu\n",
-              (char *)d->tcfd_defdata, d->tcf_bstats.packets);
+               (char *)d->tcfd_defdata,
+               u64_stats_read(&d->tcf_bstats.packets));
        spin_unlock(&d->tcf_lock);
        return d->tcf_action;
 }
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 605418538347482cc27812442df10ef73ce02732..d30ecbfc8f8463d0ae6b3682cf1cd202fe925280 100644 (file)
@@ -31,7 +31,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
        int action;
 
        tcf_lastuse_update(&d->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
        params = rcu_dereference_bh(d->params);
        action = READ_ONCE(d->tcf_action);
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index ecb9ee6660954c1195cd682032138aae53a51ce8..9b6b52c5e24ec821c65ca8f3fe08bbb1edc74ea6 100644 (file)
@@ -31,7 +31,7 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
        u64 flags;
 
        tcf_lastuse_update(&d->tcf_tm);
-       bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+       bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
        action = READ_ONCE(d->tcf_action);
        if (unlikely(action == TC_ACT_SHOT))
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 91820f67275c72d8c8cfeaa68ff8d02e73f2ac15..70f006cbf21260d87d38a0080db379a2a43d503f 100644 (file)
@@ -885,7 +885,7 @@ static void qdisc_offload_graft_root(struct net_device *dev,
 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                         u32 portid, u32 seq, u16 flags, int event)
 {
-       struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+       struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
        struct gnet_stats_queue __percpu *cpu_qstats = NULL;
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index c8e1771383f9a470e24056d8fbfe9eb561313fbf..fbfe4ce9497b586aadd48e21acc68d4a34750dd4 100644 (file)
@@ -52,7 +52,7 @@ struct atm_flow_data {
        struct atm_qdisc_data   *parent;        /* parent qdisc */
        struct socket           *sock;          /* for closing */
        int                     ref;            /* reference count */
-       struct gnet_stats_basic_packed  bstats;
+       struct gnet_stats_basic_sync    bstats;
        struct gnet_stats_queue qstats;
        struct list_head        list;
        struct atm_flow_data    *excess;        /* flow for excess traffic;
@@ -548,7 +548,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
        pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
        INIT_LIST_HEAD(&p->flows);
        INIT_LIST_HEAD(&p->link.list);
-       gnet_stats_basic_packed_init(&p->link.bstats);
+       gnet_stats_basic_sync_init(&p->link.bstats);
        list_add(&p->link.list, &p->flows);
        p->link.q = qdisc_create_dflt(sch->dev_queue,
                                      &pfifo_qdisc_ops, sch->handle, extack);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index ef9e87175d35c3744004fe30146028547bead701..f0b1282fae111137d1be96595ba8456a684bbee0 100644 (file)
@@ -116,7 +116,7 @@ struct cbq_class {
        long                    avgidle;
        long                    deficit;        /* Saved deficit for WRR */
        psched_time_t           penalized;
-       struct gnet_stats_basic_packed bstats;
+       struct gnet_stats_basic_sync bstats;
        struct gnet_stats_queue qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct tc_cbq_xstats    xstats;
@@ -1610,7 +1610,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (cl == NULL)
                goto failure;
 
-       gnet_stats_basic_packed_init(&cl->bstats);
+       gnet_stats_basic_sync_init(&cl->bstats);
        err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
        if (err) {
                kfree(cl);
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 319906e19a6bac87b6251cb7e881d4789838a71f..7243617a3595fdb8643e065ced36761316bf3065 100644 (file)
@@ -19,7 +19,7 @@ struct drr_class {
        struct Qdisc_class_common       common;
        unsigned int                    filter_cnt;
 
-       struct gnet_stats_basic_packed          bstats;
+       struct gnet_stats_basic_sync            bstats;
        struct gnet_stats_queue         qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct list_head                alist;
@@ -106,7 +106,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        if (cl == NULL)
                return -ENOBUFS;
 
-       gnet_stats_basic_packed_init(&cl->bstats);
+       gnet_stats_basic_sync_init(&cl->bstats);
        cl->common.classid = classid;
        cl->quantum        = quantum;
        cl->qdisc          = qdisc_create_dflt(sch->dev_queue,
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 83693107371f9b9a9a0303b28ae0fdbf3be1b129..af56d155e7fca7fdae60d0dd9909515d8f754279 100644 (file)
@@ -41,7 +41,7 @@ struct ets_class {
        struct Qdisc *qdisc;
        u32 quantum;
        u32 deficit;
-       struct gnet_stats_basic_packed bstats;
+       struct gnet_stats_basic_sync bstats;
        struct gnet_stats_queue qstats;
 };
 
@@ -689,7 +689,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
                q->classes[i].qdisc = NULL;
                q->classes[i].quantum = 0;
                q->classes[i].deficit = 0;
-               gnet_stats_basic_packed_init(&q->classes[i].bstats);
+               gnet_stats_basic_sync_init(&q->classes[i].bstats);
                memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
        }
        return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ef27ff3ddee4fda6289f3d94c4aa94fd67428650..989186e7f1a02645c4a1dc6bc0d54d348e0d10a4 100644 (file)
@@ -892,12 +892,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        __skb_queue_head_init(&sch->gso_skb);
        __skb_queue_head_init(&sch->skb_bad_txq);
        qdisc_skb_head_init(&sch->q);
-       gnet_stats_basic_packed_init(&sch->bstats);
+       gnet_stats_basic_sync_init(&sch->bstats);
        spin_lock_init(&sch->q.lock);
 
        if (ops->static_flags & TCQ_F_CPUSTATS) {
                sch->cpu_bstats =
-                       netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+                       netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
                if (!sch->cpu_bstats)
                        goto errout1;
 
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 02b03d6d24ea4c6da32697ee9e2d1718c63d913a..72de08ef8335e9ea0f936775c016ffd1979d5286 100644 (file)
@@ -366,7 +366,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
        hw_stats->parent = sch->parent;
 
        for (i = 0; i < MAX_DPs; i++) {
-               gnet_stats_basic_packed_init(&hw_stats->stats.bstats[i]);
+               gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
                if (table->tab[i])
                        hw_stats->stats.xstats[i] = &table->tab[i]->stats;
        }
@@ -378,12 +378,12 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
        for (i = 0; i < MAX_DPs; i++) {
                if (!table->tab[i])
                        continue;
-               table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
-               table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
+               table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
+               table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
                table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
 
-               bytes += hw_stats->stats.bstats[i].bytes;
-               packets += hw_stats->stats.bstats[i].packets;
+               bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
+               packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
                sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
                sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
                sch->qstats.drops += hw_stats->stats.qstats[i].drops;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index ff6ff54806fcd521129091097f64c6c0925dbbd5..181c2905ff9833e8c1a12adcbf2980072a1ccc1a 100644 (file)
@@ -111,7 +111,7 @@ enum hfsc_class_flags {
 struct hfsc_class {
        struct Qdisc_class_common cl_common;
 
-       struct gnet_stats_basic_packed bstats;
+       struct gnet_stats_basic_sync bstats;
        struct gnet_stats_queue qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct tcf_proto __rcu *filter_list; /* filter list */
@@ -1406,7 +1406,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
        if (err)
                return err;
 
-       gnet_stats_basic_packed_init(&q->root.bstats);
+       gnet_stats_basic_sync_init(&q->root.bstats);
        q->root.cl_common.classid = sch->handle;
        q->root.sched   = q;
        q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 324ecfdf842a30038857a957744b84d7e95303a9..adceb9e210f617aa77d26260723d912a8c520f3f 100644 (file)
@@ -113,8 +113,8 @@ struct htb_class {
        /*
         * Written often fields
         */
-       struct gnet_stats_basic_packed bstats;
-       struct gnet_stats_basic_packed bstats_bias;
+       struct gnet_stats_basic_sync bstats;
+       struct gnet_stats_basic_sync bstats_bias;
        struct tc_htb_xstats    xstats; /* our special stats */
 
        /* token bucket parameters */
@@ -1312,7 +1312,7 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
        struct htb_class *c;
        unsigned int i;
 
-       gnet_stats_basic_packed_init(&cl->bstats);
+       gnet_stats_basic_sync_init(&cl->bstats);
 
        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
@@ -1324,11 +1324,11 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
                        if (p != cl)
                                continue;
 
-                       bytes += c->bstats_bias.bytes;
-                       packets += c->bstats_bias.packets;
+                       bytes += u64_stats_read(&c->bstats_bias.bytes);
+                       packets += u64_stats_read(&c->bstats_bias.packets);
                        if (c->level == 0) {
-                               bytes += c->leaf.q->bstats.bytes;
-                               packets += c->leaf.q->bstats.packets;
+                               bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
+                               packets += u64_stats_read(&c->leaf.q->bstats.packets);
                        }
                }
        }
@@ -1359,10 +1359,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
                        if (cl->leaf.q)
                                cl->bstats = cl->leaf.q->bstats;
                        else
-                               gnet_stats_basic_packed_init(&cl->bstats);
+                               gnet_stats_basic_sync_init(&cl->bstats);
                        _bstats_update(&cl->bstats,
-                                      cl->bstats_bias.bytes,
-                                      cl->bstats_bias.packets);
+                                      u64_stats_read(&cl->bstats_bias.bytes),
+                                      u64_stats_read(&cl->bstats_bias.packets));
                } else {
                        htb_offload_aggregate_stats(q, cl);
                }
@@ -1582,8 +1582,8 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
 
        if (cl->parent) {
                _bstats_update(&cl->parent->bstats_bias,
-                              q->bstats.bytes,
-                              q->bstats.packets);
+                              u64_stats_read(&q->bstats.bytes),
+                              u64_stats_read(&q->bstats.packets));
        }
 
        offload_opt = (struct tc_htb_qopt_offload) {
@@ -1853,8 +1853,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                if (!cl)
                        goto failure;
 
-               gnet_stats_basic_packed_init(&cl->bstats);
-               gnet_stats_basic_packed_init(&cl->bstats_bias);
+               gnet_stats_basic_sync_init(&cl->bstats);
+               gnet_stats_basic_sync_init(&cl->bstats_bias);
 
                err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
                if (err) {
@@ -1930,8 +1930,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                                goto err_kill_estimator;
                        }
                        _bstats_update(&parent->bstats_bias,
-                                      old_q->bstats.bytes,
-                                      old_q->bstats.packets);
+                                      u64_stats_read(&old_q->bstats.bytes),
+                                      u64_stats_read(&old_q->bstats.packets));
                        qdisc_put(old_q);
                }
                new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 704e14a58f09d5e7176d4a6d359cb1d99e15fc40..cedd0b3ef9cfb80fc1000da2e1bd4c3dbbe96541 100644 (file)
@@ -132,7 +132,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
        unsigned int ntx;
 
        sch->q.qlen = 0;
-       gnet_stats_basic_packed_init(&sch->bstats);
+       gnet_stats_basic_sync_init(&sch->bstats);
        memset(&sch->qstats, 0, sizeof(sch->qstats));
 
        /* MQ supports lockless qdiscs. However, statistics accounting needs
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index fe6b4a178fc9fff8031350393e1c27735a4a5bd6..3f7f756f92ca3bdef2ee8d75be4c09accda86c1c 100644 (file)
@@ -390,7 +390,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
        unsigned int ntx, tc;
 
        sch->q.qlen = 0;
-       gnet_stats_basic_packed_init(&sch->bstats);
+       gnet_stats_basic_sync_init(&sch->bstats);
        memset(&sch->qstats, 0, sizeof(sch->qstats));
 
        /* MQ supports lockless qdiscs. However, statistics accounting needs
@@ -500,11 +500,11 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                int i;
                __u32 qlen;
                struct gnet_stats_queue qstats = {0};
-               struct gnet_stats_basic_packed bstats;
+               struct gnet_stats_basic_sync bstats;
                struct net_device *dev = qdisc_dev(sch);
                struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];
 
-               gnet_stats_basic_packed_init(&bstats);
+               gnet_stats_basic_sync_init(&bstats);
                /* Drop lock here it will be reclaimed before touching
                 * statistics this is required because the d->lock we
                 * hold here is the look on dev_queue->qdisc_sleeping
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index bea68c91027a31e3e3019fc9d9353cb39ca029e0..a35200f591a2d65035e695331b64ac11fdf12fdd 100644 (file)
@@ -131,7 +131,7 @@ struct qfq_class {
 
        unsigned int filter_cnt;
 
-       struct gnet_stats_basic_packed bstats;
+       struct gnet_stats_basic_sync bstats;
        struct gnet_stats_queue qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct Qdisc *qdisc;
@@ -465,7 +465,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        if (cl == NULL)
                return -ENOBUFS;
 
-       gnet_stats_basic_packed_init(&cl->bstats);
+       gnet_stats_basic_sync_init(&cl->bstats);
        cl->common.classid = classid;
        cl->deficit = lmax;