git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
net-memcg: pass in gfp_t mask to mem_cgroup_charge_skmem()
author: Wei Wang <weiwan@google.com>
Tue, 17 Aug 2021 19:40:03 +0000 (12:40 -0700)
committer: David S. Miller <davem@davemloft.net>
Wed, 18 Aug 2021 10:39:44 +0000 (11:39 +0100)
Add gfp_t mask as an input parameter to mem_cgroup_charge_skmem(),
to give more control to the networking stack and enable it to change
memcg charging behavior. In the future, the networking stack may decide
to avoid oom-kills when fallbacks are more appropriate.

One behavior change in mem_cgroup_charge_skmem() by this patch is to
avoid force charging by default and let the caller decide when and if
force charging is needed through the presence or absence of
__GFP_NOFAIL.

Signed-off-by: Wei Wang <weiwan@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/memcontrol.h
include/net/sock.h
mm/memcontrol.c
net/core/sock.c
net/ipv4/inet_connection_sock.c
net/ipv4/tcp_output.c

index bfe5c486f4add865bcef9ed13f087dab3eb0eba8..f0ee30881ca93adeb66cd2c4ed246156aab98cb9 100644 (file)
@@ -1581,7 +1581,8 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
 #endif /* CONFIG_CGROUP_WRITEBACK */
 
 struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
+                            gfp_t gfp_mask);
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 #ifdef CONFIG_MEMCG
 extern struct static_key_false memcg_sockets_enabled_key;
index 6e761451c9278abc46a1c20a25d0c19e2886a222..95b25777b53ec8a3e0bb82532193b5237b80d511 100644 (file)
@@ -2400,6 +2400,11 @@ static inline gfp_t gfp_any(void)
        return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
+static inline gfp_t gfp_memcg_charge(void)
+{
+       return in_softirq() ? GFP_NOWAIT : GFP_KERNEL;
+}
+
 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
 {
        return noblock ? 0 : sk->sk_rcvtimeo;
index 8ef06f9e0db1f4549548d5e6531ac2927dd5b17d..be585ceaba98433491b5a5a565e3c48a5723faa2 100644 (file)
@@ -7048,14 +7048,14 @@ void mem_cgroup_sk_free(struct sock *sk)
  * mem_cgroup_charge_skmem - charge socket memory
  * @memcg: memcg to charge
  * @nr_pages: number of pages to charge
+ * @gfp_mask: reclaim mode
  *
  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
- * @memcg's configured limit, %false if the charge had to be forced.
+ * @memcg's configured limit, %false if it doesn't.
  */
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
+                            gfp_t gfp_mask)
 {
-       gfp_t gfp_mask = GFP_KERNEL;
-
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
                struct page_counter *fail;
 
@@ -7063,21 +7063,19 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
                        memcg->tcpmem_pressure = 0;
                        return true;
                }
-               page_counter_charge(&memcg->tcpmem, nr_pages);
                memcg->tcpmem_pressure = 1;
+               if (gfp_mask & __GFP_NOFAIL) {
+                       page_counter_charge(&memcg->tcpmem, nr_pages);
+                       return true;
+               }
                return false;
        }
 
-       /* Don't block in the packet receive path */
-       if (in_softirq())
-               gfp_mask = GFP_NOWAIT;
-
-       mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
-
-       if (try_charge(memcg, gfp_mask, nr_pages) == 0)
+       if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
+               mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
                return true;
+       }
 
-       try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
        return false;
 }
 
index aada649e07e8dc8bb8f74f02c61f21704566cda1..950f1e70dbf5d5c231bb70b706aecc0f35e0a495 100644 (file)
@@ -2728,10 +2728,12 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 {
        struct proto *prot = sk->sk_prot;
        long allocated = sk_memory_allocated_add(sk, amt);
+       bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
        bool charged = true;
 
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
-           !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
+       if (memcg_charge &&
+           !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+                                               gfp_memcg_charge())))
                goto suppress_allocation;
 
        /* Under limit. */
@@ -2785,8 +2787,14 @@ suppress_allocation:
                /* Fail only if socket is _under_ its sndbuf.
                 * In this case we cannot block, so that we have to fail.
                 */
-               if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
+               if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
+                       /* Force charge with __GFP_NOFAIL */
+                       if (memcg_charge && !charged) {
+                               mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+                                       gfp_memcg_charge() | __GFP_NOFAIL);
+                       }
                        return 1;
+               }
        }
 
        if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
@@ -2794,7 +2802,7 @@ suppress_allocation:
 
        sk_memory_allocated_sub(sk, amt);
 
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+       if (memcg_charge && charged)
                mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
 
        return 0;
index 754013fa393bb1da99f34c1bc7ad793d8c54ce4e..f25d02ad4a8af41790261a0c79188111ed408efc 100644 (file)
@@ -534,7 +534,8 @@ out:
                                   atomic_read(&newsk->sk_rmem_alloc));
                mem_cgroup_sk_alloc(newsk);
                if (newsk->sk_memcg && amt)
-                       mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
+                       mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
+                                               GFP_KERNEL | __GFP_NOFAIL);
 
                release_sock(newsk);
        }
index 29553fce8502861087830b94cc4fbebfce6e60dc..6d72f3ea48c4ef0d193ec804653e4d4321f3f20a 100644 (file)
@@ -3373,7 +3373,8 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
        sk_memory_allocated_add(sk, amt);
 
        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
-               mem_cgroup_charge_skmem(sk->sk_memcg, amt);
+               mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+                                       gfp_memcg_charge() | __GFP_NOFAIL);
 }
 
 /* Send a FIN. The caller locks the socket for us.