git.proxmox.com Git - pve-kernel.git/commitdiff
rebase patches on top of Ubuntu-6.5.0-27.27
author Thomas Lamprecht <t.lamprecht@proxmox.com>
Mon, 11 Mar 2024 12:38:05 +0000 (13:38 +0100)
committer Thomas Lamprecht <t.lamprecht@proxmox.com>
Mon, 11 Mar 2024 12:38:05 +0000 (13:38 +0100)
(generated with debian/scripts/import-upstream-tag)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
patches/kernel/0004-pci-Enable-overrides-for-missing-ACS-capabilities-4..patch
patches/kernel/0006-net-core-downgrade-unregister_netdevice-refcount-lea.patch
patches/kernel/0009-allow-opt-in-to-allow-pass-through-on-broken-hardwar.patch
patches/kernel/0014-Revert-scsi-aacraid-Reply-queue-mapping-to-CPUs-base.patch [deleted file]
patches/kernel/0014-ext4-fallback-to-complex-scan-if-aligned-scan-doesn-.patch [new file with mode: 0644]
patches/kernel/0015-ext4-fallback-to-complex-scan-if-aligned-scan-doesn-.patch [deleted file]
patches/kernel/0015-sched-core-Drop-spinlocks-on-contention-iff-kernel-i.patch [new file with mode: 0644]
patches/kernel/0018-sched-core-Drop-spinlocks-on-contention-iff-kernel-i.patch [deleted file]

index 1f8e72216b0e7f6a53506c81a07439dc883d5523..fbd8493de161bd5a633cda3b5e80a986a1c98190 100644 (file)
@@ -75,7 +75,7 @@ index 90ddf08e8409..eedfabda597f 100644
                                Safety option to keep boot IRQs enabled. This
                                should never be necessary.
 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
-index b3a55f2cab46..baef591d10f6 100644
+index 47e8ee1f6429..6c7351c444b0 100644
 --- a/drivers/pci/quirks.c
 +++ b/drivers/pci/quirks.c
 @@ -287,6 +287,106 @@ static int __init pci_apply_final_quirks(void)
@@ -185,7 +185,7 @@ index b3a55f2cab46..baef591d10f6 100644
  /*
   * Decoding should be disabled for a PCI device during BAR sizing to avoid
   * conflict. But doing so may cause problems on host bridge and perhaps other
-@@ -5071,6 +5171,8 @@ static const struct pci_dev_acs_enabled {
+@@ -5075,6 +5175,8 @@ static const struct pci_dev_acs_enabled {
        { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
        /* APM X-Gene */
        { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
index b66932d03fdcd75cc0baee2fe8cb6e86acffc8e1..b07247200b056c4f6344bee8b6c9b99eb0fa01a7 100644 (file)
@@ -14,10 +14,10 @@ Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/net/core/dev.c b/net/core/dev.c
-index 3c5106cd0602..3634982362f7 100644
+index 855173c77581..80b4a639c32c 100644
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -10352,7 +10352,7 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
+@@ -10355,7 +10355,7 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
                if (time_after(jiffies, warning_time +
                               READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
                        list_for_each_entry(dev, list, todo_list) {
index d5e9b83ddf08f0cee2222413f15ec0514301d34e..e3cb595636ead97d677293aa81d167cd0f386ad5 100644 (file)
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
  1 file changed, 5 insertions(+), 1 deletion(-)
 
 diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
-index 15e2649b25ef..752f77e285f2 100644
+index 8faccfdfe500..2b9ef40799a5 100644
 --- a/drivers/iommu/intel/iommu.c
 +++ b/drivers/iommu/intel/iommu.c
 @@ -298,6 +298,7 @@ static int dmar_map_gfx = 1;
@@ -32,7 +32,7 @@ index 15e2649b25ef..752f77e285f2 100644
                } else {
                        pr_notice("Unknown option - '%s'\n", str);
                }
-@@ -2504,7 +2508,7 @@ static bool device_rmrr_is_relaxable(struct device *dev)
+@@ -2506,7 +2510,7 @@ static bool device_rmrr_is_relaxable(struct device *dev)
                return false;
  
        pdev = to_pci_dev(dev);
diff --git a/patches/kernel/0014-Revert-scsi-aacraid-Reply-queue-mapping-to-CPUs-base.patch b/patches/kernel/0014-Revert-scsi-aacraid-Reply-queue-mapping-to-CPUs-base.patch
deleted file mode 100644 (file)
index 16cf427..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: "Martin K. Petersen" <martin.petersen@oracle.com>
-Date: Fri, 8 Dec 2023 12:09:38 -0500
-Subject: [PATCH] Revert "scsi: aacraid: Reply queue mapping to CPUs based on
- IRQ affinity"
-
-This reverts commit 9dc704dcc09eae7d21b5da0615eb2ed79278f63e.
-
-Several reports have been made indicating that this commit caused
-hangs. Numerous attempts at root causing and fixing the issue have
-been unsuccessful so let's revert for now.
-
-Link: https://bugzilla.kernel.org/show_bug.cgi?id=217599
-Cc: <stable@vger.kernel.org>
-Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
----
- drivers/scsi/aacraid/aacraid.h |  1 -
- drivers/scsi/aacraid/commsup.c |  6 +-----
- drivers/scsi/aacraid/linit.c   | 14 --------------
- drivers/scsi/aacraid/src.c     | 25 ++-----------------------
- 4 files changed, 3 insertions(+), 43 deletions(-)
-
-diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
-index 73b6ac0c01f5..7d5a155073c6 100644
---- a/drivers/scsi/aacraid/aacraid.h
-+++ b/drivers/scsi/aacraid/aacraid.h
-@@ -1678,7 +1678,6 @@ struct aac_dev
-       u32                     handle_pci_error;
-       bool                    init_reset;
-       u8                      soft_reset_support;
--      u8                      use_map_queue;
- };
- #define aac_adapter_interrupt(dev) \
-diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
-index 013a9a334972..25cee03d7f97 100644
---- a/drivers/scsi/aacraid/commsup.c
-+++ b/drivers/scsi/aacraid/commsup.c
-@@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev)
- struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
- {
-       struct fib *fibptr;
--      u32 blk_tag;
--      int i;
--      blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
--      i = blk_mq_unique_tag_to_tag(blk_tag);
--      fibptr = &dev->fibs[i];
-+      fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
-       /*
-        *      Null out fields that depend on being zero at the start of
-        *      each I/O
-diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
-index c4a36c0be527..68f4dbcfff49 100644
---- a/drivers/scsi/aacraid/linit.c
-+++ b/drivers/scsi/aacraid/linit.c
-@@ -19,7 +19,6 @@
- #include <linux/compat.h>
- #include <linux/blkdev.h>
--#include <linux/blk-mq-pci.h>
- #include <linux/completion.h>
- #include <linux/init.h>
- #include <linux/interrupt.h>
-@@ -505,15 +504,6 @@ static int aac_slave_configure(struct scsi_device *sdev)
-       return 0;
- }
--static void aac_map_queues(struct Scsi_Host *shost)
--{
--      struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
--
--      blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
--                            aac->pdev, 0);
--      aac->use_map_queue = true;
--}
--
- /**
-  *    aac_change_queue_depth          -       alter queue depths
-  *    @sdev:  SCSI device we are considering
-@@ -1498,7 +1488,6 @@ static const struct scsi_host_template aac_driver_template = {
-       .bios_param                     = aac_biosparm,
-       .shost_groups                   = aac_host_groups,
-       .slave_configure                = aac_slave_configure,
--      .map_queues                     = aac_map_queues,
-       .change_queue_depth             = aac_change_queue_depth,
-       .sdev_groups                    = aac_dev_groups,
-       .eh_abort_handler               = aac_eh_abort,
-@@ -1786,8 +1775,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
-       shost->max_lun = AAC_MAX_LUN;
-       pci_set_drvdata(pdev, shost);
--      shost->nr_hw_queues = aac->max_msix;
--      shost->host_tagset = 1;
-       error = scsi_add_host(shost, &pdev->dev);
-       if (error)
-@@ -1919,7 +1906,6 @@ static void aac_remove_one(struct pci_dev *pdev)
-       struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
-       aac_cancel_rescan_worker(aac);
--      aac->use_map_queue = false;
-       scsi_remove_host(shost);
-       __aac_shutdown(aac);
-diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
-index 61949f374188..11ef58204e96 100644
---- a/drivers/scsi/aacraid/src.c
-+++ b/drivers/scsi/aacraid/src.c
-@@ -493,10 +493,6 @@ static int aac_src_deliver_message(struct fib *fib)
- #endif
-       u16 vector_no;
--      struct scsi_cmnd *scmd;
--      u32 blk_tag;
--      struct Scsi_Host *shost = dev->scsi_host_ptr;
--      struct blk_mq_queue_map *qmap;
-       atomic_inc(&q->numpending);
-@@ -509,25 +505,8 @@ static int aac_src_deliver_message(struct fib *fib)
-               if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
-                       && dev->sa_firmware)
-                       vector_no = aac_get_vector(dev);
--              else {
--                      if (!fib->vector_no || !fib->callback_data) {
--                              if (shost && dev->use_map_queue) {
--                                      qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
--                                      vector_no = qmap->mq_map[raw_smp_processor_id()];
--                              }
--                              /*
--                               *      We hardcode the vector_no for
--                               *      reserved commands as a valid shost is
--                               *      absent during the init
--                               */
--                              else
--                                      vector_no = 0;
--                      } else {
--                              scmd = (struct scsi_cmnd *)fib->callback_data;
--                              blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
--                              vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
--                      }
--              }
-+              else
-+                      vector_no = fib->vector_no;
-               if (native_hba) {
-                       if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
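
For readers unfamiliar with what is being removed here: the reverted commit had mapped the adapter's reply queues onto blk-mq hardware queues using PCI MSI-X IRQ affinity. Below is a rough, hypothetical sketch of that pattern, not the aacraid driver's actual code; my_dev and my_map_queues are invented names, while blk_mq_pci_map_queues(), shost_priv() and HCTX_TYPE_DEFAULT are the real kernel helpers.

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>

struct my_dev {                         /* hypothetical per-adapter state */
        struct pci_dev *pdev;
};

/* .map_queues callback in a scsi_host_template: route each blk-mq
 * hardware queue to the CPUs of one MSI-X vector, so a command's
 * completion is delivered on the CPU set that submitted it. */
static void my_map_queues(struct Scsi_Host *shost)
{
        struct my_dev *dev = shost_priv(shost);

        blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
                              dev->pdev, 0);
}

The revert drops this mapping (and the per-command hardware-queue lookup built on top of it) and returns to the driver's original vector selection via fib->vector_no, because the mapping was implicated in the hangs referenced in the commit message above.
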
diff --git a/patches/kernel/0014-ext4-fallback-to-complex-scan-if-aligned-scan-doesn-.patch b/patches/kernel/0014-ext4-fallback-to-complex-scan-if-aligned-scan-doesn-.patch
new file mode 100644 (file)
index 0000000..4c1ab23
--- /dev/null
@@ -0,0 +1,56 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Date: Fri, 15 Dec 2023 16:49:50 +0530
+Subject: [PATCH] ext4: fallback to complex scan if aligned scan doesn't work
+
+Currently in case the goal length is a multiple of stripe size we use
+ext4_mb_scan_aligned() to find the stripe size aligned physical blocks.
+In case we are not able to find any, we again go back to calling
+ext4_mb_choose_next_group() to search for a different suitable block
+group. However, since the linear search always begins from the start,
+most of the times we end up with the same BG and the cycle continues.
+
+With large fliesystems, the CPU can be stuck in this loop for hours
+which can slow down the whole system. Hence, until we figure out a
+better way to continue the search (rather than starting from beginning)
+in ext4_mb_choose_next_group(), lets just fallback to
+ext4_mb_complex_scan_group() in case aligned scan fails, as it is much
+more likely to find the needed blocks.
+
+Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+---
+ fs/ext4/mballoc.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 5246b408cf0c..e3b942664842 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2894,14 +2894,19 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+                       ac->ac_groups_scanned++;
+                       if (cr == CR_POWER2_ALIGNED)
+                               ext4_mb_simple_scan_group(ac, &e4b);
+-                      else if ((cr == CR_GOAL_LEN_FAST ||
+-                               cr == CR_BEST_AVAIL_LEN) &&
+-                               sbi->s_stripe &&
+-                               !(ac->ac_g_ex.fe_len %
+-                               EXT4_B2C(sbi, sbi->s_stripe)))
+-                              ext4_mb_scan_aligned(ac, &e4b);
+-                      else
+-                              ext4_mb_complex_scan_group(ac, &e4b);
++                      else {
++                              bool is_stripe_aligned = sbi->s_stripe &&
++                                      !(ac->ac_g_ex.fe_len %
++                                        EXT4_B2C(sbi, sbi->s_stripe));
++
++                              if ((cr == CR_GOAL_LEN_FAST ||
++                                   cr == CR_BEST_AVAIL_LEN) &&
++                                  is_stripe_aligned)
++                                      ext4_mb_scan_aligned(ac, &e4b);
++
++                              if (ac->ac_status == AC_STATUS_CONTINUE)
++                                      ext4_mb_complex_scan_group(ac, &e4b);
++                      }
+                       ext4_unlock_group(sb, group);
+                       ext4_mb_unload_buddy(&e4b);
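
To make the new control flow easier to follow outside the mballoc context, here is a minimal, self-contained C sketch of the fallback decision this patch introduces. All names (scan_group, aligned_scan, complex_scan, STATUS_*) are hypothetical stand-ins for ext4_mb_regular_allocator(), ext4_mb_scan_aligned(), ext4_mb_complex_scan_group() and AC_STATUS_CONTINUE; only the shape of the logic matches the hunk above, and the criteria check (CR_GOAL_LEN_FAST / CR_BEST_AVAIL_LEN) is omitted for brevity.

#include <stdbool.h>
#include <stdio.h>

enum scan_status { STATUS_CONTINUE, STATUS_FOUND };

/* stand-in for ext4_mb_scan_aligned(): only succeeds if the group happens
 * to hold a stripe-aligned free extent of the requested size */
static enum scan_status aligned_scan(bool group_has_aligned_extent)
{
        return group_has_aligned_extent ? STATUS_FOUND : STATUS_CONTINUE;
}

/* stand-in for ext4_mb_complex_scan_group(): exhaustive, far more likely
 * to find usable blocks */
static enum scan_status complex_scan(void)
{
        return STATUS_FOUND;
}

static enum scan_status scan_group(unsigned goal_len, unsigned stripe,
                                   bool group_has_aligned_extent)
{
        enum scan_status st = STATUS_CONTINUE;
        /* same test as the is_stripe_aligned variable in the hunk above */
        bool stripe_aligned = stripe && (goal_len % stripe == 0);

        if (stripe_aligned)
                st = aligned_scan(group_has_aligned_extent);

        /* the actual change: instead of giving up on the group (and letting
         * the linear group search pick the same group again and again), fall
         * back to the complex scan while the allocation is still unsatisfied */
        if (st == STATUS_CONTINUE)
                st = complex_scan();

        return st;
}

int main(void)
{
        /* goal of 256 clusters, stripe of 64 clusters, no aligned extent:
         * the old code could loop for hours, the new code still succeeds */
        printf("%s\n", scan_group(256, 64, false) == STATUS_FOUND ?
               "found" : "continue");
        return 0;
}
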
diff --git a/patches/kernel/0015-ext4-fallback-to-complex-scan-if-aligned-scan-doesn-.patch b/patches/kernel/0015-ext4-fallback-to-complex-scan-if-aligned-scan-doesn-.patch
deleted file mode 100644 (file)
index 49edbb2..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Ojaswin Mujoo <ojaswin@linux.ibm.com>
-Date: Fri, 15 Dec 2023 16:49:50 +0530
-Subject: [PATCH] ext4: fallback to complex scan if aligned scan doesn't work
-
-Currently in case the goal length is a multiple of stripe size we use
-ext4_mb_scan_aligned() to find the stripe size aligned physical blocks.
-In case we are not able to find any, we again go back to calling
-ext4_mb_choose_next_group() to search for a different suitable block
-group. However, since the linear search always begins from the start,
-most of the times we end up with the same BG and the cycle continues.
-
-With large fliesystems, the CPU can be stuck in this loop for hours
-which can slow down the whole system. Hence, until we figure out a
-better way to continue the search (rather than starting from beginning)
-in ext4_mb_choose_next_group(), lets just fallback to
-ext4_mb_complex_scan_group() in case aligned scan fails, as it is much
-more likely to find the needed blocks.
-
-Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
----
- fs/ext4/mballoc.c | 21 +++++++++++++--------
- 1 file changed, 13 insertions(+), 8 deletions(-)
-
-diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 3711be697a0a..0c81836f092e 100644
---- a/fs/ext4/mballoc.c
-+++ b/fs/ext4/mballoc.c
-@@ -2894,14 +2894,19 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
-                       ac->ac_groups_scanned++;
-                       if (cr == CR_POWER2_ALIGNED)
-                               ext4_mb_simple_scan_group(ac, &e4b);
--                      else if ((cr == CR_GOAL_LEN_FAST ||
--                               cr == CR_BEST_AVAIL_LEN) &&
--                               sbi->s_stripe &&
--                               !(ac->ac_g_ex.fe_len %
--                               EXT4_B2C(sbi, sbi->s_stripe)))
--                              ext4_mb_scan_aligned(ac, &e4b);
--                      else
--                              ext4_mb_complex_scan_group(ac, &e4b);
-+                      else {
-+                              bool is_stripe_aligned = sbi->s_stripe &&
-+                                      !(ac->ac_g_ex.fe_len %
-+                                        EXT4_B2C(sbi, sbi->s_stripe));
-+
-+                              if ((cr == CR_GOAL_LEN_FAST ||
-+                                   cr == CR_BEST_AVAIL_LEN) &&
-+                                  is_stripe_aligned)
-+                                      ext4_mb_scan_aligned(ac, &e4b);
-+
-+                              if (ac->ac_status == AC_STATUS_CONTINUE)
-+                                      ext4_mb_complex_scan_group(ac, &e4b);
-+                      }
-                       ext4_unlock_group(sb, group);
-                       ext4_mb_unload_buddy(&e4b);
diff --git a/patches/kernel/0015-sched-core-Drop-spinlocks-on-contention-iff-kernel-i.patch b/patches/kernel/0015-sched-core-Drop-spinlocks-on-contention-iff-kernel-i.patch
new file mode 100644 (file)
index 0000000..2ea4049
--- /dev/null
@@ -0,0 +1,75 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 10 Jan 2024 13:47:23 -0800
+Subject: [PATCH] sched/core: Drop spinlocks on contention iff kernel is
+ preemptible
+
+Use preempt_model_preemptible() to detect a preemptible kernel when
+deciding whether or not to reschedule in order to drop a contended
+spinlock or rwlock.  Because PREEMPT_DYNAMIC selects PREEMPTION, kernels
+built with PREEMPT_DYNAMIC=y will yield contended locks even if the live
+preemption model is "none" or "voluntary".  In short, make kernels with
+dynamically selected models behave the same as kernels with statically
+selected models.
+
+Somewhat counter-intuitively, NOT yielding a lock can provide better
+latency for the relevant tasks/processes.  E.g. KVM x86's mmu_lock, a
+rwlock, is often contended between an invalidation event (takes mmu_lock
+for write) and a vCPU servicing a guest page fault (takes mmu_lock for
+read).  For _some_ setups, letting the invalidation task complete even
+if there is mmu_lock contention provides lower latency for *all* tasks,
+i.e. the invalidation completes sooner *and* the vCPU services the guest
+page fault sooner.
+
+But even KVM's mmu_lock behavior isn't uniform, e.g. the "best" behavior
+can vary depending on the host VMM, the guest workload, the number of
+vCPUs, the number of pCPUs in the host, why there is lock contention, etc.
+
+In other words, simply deleting the CONFIG_PREEMPTION guard (or doing the
+opposite and removing contention yielding entirely) needs to come with a
+big pile of data proving that changing the status quo is a net positive.
+
+Cc: Valentin Schneider <valentin.schneider@arm.com>
+Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Marco Elver <elver@google.com>
+Cc: Frederic Weisbecker <frederic@kernel.org>
+Cc: David Matlack <dmatlack@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+---
+ include/linux/sched.h | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 609bde814cb0..995e6699e026 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2211,11 +2211,10 @@ static inline bool preempt_model_preemptible(void)
+  */
+ static inline int spin_needbreak(spinlock_t *lock)
+ {
+-#ifdef CONFIG_PREEMPTION
++      if (!preempt_model_preemptible())
++              return 0;
++
+       return spin_is_contended(lock);
+-#else
+-      return 0;
+-#endif
+ }
+ /*
+@@ -2228,11 +2227,10 @@ static inline int spin_needbreak(spinlock_t *lock)
+  */
+ static inline int rwlock_needbreak(rwlock_t *lock)
+ {
+-#ifdef CONFIG_PREEMPTION
++      if (!preempt_model_preemptible())
++              return 0;
++
+       return rwlock_is_contended(lock);
+-#else
+-      return 0;
+-#endif
+ }
+ static __always_inline bool need_resched(void)
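
For context on how these helpers are consumed, below is a minimal, hypothetical sketch of the usual caller pattern (loosely the same shape as cond_resched_lock()); my_ctx, handle_one_item() and the lock itself are invented names, while spin_needbreak(), need_resched(), cond_resched() and the spin_lock API are real. The effect of the patch is that the spin_needbreak() check below reports contention only when the live preemption model is preemptible, so a PREEMPT_DYNAMIC kernel booted with preempt=none behaves like a statically built CONFIG_PREEMPT_NONE kernel.

static void process_many_items(struct my_ctx *ctx)
{
        spin_lock(&ctx->lock);

        while (ctx->remaining) {
                handle_one_item(ctx);

                /* voluntarily drop a contended lock -- but only when yielding
                 * can actually help, i.e. on a preemptible model */
                if (spin_needbreak(&ctx->lock) || need_resched()) {
                        spin_unlock(&ctx->lock);
                        cond_resched();
                        spin_lock(&ctx->lock);
                }
        }

        spin_unlock(&ctx->lock);
}
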
diff --git a/patches/kernel/0018-sched-core-Drop-spinlocks-on-contention-iff-kernel-i.patch b/patches/kernel/0018-sched-core-Drop-spinlocks-on-contention-iff-kernel-i.patch
deleted file mode 100644 (file)
index 932e2f2..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-From 39f2bfe0177d3f56c9feac4e70424e4952949e2a Mon Sep 17 00:00:00 2001
-From: Sean Christopherson <seanjc@google.com>
-Date: Wed, 10 Jan 2024 13:47:23 -0800
-Subject: [PATCH] sched/core: Drop spinlocks on contention iff kernel is
- preemptible
-
-Use preempt_model_preemptible() to detect a preemptible kernel when
-deciding whether or not to reschedule in order to drop a contended
-spinlock or rwlock.  Because PREEMPT_DYNAMIC selects PREEMPTION, kernels
-built with PREEMPT_DYNAMIC=y will yield contended locks even if the live
-preemption model is "none" or "voluntary".  In short, make kernels with
-dynamically selected models behave the same as kernels with statically
-selected models.
-
-Somewhat counter-intuitively, NOT yielding a lock can provide better
-latency for the relevant tasks/processes.  E.g. KVM x86's mmu_lock, a
-rwlock, is often contended between an invalidation event (takes mmu_lock
-for write) and a vCPU servicing a guest page fault (takes mmu_lock for
-read).  For _some_ setups, letting the invalidation task complete even
-if there is mmu_lock contention provides lower latency for *all* tasks,
-i.e. the invalidation completes sooner *and* the vCPU services the guest
-page fault sooner.
-
-But even KVM's mmu_lock behavior isn't uniform, e.g. the "best" behavior
-can vary depending on the host VMM, the guest workload, the number of
-vCPUs, the number of pCPUs in the host, why there is lock contention, etc.
-
-In other words, simply deleting the CONFIG_PREEMPTION guard (or doing the
-opposite and removing contention yielding entirely) needs to come with a
-big pile of data proving that changing the status quo is a net positive.
-
-Cc: Valentin Schneider <valentin.schneider@arm.com>
-Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: Marco Elver <elver@google.com>
-Cc: Frederic Weisbecker <frederic@kernel.org>
-Cc: David Matlack <dmatlack@google.com>
-Signed-off-by: Sean Christopherson <seanjc@google.com>
----
- include/linux/sched.h | 14 ++++++--------
- 1 file changed, 6 insertions(+), 8 deletions(-)
-
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 292c31697248..a274bc85f222 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -2234,11 +2234,10 @@ static inline bool preempt_model_preemptible(void)
-  */
- static inline int spin_needbreak(spinlock_t *lock)
- {
--#ifdef CONFIG_PREEMPTION
-+      if (!preempt_model_preemptible())
-+              return 0;
-+
-       return spin_is_contended(lock);
--#else
--      return 0;
--#endif
- }
- /*
-@@ -2251,11 +2250,10 @@ static inline int spin_needbreak(spinlock_t *lock)
-  */
- static inline int rwlock_needbreak(rwlock_t *lock)
- {
--#ifdef CONFIG_PREEMPTION
-+      if (!preempt_model_preemptible())
-+              return 0;
-+
-       return rwlock_is_contended(lock);
--#else
--      return 0;
--#endif
- }
- static __always_inline bool need_resched(void)
--- 
-2.39.2
-