// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};
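
/*
 * Note: the VM_FAULT_* codes above are architecture-private.
 * do_exception() returns them alongside the generic VM_FAULT_* flags,
 * and they are interpreted only by do_fault_error() below.
 */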

static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
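
/*
 * Illustrative decode, assuming the store-indication facility (75) is
 * installed: the two TEID bits selected by store_indication identify
 * the access type, e.g.
 *
 *	(trans_exc_code & 0xc00) == 0x400	-> store (write) access
 *
 * which is exactly the is_write test done in do_exception() below.
 */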

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		die(regs, "Access register mode");
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}
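
/*
 * For reference, the low two bits of the translation-exception
 * identification select the address space of the failing access:
 *
 *	0 - primary space (user, or kernel/gmap while in kernel mode)
 *	1 - access register mode
 *	2 - secondary space
 *	3 - home space (kernel ASCE)
 */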

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
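
/*
 * Example of the resulting output for a region-second-table walk, with
 * made-up values:
 *
 *	AS:0000000085bfc007 R2:00000000852f4007 R3:0000000085188007 S:0000000084ed1800 P:000000008502d15d
 *
 * The walk prints "BAD" if a table entry cannot be read, and stops
 * quietly at an invalid or large-page entry.
 */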

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}
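
/*
 * A typical message produced by report_user_fault(), with made-up
 * values:
 *
 *	User process fault: interruption code 0010 ilc:3 in foo[2aa3b000+2000]
 *
 * followed by the fault info dump (for mm faults) and a register dump.
 */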

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_amode31_ex_table,
			       __stop_amode31_ex_table - __start_amode31_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}
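
/*
 * The amode31 exception table covers code running in the separate
 * amode31 (31-bit addressing mode) section and is not merged into the
 * main kernel exception table, so it is searched explicitly first;
 * only on a miss does the search fall back to
 * search_exception_tables(), which also covers modules.
 */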

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup && ex_handle(fixup, regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;
	address = trans_exc_code & __FAIL_ADDR_MASK;
	is_write = (trans_exc_code & store_indication) == 0x400;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || is_write)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_lock has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			mmap_read_lock(mm);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}
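
/*
 * Sketch of the flag setup in do_exception() for the common case of a
 * write fault from user space (illustrative only):
 *
 *	flags = FAULT_FLAG_DEFAULT;	- ALLOW_RETRY | KILLABLE | INTERRUPTIBLE
 *	flags |= FAULT_FLAG_USER;	- fault came from user mode
 *	flags |= FAULT_FLAG_WRITE;	- store access per the TEID store indication
 *
 * For a guest (gmap) fault with pfault enabled, FAULT_FLAG_RETRY_NOWAIT
 * is added so that handle_mm_fault() returns instead of sleeping and
 * the guest can be backed out via the pfault mechanism below.
 */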

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
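
/*
 * TEID bits used above (per the z/Architecture Principles of
 * Operation): bit 61 (trans_exc_code & 4) indicates whether the rest of
 * the TEID is valid, which is why low-address protection is handled
 * before anything else, and bit 56 (trans_exc_code & 0x80) marks an
 * instruction-fetch protection event when the no-execute facility is
 * installed.
 */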

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}
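
/*
 * Rough layout of the DIAG 0x258 parameter block used above (see
 * struct pfault_refbk; refdwlen 5 = five doublewords):
 *
 *	refdiagc (0x258), reffcode (0 = init/TOKEN, 1 = fini/CANCEL),
 *		refdwlen, refversn
 *	refgaddr  - address of the pfault token (here __LC_LPP in lowcore)
 *	refselmk  - token selection mask
 *	refcmpmk  - token compare mask
 *	reserved
 */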

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is in memory.
 */
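
/*
 * The two possible orderings, illustrative only:
 *
 *	initial interrupt (page missing) -> pfault_wait = 1,
 *		task set to TASK_UNINTERRUPTIBLE
 *	completion interrupt (page in)   -> pfault_wait = 0,
 *		wake_up_process()
 *
 * or, since the completion may arrive first on another (virtual) cpu:
 *
 *	completion interrupt -> pfault_wait = -1 (remembered)
 *	initial interrupt    -> pfault_wait reset to 0, task keeps running
 */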

static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}
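
/*
 * Summary of the thread.pfault_wait states used above:
 *
 *	 0 - no pseudo page fault pending
 *	 1 - initial interrupt seen, task is or will be blocked
 *	-1 - completion interrupt arrived before the initial one
 */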

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	/*
	 * bit 61 tells us if the address is valid, if it's not we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not
	 * reliable without the misc UV feature so we need to check
	 * for that as well.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */