// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/prefetch.h>
#include "grutables.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/* Return codes for vtop functions */
#define VTOP_SUCCESS		0
#define VTOP_INVALID		-1
#define VTOP_RETRY		2
/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}
/*
 * Find the vma of a GRU segment. Caller must hold mmap_lock.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}
/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 * - *gts with the mmap_lock locked for read and the GTS locked.
 * - NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	mmap_read_lock(mm);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		mmap_read_unlock(mm);
	return gts;
}
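/*
 * Same as gru_find_lock_gts(), but allocate the gts if it does not already
 * exist. Returns with the mmap_lock downgraded to read and the GTS locked,
 * or an ERR_PTR on failure.
 */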
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	mmap_write_lock(mm);
	vma = gru_find_vma(vaddr);
	if (!vma)
		goto err;

	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (IS_ERR(gts))
		goto err;
	mutex_lock(&gts->ts_ctxlock);
	mmap_write_downgrade(mm);
	return gts;

err:
	mmap_write_unlock(mm);
	return gts;
}
/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	mmap_read_unlock(current->mm);
}
/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}
/*
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}
/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}
/*
 * Convert a user virtual address to a physical address.
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_lock is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	p4dp = p4d_offset(pgdp, vaddr);
	if (unlikely(p4d_none(*p4dp)))
		goto err;

	pudp = pud_offset(p4dp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;

	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		goto err;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}
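/*
 * Translate a user vaddr into a GRU global physical address (gpa). The fast
 * atomic PTE walk is tried first; if it fails, either fall back to the
 * sleeping lookup or return VTOP_RETRY so an atomic caller can retry the
 * fault in non-atomic context.
 */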
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}
/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;		/* make CL dirty */
		gru_flush_cache(cbe);
	}
}
/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
			struct gru_thread_state *gts, int atomic,
			unsigned long fault_vaddr, int asid, int write,
			unsigned char tlb_preload_count,
			struct gru_tlb_fault_handle *tfh,
			struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}
/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *	cb	Address of user CBR. Null if not running in user context
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 */
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;
	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/*
	 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
	 * is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;
	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}

	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write,
				tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
		indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;
failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}
/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with linux
 * interrupt handlers.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	struct completion *cmp;
	int cbrnum, ctxnum;

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		cmp = gru->gs_blade->bs_async_wq;
		if (cmp)
			complete(cmp);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hardware, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is received.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/* Spurious interrupts can cause this. Ignore. */
		if (!gts) {
			STAT(intr_spurious);
			continue;
		}

		/*
		 * This is running in interrupt context. Trylock the mmap_lock.
		 * If it fails, retry the fault in user context.
		 */
		gts->ustats.fmm_tlbmiss++;
		if (!gts->ts_force_cch_reload &&
		    mmap_read_trylock(gts->ts_mm)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			mmap_read_unlock(gts->ts_mm);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}
irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}
irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}
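/*
 * Interrupt entry point for blades that have no cpus of their own. Both
 * chiplets on each cpuless blade are serviced here.
 */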
irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}
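/*
 * Handle a TLB miss detected by user polling of the CB (UPM mode). Wait for
 * any active range invalidate to complete, then retry the dropin; loop as
 * long as the dropin reports that a range invalidate is still active.
 */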
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbmiss++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hardware, required for emulator */
		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}
/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	gru_check_context_placement(gts);

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}
/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have current data */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
		"exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}
/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}
int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}
/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}
/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}
/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
		/* Select blade/chiplet for GRU context */
		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1])) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			gru_check_context_placement(gts);
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;