gru: preload tlb for bcopy instructions
author		Jack Steiner <steiner@sgi.com>
		Wed, 16 Dec 2009 00:48:13 +0000 (16:48 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 16 Dec 2009 15:20:16 +0000 (07:20 -0800)
Add anticipatory TLB dropins for GRU TLB misses that occur on BCOPY
instructions that copy large amounts of data.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
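
The hunks below add the mechanism in three pieces: gru_preload_tlb() computes
how far past the faulting page the BCOPY transfer extends, tfh_write_only()
drops in each anticipatory entry, and gru_flush_cache_cbe() writes the CBE
back on every exit path. For orientation, a minimal standalone sketch of just
the preload-window arithmetic follows; it is not driver code. gru_vtop_stub()
and tfh_write_only_stub() are hypothetical stand-ins for the driver's
gru_vtop() and tfh_write_only(), and a fixed 4K page size is assumed.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Identity translation for illustration; the driver calls gru_vtop(). */
static int gru_vtop_stub(unsigned long vaddr, unsigned long *gpa)
{
	*gpa = vaddr;
	return 0;
}

/* Always succeeds here; the driver issues a write-only TFH dropin. */
static int tfh_write_only_stub(unsigned long gpa, unsigned long vaddr)
{
	printf("preload vaddr 0x%lx -> gpa 0x%lx\n", vaddr, gpa);
	return 0;
}

/*
 * Walk backward from the last byte of the transfer toward the faulting
 * page, preloading at most tlb_preload_count pages. The faulting page
 * itself is handled by the normal dropin, so the loop stops above it.
 */
static void preload_window(unsigned long fault_vaddr,
			   unsigned long last_byte,
			   unsigned char tlb_preload_count)
{
	unsigned long vaddr = last_byte & PAGE_MASK;
	unsigned long gpa;

	fault_vaddr &= PAGE_MASK;
	if (vaddr > fault_vaddr + tlb_preload_count * PAGE_SIZE)
		vaddr = fault_vaddr + tlb_preload_count * PAGE_SIZE;

	while (vaddr > fault_vaddr) {
		if (gru_vtop_stub(vaddr, &gpa) ||
		    tfh_write_only_stub(gpa, vaddr))
			return;
		vaddr -= PAGE_SIZE;
	}
}

int main(void)
{
	/* Fault at 0x10000 in a bcopy ending at 0x18fff: only the four
	   pages 0x11000-0x14000 are preloaded (capped by the count). */
	preload_window(0x10000, 0x18fff, 4);
	return 0;
}

As in gru_preload_tlb(), the walk runs from the far end of the window toward
the fault, so the first translation or dropin failure abandons the remaining
pages: preloading is purely opportunistic.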
drivers/misc/sgi-gru/grufault.c
drivers/misc/sgi-gru/grufile.c
drivers/misc/sgi-gru/gruhandles.c
drivers/misc/sgi-gru/gruhandles.h
drivers/misc/sgi-gru/grukservices.c
drivers/misc/sgi-gru/grulib.h
drivers/misc/sgi-gru/grumain.c
drivers/misc/sgi-gru/gruprocfs.c
drivers/misc/sgi-gru/grutables.h

diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 7d757e9c42f05a4019ffaee9fa5bfe2a30d74ce6..a1b3a1d66af5969a06b05430a4571ad75225fceb 100644
@@ -289,6 +289,61 @@ upm:
 }
 
 
+/*
+ * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
+ * CBE cacheline so that the line will be written back to the home agent.
+ * Otherwise the line may be silently dropped. This has no impact
+ * except on performance.
+ */
+static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
+{
+       if (unlikely(cbe)) {
+               cbe->cbrexecstatus = 0;         /* make CL dirty */
+               gru_flush_cache(cbe);
+       }
+}
+
+/*
+ * Preload the TLB with entries that may be required. Currently, preloading
+ * is implemented only for BCOPY. Preload <tlb_preload_count> pages or up to
+ * the end of the bcopy transfer, whichever is smaller.
+ */
+static void gru_preload_tlb(struct gru_state *gru,
+                       struct gru_thread_state *gts, int atomic,
+                       unsigned long fault_vaddr, int asid, int write,
+                       unsigned char tlb_preload_count,
+                       struct gru_tlb_fault_handle *tfh,
+                       struct gru_control_block_extended *cbe)
+{
+       unsigned long vaddr = 0, gpa;
+       int ret, pageshift;
+
+       if (cbe->opccpy != OP_BCOPY)
+               return;
+
+       if (fault_vaddr == cbe->cbe_baddr0)
+               vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
+       else if (fault_vaddr == cbe->cbe_baddr1)
+               vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
+
+       fault_vaddr &= PAGE_MASK;
+       vaddr &= PAGE_MASK;
+       vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
+
+       while (vaddr > fault_vaddr) {
+               ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+               if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
+                                         GRU_PAGESIZE(pageshift)))
+                       return;
+               gru_dbg(grudev,
+                       "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
+                       atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
+                       vaddr, asid, write, pageshift, gpa);
+               vaddr -= PAGE_SIZE;
+               STAT(tlb_preload_page);
+       }
+}
+
 /*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
  *     Input:
@@ -303,6 +358,8 @@ static int gru_try_dropin(struct gru_thread_state *gts,
                          struct gru_tlb_fault_handle *tfh,
                          struct gru_instruction_bits *cbk)
 {
+       struct gru_control_block_extended *cbe = NULL;
+       unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
        int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
        unsigned long gpa = 0, vaddr = 0;
 
@@ -313,6 +370,14 @@ static int gru_try_dropin(struct gru_thread_state *gts,
         * the dropin is ignored. This eliminates the need for additional locks.
         */
 
+       /*
+        * Prefetch the CBE if doing TLB preloading
+        */
+       if (unlikely(tlb_preload_count)) {
+               cbe = gru_tfh_to_cbe(tfh);
+               prefetchw(cbe);
+       }
+
        /*
         * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
         * Might be a hardware race OR a stupid user. Ignore FMM because FMM
@@ -359,6 +424,12 @@ static int gru_try_dropin(struct gru_thread_state *gts,
                        goto failupm;
                }
        }
+
+       if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
+               gru_preload_tlb(gts->ts_gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
+               gru_flush_cache_cbe(cbe);
+       }
+
        gru_cb_set_istatus_active(cbk);
        tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
                          GRU_PAGESIZE(pageshift));
@@ -378,11 +449,13 @@ failnoasid:
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
+       gru_flush_cache_cbe(cbe);
        return -EAGAIN;
 
 failupm:
        /* Atomic failure switch CBR to UPM */
        tfh_user_polling_mode(tfh);
+       gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_upm);
        gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return 1;
@@ -390,6 +463,7 @@ failupm:
 failfmm:
        /* FMM state on UPM call */
        gru_flush_cache(tfh);
+       gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_fmm);
        gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;
@@ -397,6 +471,7 @@ failfmm:
 failnoexception:
        /* TFH status did not show exception pending */
        gru_flush_cache(tfh);
+       gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_no_exception);
@@ -407,6 +482,7 @@ failnoexception:
 failidle:
        /* TFH state was idle  - no miss pending */
        gru_flush_cache(tfh);
+       gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_idle);
@@ -416,6 +492,7 @@ failidle:
 failinval:
        /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
        tfh_exception(tfh);
+       gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_invalid);
        gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return -EFAULT;
@@ -426,6 +503,7 @@ failactive:
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
+       gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_range_active);
        gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
                tfh, vaddr);
@@ -627,7 +705,7 @@ int gru_get_exception_detail(unsigned long arg)
                excdet.exceptdet1 = cbe->idef3upd;
                excdet.cbrstate = cbe->cbrstate;
                excdet.cbrexecstatus = cbe->cbrexecstatus;
-               gru_flush_cache(cbe);
+               gru_flush_cache_cbe(cbe);
                ret = 0;
        } else {
                ret = -EAGAIN;
@@ -770,9 +848,12 @@ int gru_set_context_option(unsigned long arg)
                return -EFAULT;
        gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
 
-       gts = gru_alloc_locked_gts(req.gseg);
-       if (IS_ERR(gts))
-               return PTR_ERR(gts);
+       gts = gru_find_lock_gts(req.gseg);
+       if (!gts) {
+               gts = gru_alloc_locked_gts(req.gseg);
+               if (IS_ERR(gts))
+                       return PTR_ERR(gts);
+       }
 
        switch (req.op) {
        case sco_blade_chiplet:
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 9d41208a6c92a4f8a09e465107e485fd74b236a5..cb3b4d228475905fbdc25bc00605188e2afc53c4 100644
@@ -152,6 +152,7 @@ static int gru_create_new_context(unsigned long arg)
                vdata->vd_dsr_au_count =
                    GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
                vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
+               vdata->vd_tlb_preload_count = req.tlb_preload_count;
                ret = 0;
        }
        up_write(&current->mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 66d67d9bc9b6e58861a6e9b9965f16a4335a8c37..2f30badc6ffd9251cd2e59835e3061b2816cd09f 100644
@@ -165,17 +165,20 @@ int tgh_invalidate(struct gru_tlb_global_handle *tgh,
        return wait_instruction_complete(tgh, tghop_invalidate);
 }
 
-void tfh_write_only(struct gru_tlb_fault_handle *tfh,
-                                 unsigned long pfn, unsigned long vaddr,
-                                 int asid, int dirty, int pagesize)
+int tfh_write_only(struct gru_tlb_fault_handle *tfh,
+                                 unsigned long paddr, int gaa,
+                                 unsigned long vaddr, int asid, int dirty,
+                                 int pagesize)
 {
        tfh->fillasid = asid;
        tfh->fillvaddr = vaddr;
-       tfh->pfn = pfn;
+       tfh->pfn = paddr >> GRU_PADDR_SHIFT;
+       tfh->gaa = gaa;
        tfh->dirty = dirty;
        tfh->pagesize = pagesize;
        tfh->opc = TFHOP_WRITE_ONLY;
        start_instruction(tfh);
+       return wait_instruction_complete(tfh, tfhop_write_only);
 }
 
 void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
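
A note on the signature change above: tfh_write_only() previously took a
precomputed pfn; it now takes (paddr, gaa), derives the pfn itself, and
returns the status from wait_instruction_complete() so gru_preload_tlb() can
stop preloading on failure. A minimal check of the pfn derivation, assuming
GRU_PADDR_SHIFT is the usual 4K shift of 12 (the real value is defined in
gruhandles.h):

#include <assert.h>

#define GRU_PADDR_SHIFT	12	/* assumption; see gruhandles.h */

int main(void)
{
	unsigned long paddr = 0x123456000UL;

	/* Mirrors tfh->pfn = paddr >> GRU_PADDR_SHIFT in tfh_write_only(). */
	assert((paddr >> GRU_PADDR_SHIFT) == 0x123456UL);
	return 0;
}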
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index 47b762f89e0db587d7afaa4490928ece212035f7..ea584ebf65b18c4c9ce2cd54b25b2a9c5bda8a91 100644
@@ -164,6 +164,16 @@ static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
        return vaddr + GRU_SIZE * (2 * pnode  + chiplet);
 }
 
+static inline struct gru_control_block_extended *gru_tfh_to_cbe(
+                                       struct gru_tlb_fault_handle *tfh)
+{
+       unsigned long cbe;
+
+       cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
+       return (struct gru_control_block_extended *)cbe;
+}
+
+
 
 
 /*
@@ -446,6 +456,12 @@ struct gru_control_block_extended {
        unsigned int cbrexecstatus:8;
 };
 
+/* CBE fields for active BCOPY instructions */
+#define cbe_baddr0     idef1upd
+#define cbe_baddr1     idef3upd
+#define cbe_src_cl     idef6cpy
+#define cbe_nelemcur   idef5upd
+
 enum gru_cbr_state {
        CBRSTATE_INACTIVE,
        CBRSTATE_IDLE,
@@ -493,8 +509,8 @@ int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
 int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
        unsigned long vaddrmask, int asid, int pagesize, int global, int n,
        unsigned short ctxbitmap);
-void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn,
-       unsigned long vaddr, int asid, int dirty, int pagesize);
+int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
+       int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
 void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
        int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
 void tfh_restart(struct gru_tlb_fault_handle *tfh);
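
The cbe_* macros above alias operand fields of an in-flight BCOPY;
gru_preload_tlb() uses them to find the last byte of the transfer, depending
on whether the fault hit the source (cbe_baddr0) or destination (cbe_baddr1)
operand. A worked instance of that arithmetic, assuming the GRU's 64-byte
cache line (GRU_CACHE_LINE_BYTES in the driver):

#include <assert.h>

#define GRU_CACHE_LINE_BYTES	64	/* assumption; defined by the driver */

int main(void)
{
	unsigned long fault_vaddr = 0x10000;

	/* Fault on cbe_baddr0 (source): transfer spans cbe_src_cl lines. */
	unsigned int src_cl = 512;
	assert(fault_vaddr + GRU_CACHE_LINE_BYTES * src_cl - 1 == 0x17fff);

	/* Fault on cbe_baddr1 (destination): cbe_nelemcur elements of
	   (1 << xtypecpy) bytes each. */
	unsigned int xtypecpy = 3, nelemcur = 4096;
	assert(fault_vaddr + (1UL << xtypecpy) * nelemcur - 1 == 0x17fff);

	return 0;
}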
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 4da6f56833d125d209ff4637d17b3d6c43e8f9f5..d9ff0289a1c34e5beeef3532fa1c91593a498d04 100644
@@ -161,7 +161,7 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
        down_write(&bs->bs_kgts_sema);
 
        if (!bs->bs_kgts) {
-               bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
+               bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
                bs->bs_kgts->ts_user_blade_id = blade_id;
        }
        kgts = bs->bs_kgts;
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index e033b36df7e0b876156cdd84edb90cc039e3e2f4..c6928af7393ad4359ee391c57191aa18535d429c 100644
@@ -86,6 +86,7 @@ struct gru_create_context_req {
        unsigned int            control_blocks;
        unsigned int            maximum_thread_count;
        unsigned int            options;
+       unsigned char           tlb_preload_count;
 };
 
 /*
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index ebabbdcbb97f9f958b7e56ab4d4d05e1a7e0aab4..ade0925eab0e4b5e3b8c7b41e97e34c78adfe525 100644
@@ -316,7 +316,8 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
  * Allocate a thread state structure.
  */
 struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
-               int cbr_au_count, int dsr_au_count, int options, int tsid)
+               int cbr_au_count, int dsr_au_count,
+               unsigned char tlb_preload_count, int options, int tsid)
 {
        struct gru_thread_state *gts;
        struct gru_mm_struct *gms;
@@ -334,6 +335,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
        mutex_init(&gts->ts_ctxlock);
        gts->ts_cbr_au_count = cbr_au_count;
        gts->ts_dsr_au_count = dsr_au_count;
+       gts->ts_tlb_preload_count = tlb_preload_count;
        gts->ts_user_options = options;
        gts->ts_user_blade_id = -1;
        gts->ts_user_chiplet_id = -1;
@@ -403,7 +405,9 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts, *ngts;
 
-       gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
+       gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
+                           vdata->vd_dsr_au_count,
+                           vdata->vd_tlb_preload_count,
                            vdata->vd_user_options, tsid);
        if (IS_ERR(gts))
                return gts;
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 0a57ab29cd30432a2801bba2ea1183e4d9498486..54a5a1c35ad1ed4c552058d2bd5f3016a703c766 100644
@@ -76,6 +76,7 @@ static int statistics_show(struct seq_file *s, void *p)
        printstat(s, check_context_retarget_intr);
        printstat(s, check_context_unload);
        printstat(s, tlb_dropin);
+       printstat(s, tlb_preload_page);
        printstat(s, tlb_dropin_fail_no_asid);
        printstat(s, tlb_dropin_fail_upm);
        printstat(s, tlb_dropin_fail_invalid);
@@ -127,7 +128,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
        int op;
        unsigned long total, count, max;
        static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
-               "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
+               "cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
+               "tfh_write_restart", "tgh_invalidate"};
 
        seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
        for (op = 0; op < mcsop_last; op++) {
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 76fe2987fc9f6268b4a593f03d1c75a883f213ee..adaf691d59f5f3fee90181bbea2866847344732a 100644
@@ -202,6 +202,7 @@ struct gru_stats_s {
        atomic_long_t check_context_retarget_intr;
        atomic_long_t check_context_unload;
        atomic_long_t tlb_dropin;
+       atomic_long_t tlb_preload_page;
        atomic_long_t tlb_dropin_fail_no_asid;
        atomic_long_t tlb_dropin_fail_upm;
        atomic_long_t tlb_dropin_fail_invalid;
@@ -245,7 +246,8 @@ struct gru_stats_s {
 };
 
 enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
-       cchop_deallocate, tghop_invalidate, mcsop_last};
+       cchop_deallocate, tfhop_write_only, tfhop_write_restart,
+       tghop_invalidate, mcsop_last};
 
 struct mcs_op_statistic {
        atomic_long_t   count;
@@ -335,6 +337,7 @@ struct gru_vma_data {
        long                    vd_user_options;/* misc user option flags */
        int                     vd_cbr_au_count;
        int                     vd_dsr_au_count;
+       unsigned char           vd_tlb_preload_count;
 };
 
 /*
@@ -350,6 +353,7 @@ struct gru_thread_state {
        struct gru_state        *ts_gru;        /* GRU where the context is
                                                   loaded */
        struct gru_mm_struct    *ts_gms;        /* asid & ioproc struct */
+       unsigned char           ts_tlb_preload_count; /* TLB preload pages */
        unsigned long           ts_cbr_map;     /* map of allocated CBRs */
        unsigned long           ts_dsr_map;     /* map of allocated DATA
                                                   resources */
@@ -661,7 +665,8 @@ extern int gru_proc_init(void);
 extern void gru_proc_exit(void);
 
 extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
-               int cbr_au_count, int dsr_au_count, int options, int tsid);
+               int cbr_au_count, int dsr_au_count,
+               unsigned char tlb_preload_count, int options, int tsid);
 extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
                int cbr_au_count, char *cbmap);
 extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,