sgi-gru: misc GRU cleanup
author Jack Steiner <steiner@sgi.com>
Thu, 2 Apr 2009 23:59:04 +0000 (16:59 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 3 Apr 2009 02:05:05 +0000 (19:05 -0700)
Misc trivial GRU driver fixes:
- fix long lines
- eliminate extra whitespace
- eliminate compiler warning
- better validation of user-supplied invalidate parameters
- bug fix in the GRU TLB flush (not the CPU TLB flush)

These changes are all internal to the SGI GRU driver and have no effect
on the base kernel.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/misc/sgi-gru/gru_instructions.h
drivers/misc/sgi-gru/grufault.c
drivers/misc/sgi-gru/grufile.c
drivers/misc/sgi-gru/gruhandles.h
drivers/misc/sgi-gru/grukservices.c
drivers/misc/sgi-gru/grumain.c
drivers/misc/sgi-gru/grutables.h
drivers/misc/sgi-gru/grutlbpurge.c

index 48762e7b98be65a5b3758489657a5d1675cf5de0..3fde33c1e8f3b64e10192194974ec0016ea95f00 100644 (file)
 #ifndef __GRU_INSTRUCTIONS_H__
 #define __GRU_INSTRUCTIONS_H__
 
-#define gru_flush_cache_hook(p)
-#define gru_emulator_wait_hook(p, w)
+extern int gru_check_status_proc(void *cb);
+extern int gru_wait_proc(void *cb);
+extern void gru_wait_abort_proc(void *cb);
+
+
 
 /*
  * Architecture dependent functions
 #if defined(CONFIG_IA64)
 #include <linux/compiler.h>
 #include <asm/intrinsics.h>
-#define __flush_cache(p)               ia64_fc(p)
+#define __flush_cache(p)               ia64_fc((unsigned long)p)
 /* Use volatile on IA64 to ensure ordering via st4.rel */
-#define gru_ordered_store_int(p,v)                                     \
+#define gru_ordered_store_int(p, v)                                    \
                do {                                                    \
                        barrier();                                      \
                        *((volatile int *)(p)) = v; /* force st.rel */  \
                } while (0)
 #elif defined(CONFIG_X86_64)
 #define __flush_cache(p)               clflush(p)
-#define gru_ordered_store_int(p,v)                                     \
+#define gru_ordered_store_int(p, v)                                    \
                do {                                                    \
                        barrier();                                      \
                        *(int *)p = v;                                  \
@@ -558,20 +561,19 @@ extern int gru_get_cb_exception_detail(void *cb,
 
 #define GRU_EXC_STR_SIZE               256
 
-extern int gru_check_status_proc(void *cb);
-extern int gru_wait_proc(void *cb);
-extern void gru_wait_abort_proc(void *cb);
 
 /*
  * Control block definition for checking status
  */
 struct gru_control_block_status {
        unsigned int    icmd            :1;
-       unsigned int    unused1         :31;
+       unsigned int    ima             :3;
+       unsigned int    reserved0       :4;
+       unsigned int    unused1         :24;
        unsigned int    unused2         :24;
        unsigned int    istatus         :2;
        unsigned int    isubstatus      :4;
-       unsigned int    inused3         :2;
+       unsigned int    unused3         :2;
 };
 
 /* Get CB status */
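
The widened bitfields above account for the ima and reserved bits in the
CB's first word, so istatus lands on the correct bits. A minimal polling
sketch using this layout (assumed usage, not part of the patch; real
callers should go through gru_wait_proc()/gru_check_status_proc()):

	/* Sketch: spin until the CB leaves the active state.  CBS_IDLE
	 * appears elsewhere in this driver; CBS_EXCEPTION is assumed to
	 * be the companion error status. */
	static int poll_cb_status(void *cb)
	{
		volatile struct gru_control_block_status *cbs = cb;

		while (cbs->istatus != CBS_IDLE && cbs->istatus != CBS_EXCEPTION)
			cpu_relax();
		return cbs->istatus;
	}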
index 3ee698ad85997cf2f5c32b986689d7c07a0ed607..f85d27306789b16507e137f904e1420f3afe455a 100644 (file)
@@ -368,6 +368,7 @@ failupm:
 
 failfmm:
        /* FMM state on UPM call */
+       gru_flush_cache(tfh);
        STAT(tlb_dropin_fail_fmm);
        gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;
@@ -497,10 +498,8 @@ int gru_handle_user_call_os(unsigned long cb)
        if (!gts)
                return -EINVAL;
 
-       if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
-               ret = -EINVAL;
+       if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
                goto exit;
-       }
 
        /*
         * If force_unload is set, the UPM TLB fault is phony. The task
@@ -508,6 +507,10 @@ int gru_handle_user_call_os(unsigned long cb)
         * unload the context. The task will page fault and assign a new
         * context.
         */
+       if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
+                               gts->ts_blade != uv_numa_blade_id())
+               gts->ts_force_unload = 1;
+
        ret = -EAGAIN;
        cbrnum = thread_cbr_number(gts, ucbnum);
        if (gts->ts_force_unload) {
@@ -541,11 +544,13 @@ int gru_get_exception_detail(unsigned long arg)
        if (!gts)
                return -EINVAL;
 
-       if (gts->ts_gru) {
-               ucbnum = get_cb_number((void *)excdet.cb);
+       ucbnum = get_cb_number((void *)excdet.cb);
+       if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
+               ret = -EINVAL;
+       } else if (gts->ts_gru) {
                cbrnum = thread_cbr_number(gts, ucbnum);
                cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
-               prefetchw(cbe);         /* Harmless on hardware, required for emulator */
+               prefetchw(cbe);/* Harmless on hardware, required for emulator */
                excdet.opc = cbe->opccpy;
                excdet.exopc = cbe->exopccpy;
                excdet.ecause = cbe->ecause;
@@ -609,7 +614,7 @@ int gru_user_flush_tlb(unsigned long arg)
        if (!gts)
                return -EINVAL;
 
-       gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len);
+       gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
        gru_unlock_gts(gts);
 
        return 0;
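
Two changelog items live in this file. The ucbnum hunks are the improved
validation of user parameters: a CB address supplied by userspace is
converted to a CB number and range-checked against the context's actual
CBR allocation before it is used as an index. The final hunk is the
user-side TLB flush fix: gru_flush_tlb_range() takes a length as its
third argument, and the old call passed an end address instead. A sketch
of the validation pattern (validate_ucbnum() is a hypothetical helper,
not driver code):

	/* Sketch only: mirrors the bounds checks added above. */
	static int validate_ucbnum(struct gru_thread_state *gts, void *cb)
	{
		int ucbnum = get_cb_number(cb);

		/* must fall inside this context's CBR allocation */
		if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
			return -EINVAL;
		return ucbnum;
	}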
index c67e4e8bd62c5fd83236ff97fb0961a82db189d2..15292e5f74a14315b9cedc4da28de32e071d86e7 100644 (file)
@@ -45,7 +45,8 @@
 #include <asm/uv/uv_mmrs.h>
 
 struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
-unsigned long gru_start_paddr, gru_end_paddr __read_mostly;
+unsigned long gru_start_paddr __read_mostly;
+unsigned long gru_end_paddr __read_mostly;
 struct gru_stats_s gru_stats;
 
 /* Guaranteed user available resources on each node */
@@ -101,7 +102,7 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
                return -EPERM;
 
        if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
-                               vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
+                               vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
                return -EINVAL;
 
        vma->vm_flags |=
@@ -295,7 +296,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
        for_each_online_node(nid) {
                bid = uv_node_to_blade_id(nid);
                pnode = uv_node_to_pnode(nid);
-               if (gru_base[bid])
+               if (bid < 0 || gru_base[bid])
                        continue;
                page = alloc_pages_node(nid, GFP_KERNEL, order);
                if (!page)
@@ -308,11 +309,11 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
                dsrbytes = 0;
                cbrs = 0;
                for (gru = gru_base[bid]->bs_grus, chip = 0;
-                               chip < GRU_CHIPLETS_PER_BLADE;
+                               chip < GRU_CHIPLETS_PER_BLADE;
                                chip++, gru++) {
                        paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
                        vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
-                       gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip);
+                       gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
                        n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
                        cbrs = max(cbrs, n);
                        n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@@ -370,26 +371,26 @@ static int __init gru_init(void)
        void *gru_start_vaddr;
 
        if (!is_uv_system())
-               return 0;
+               return -ENODEV;
 
 #if defined CONFIG_IA64
        gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
 #else
        gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
                                0x7fffffffffffUL;
-
 #endif
        gru_start_vaddr = __va(gru_start_paddr);
-       gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE;
+       gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
        printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
               gru_start_paddr, gru_end_paddr);
        irq = get_base_irq();
        for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
                ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
-               /* TODO: fix irq handling on x86. For now ignore failures because
+               /* TODO: fix irq handling on x86. For now ignore failure because
                 * interrupts are not required & not yet fully supported */
                if (ret) {
-                       printk("!!!WARNING: GRU ignoring request failure!!!\n");
+                       printk(KERN_WARNING
+                              "!!!WARNING: GRU ignoring request failure!!!\n");
                        ret = 0;
                }
                if (ret) {
@@ -469,7 +470,11 @@ struct vm_operations_struct gru_vm_ops = {
        .fault          = gru_fault,
 };
 
+#ifndef MODULE
 fs_initcall(gru_init);
+#else
+module_init(gru_init);
+#endif
 module_exit(gru_exit);
 
 module_param(gru_options, ulong, 0644);
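
Two behavioural notes on the hunks above: gru_init() now returns -ENODEV
rather than 0 on non-UV hardware, so a modular load fails cleanly instead
of leaving an inert module resident, and the #ifndef MODULE guard
registers the same entry point through module_init() for modular builds,
since fs_initcall() ordering only applies to code built into the kernel.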
index b63018d60fe1a56b22e0cc2c02cc43b650dba7f6..fb72a52a34a8d21515b42df9082c4bb30989d029 100644 (file)
@@ -489,7 +489,7 @@ enum gru_cbr_state {
  *      64m                    26      8
  *     ...
  */
-#define GRU_PAGESIZE(sh)       ((((sh) > 20 ? (sh) + 2: (sh)) >> 1) - 6)
+#define GRU_PAGESIZE(sh)       ((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6)
 #define GRU_SIZEAVAIL(sh)      (1UL << GRU_PAGESIZE(sh))
 
 /* minimum TLB purge count to ensure a full purge */
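
As a quick check of the macro against the table above it: for a 64MB page,
sh = 26 > 20, so GRU_PAGESIZE(26) = ((26 + 2) >> 1) - 6 = 8, matching the
table row; for a 4KB page, sh = 12, giving (12 >> 1) - 6 = 0.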
index 880c55dfb66266dfd53cae3dea2492b986ff5744..3e36b7b6e1c80288d68090d4875e32adf73378d3 100644 (file)
@@ -122,7 +122,7 @@ int gru_get_cb_exception_detail(void *cb,
        struct gru_control_block_extended *cbe;
 
        cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
-       prefetchw(cbe);         /* Harmless on hardware, required for emulator */
+       prefetchw(cbe); /* Harmless on hardware, required for emulator */
        excdet->opc = cbe->opccpy;
        excdet->exopc = cbe->exopccpy;
        excdet->ecause = cbe->ecause;
@@ -437,7 +437,7 @@ static int send_message_failure(void *cb,
                break;
        case CBSS_PUT_NACKED:
                STAT(mesq_send_put_nacked);
-               m =mq + (gru_get_amo_value_head(cb) << 6);
+               m = mq + (gru_get_amo_value_head(cb) << 6);
                gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
                if (gru_wait(cb) == CBS_IDLE)
                        ret = MQE_OK;
index 3d2fc216bae5b561dea357cd86d1661803a0377d..1ce32bcd0259f1a900d53c2435a5c2a9bdd71fd9 100644 (file)
@@ -432,8 +432,8 @@ static inline long gru_copy_handle(void *d, void *s)
        return GRU_HANDLE_BYTES;
 }
 
-static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap,
-                               unsigned long length)
+static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
+                               unsigned long cbrmap, unsigned long length)
 {
        int i, scr;
 
@@ -773,8 +773,8 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
 again:
-       preempt_disable();
        mutex_lock(&gts->ts_ctxlock);
+       preempt_disable();
        if (gts->ts_gru) {
                if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
                        STAT(migrated_nopfn_unload);
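
The gru_fault() hunk above fixes a sleep-while-atomic bug: mutex_lock()
may sleep, so it must be taken before preempt_disable(), never after.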
index a78f70deeb59d47a9554b99ed0ec5803c6ea26ee..db3fe08bf79e4ea33b1e8adeed2f059183563e90 100644 (file)
@@ -278,13 +278,12 @@ struct gru_stats_s {
 /* Generate a GRU asid value from a GRU base asid & a virtual address. */
 #if defined CONFIG_IA64
 #define VADDR_HI_BIT           64
-#define GRUREGION(addr)                ((addr) >> (VADDR_HI_BIT - 3) & 3)
 #elif defined CONFIG_X86_64
 #define VADDR_HI_BIT           48
-#define GRUREGION(addr)                (0)             /* ZZZ could do better */
 #else
 #error "Unsupported architecture"
 #endif
+#define GRUREGION(addr)                ((addr) >> (VADDR_HI_BIT - 3) & 3)
 #define GRUASID(asid, addr)    ((asid) + GRUREGION(addr))
 
 /*------------------------------------------------------------------------------
@@ -297,12 +296,12 @@ struct gru_state;
  * This structure is pointed to from the mmstruct via the notifier pointer.
  * There is one of these per address space.
  */
-struct gru_mm_tracker {
-       unsigned int            mt_asid_gen;    /* ASID wrap count */
-       int                     mt_asid;        /* current base ASID for gru */
-       unsigned short          mt_ctxbitmap;   /* bitmap of contexts using
+struct gru_mm_tracker {                                /* pack to reduce size */
+       unsigned int            mt_asid_gen:24; /* ASID wrap count */
+       unsigned int            mt_asid:24;     /* current base ASID for gru */
+       unsigned short          mt_ctxbitmap:16;/* bitmap of contexts using
                                                   asid */
-};
+} __attribute__ ((packed));
 
 struct gru_mm_struct {
        struct mmu_notifier     ms_notifier;
@@ -359,6 +358,8 @@ struct gru_thread_state {
                                                   required for context */
        unsigned char           ts_cbr_au_count;/* Number of CBR resources
                                                   required for context */
+       char                    ts_blade;       /* If >= 0, migrate context if
+                                                  ref from different blade */
        char                    ts_force_unload;/* force context to be unloaded
                                                   after migration */
        char                    ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
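
The packed tracker's fields total 24 + 24 + 16 = 64 bits, so the structure
should now occupy exactly 8 bytes. A compile-time check along these lines
could guard that assumption (sketch, not part of the patch):

	/* Sketch: fail the build if the packed layout ever changes. */
	static inline void gru_mm_tracker_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct gru_mm_tracker) != 8);
	}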
index c84496a7769176843d03b2ea680a620cf888c5be..4e438e4dd2a816880498505ad061ebacd9d1385c 100644 (file)
@@ -187,7 +187,7 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
        "  FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n",
                                gid, asid, num, asids->mt_ctxbitmap);
                        tgh = get_lock_tgh_handle(gru);
-                       tgh_invalidate(tgh, start, 0, asid, grupagesize, 0,
+                       tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
                                       num - 1, asids->mt_ctxbitmap);
                        get_unlock_tgh_handle(tgh);
                } else {
@@ -212,9 +212,8 @@ void gru_flush_all_tlb(struct gru_state *gru)
 
        gru_dbg(grudev, "gru %p, gid %d\n", gru, gru->gs_gid);
        tgh = get_lock_tgh_handle(gru);
-       tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0);
+       tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
        get_unlock_tgh_handle(tgh);
-       preempt_enable();
 }
 
 /*
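
These hunks are the changelog's GRU TLB flush bug fix: the range flush now
passes ~0 instead of 0 as the tgh_invalidate() address-mask argument, the
flush-all path passes a full 16-bit context bitmap (0xffff, one bit per
context) instead of 0, and a stray preempt_enable() with no matching
preempt_disable() is dropped.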