x86: uv: update XPC to handle updated BIOS interface
author	Robin Holt <holt@sgi.com>
Wed, 16 Dec 2009 00:47:56 +0000 (16:47 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Dec 2009 15:20:14 +0000 (07:20 -0800)
The UV BIOS has moved some of its pointers to the "partition reserved
page" from memory into a UV hub MMR.  The GRU does not support bcopy
operations from MMR space, so we need to special-case the MMR addresses
and read them using VLOAD operations.
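
Concretely (condensed from the xp_uv.c hunk below), xp_remote_memcpy_uv()
now checks whether the source address is in MMR space and, if so, routes
it through a helper built on gru_read_gpa() instead of gru_copy_gpa():

	if (uv_gpa_in_mmr_space(src_gpa))
		return xp_remote_mmr_read(dst_gpa, src_gpa, len);

	ret = gru_copy_gpa(dst_gpa, src_gpa, len);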

Additionally, the BIOS call for registering a message queue watchlist no
longer takes the 'blade' value, and the structure that used to be passed
in has been eliminated.  This patch reflects that change as well.
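
On x86, the watchlist-alloc interface therefore changes from

	int uv_bios_mq_watchlist_alloc(int blade, unsigned long addr,
				       unsigned int mq_size,
				       unsigned long *intr_mmr_offset);

to

	int uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
				       unsigned long *intr_mmr_offset);

with mq_size now passed directly to the BIOS call rather than packed into
the (now removed) union uv_watchlist_u alongside the blade number.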

Signed-off-by: Robin Holt <holt@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/include/asm/uv/bios.h
arch/x86/kernel/bios_uv.c
drivers/misc/sgi-xp/xp_uv.c
drivers/misc/sgi-xp/xpc_partition.c
drivers/misc/sgi-xp/xpc_uv.c

diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 7ed17ff502b9edd76afb96fe682609a4df1afffe..2751f3075d8bc746ad548cefe5543960956cc3bd 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -76,15 +76,6 @@ union partition_info_u {
        };
 };
 
-union uv_watchlist_u {
-       u64     val;
-       struct {
-               u64     blade   : 16,
-                       size    : 32,
-                       filler  : 16;
-       };
-};
-
 enum uv_memprotect {
        UV_MEMPROT_RESTRICT_ACCESS,
        UV_MEMPROT_ALLOW_AMO,
@@ -100,7 +91,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
 
 extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
 extern s64 uv_bios_freq_base(u64, u64 *);
-extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
+extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
                                        unsigned long *);
 extern int uv_bios_mq_watchlist_free(int, int);
 extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index 63a88e1f987d35b9ff94ccbd2d53d499b5c783ee..b0206a211b09db5bb050fdd56775add9bbb80e43 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -101,21 +101,17 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
 }
 
 int
-uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
+uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
                           unsigned long *intr_mmr_offset)
 {
-       union uv_watchlist_u size_blade;
        u64 watchlist;
        s64 ret;
 
-       size_blade.size = mq_size;
-       size_blade.blade = blade;
-
        /*
         * bios returns watchlist number or negative error number.
         */
        ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
-                       size_blade.val, (u64)intr_mmr_offset,
+                       mq_size, (u64)intr_mmr_offset,
                        (u64)&watchlist, 0);
        if (ret < BIOS_STATUS_SUCCESS)
                return ret;
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index 1e61f8a61a300f1c18eeae9e269f5bc52e2ca6c9..a0d093274dc07a2bdf50c99d4d35c12490c77e01 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -41,12 +41,35 @@ xp_socket_pa_uv(unsigned long gpa)
        return uv_gpa_to_soc_phys_ram(gpa);
 }
 
+static enum xp_retval
+xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
+                  size_t len)
+{
+       int ret;
+       unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
+
+       BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
+       BUG_ON(len != 8);
+
+       ret = gru_read_gpa(dst_va, src_gpa);
+       if (ret == 0)
+               return xpSuccess;
+
+       dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+               "len=%ld\n", dst_gpa, src_gpa, len);
+       return xpGruCopyError;
+}
+
+
 static enum xp_retval
 xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
                    size_t len)
 {
        int ret;
 
+       if (uv_gpa_in_mmr_space(src_gpa))
+               return xp_remote_mmr_read(dst_gpa, src_gpa, len);
+
        ret = gru_copy_gpa(dst_gpa, src_gpa, len);
        if (ret == 0)
                return xpSuccess;
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 65877bc5edaae9e67dd391b665de91670556fd20..9a6268c89fddc0c49f65001941e13d045a5063a5 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/hardirq.h>
 #include "xpc.h"
+#include <asm/uv/uv_hub.h>
 
 /* XPC is exiting flag */
 int xpc_exiting;
@@ -92,8 +93,12 @@ xpc_get_rsvd_page_pa(int nasid)
                        break;
 
                /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
-               if (L1_CACHE_ALIGN(len) > buf_len) {
-                       kfree(buf_base);
+               if (is_shub())
+                       len = L1_CACHE_ALIGN(len);
+
+               if (len > buf_len) {
+                       if (buf_base != NULL)
+                               kfree(buf_base);
                        buf_len = L1_CACHE_ALIGN(len);
                        buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
                                                            &buf_base);
@@ -105,7 +110,7 @@ xpc_get_rsvd_page_pa(int nasid)
                        }
                }
 
-               ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
+               ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
                if (ret != xpSuccess) {
                        dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
                        break;
@@ -143,7 +148,7 @@ xpc_setup_rsvd_page(void)
                dev_err(xpc_part, "SAL failed to locate the reserved page\n");
                return -ESRCH;
        }
-       rp = (struct xpc_rsvd_page *)__va(rp_pa);
+       rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
 
        if (rp->SAL_version < 3) {
                /* SAL_versions < 3 had a SAL_partid defined as a u8 */
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b5bbe59f9c5729c964f3749bf9e94883256da6d2..bbf0e2ee6fd9c43bcca3c5c8423f424fcfa7ecc2 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -157,22 +157,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
 {
        int ret;
 
-#if defined CONFIG_X86_64
-       ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
-                                        mq->order, &mq->mmr_offset);
-       if (ret < 0) {
-               dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
-                       "ret=%d\n", ret);
-               return ret;
-       }
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
-       ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+       int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+       ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
                                    mq->order, &mq->mmr_offset);
        if (ret < 0) {
                dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
                        ret);
                return -EBUSY;
        }
+#elif defined CONFIG_X86_64
+       ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
+                                        mq->order, &mq->mmr_offset);
+       if (ret < 0) {
+               dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+                       "ret=%d\n", ret);
+               return ret;
+       }
 #else
        #error not a supported configuration
 #endif
@@ -185,12 +187,13 @@ static void
 xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
 {
        int ret;
+       int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
 
 #if defined CONFIG_X86_64
-       ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+       ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
        BUG_ON(ret != BIOS_STATUS_SUCCESS);
 #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
-       ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+       ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
        BUG_ON(ret != SALRET_OK);
 #else
        #error not a supported configuration