diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 295b5abee72f..34e603cc1716 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -227,7 +227,7 @@ config IA64_SGI_SN_SIM
 
 config IA64_SGI_SN_XP
 	tristate "Support communication between SGI SSIs"
-	depends on MSPEC
+	select IA64_UNCACHED_ALLOCATOR
 	help
 	  An SGI machine can be divided into multiple Single System
 	  Images which act independently of each other and have
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 2c3c4a8af553..cd7ed73f0e7a 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -22,6 +22,7 @@
 #include <linux/cache.h>
 #include <linux/mmzone.h>
 #include <linux/nodemask.h>
+#include <asm/uncached.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
@@ -183,7 +184,7 @@ xpc_rsvd_page_init(void)
 	 * memory protections are never restricted.
 	 */
 	if ((amos_page = xpc_vars->amos_page) == NULL) {
-		amos_page = (AMO_t *) mspec_kalloc_page(0);
+		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
 		if (amos_page == NULL) {
 			dev_err(xpc_part, "can't allocate page of AMOs\n");
 			return NULL;
@@ -200,7 +201,8 @@ xpc_rsvd_page_init(void)
 			if (ret != 0) {
 				dev_err(xpc_part, "can't change memory "
 					"protections\n");
-				mspec_kfree_page((unsigned long) amos_page);
+				uncached_free_page(__IA64_UNCACHED_OFFSET |
+						   TO_PHYS((u64) amos_page));
 				return NULL;
 			}
 		}
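
For reference, a minimal sketch of how the AMO page now round-trips through the uncached allocator in the hunks above. It assumes the single-page uncached_alloc_page(nid)/uncached_free_page(addr) interfaces this patch links against, that uncached_alloc_page() returns 0 on failure, and that TO_AMO()/TO_PHYS() come from <asm/sn/addrs.h>; the two helper names are illustrative, not part of the patch.

#include <linux/types.h>
#include <asm/system.h>		/* __IA64_UNCACHED_OFFSET */
#include <asm/uncached.h>	/* uncached_alloc_page(), uncached_free_page() */
#include <asm/sn/addrs.h>	/* TO_AMO(), TO_PHYS() */
#include <asm/sn/mspec.h>	/* AMO_t */

/* Illustrative helper: allocate one uncached page on node 0 and rebase
 * it into the AMO address space so fetchop accesses through AMO_t
 * pointers work, as xpc_rsvd_page_init() now does. */
static AMO_t *xp_alloc_amos_page(void)
{
	unsigned long uncached = uncached_alloc_page(0);

	if (uncached == 0)	/* assumed failure indication */
		return NULL;
	return (AMO_t *) TO_AMO(uncached);
}

/* Illustrative helper: undo the allocation.  The allocator wants back
 * the uncached identity-mapped address it handed out, so the code
 * strips the AMO region bits with TO_PHYS() and re-applies
 * __IA64_UNCACHED_OFFSET, matching the free path in the error leg
 * above. */
static void xp_free_amos_page(AMO_t *amos_page)
{
	uncached_free_page(__IA64_UNCACHED_OFFSET | TO_PHYS((u64) amos_page));
}
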
diff --git a/include/asm-ia64/sn/mspec.h b/include/asm-ia64/sn/mspec.h
new file mode 100644
index 000000000000..dbe13c6121a8
--- /dev/null
+++ b/include/asm-ia64/sn/mspec.h
@@ -0,0 +1,59 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_MSPEC_H
+#define _ASM_IA64_SN_MSPEC_H
+
+#define FETCHOP_VAR_SIZE 64 /* 64 bytes per fetchop variable */
+
+#define FETCHOP_LOAD		0
+#define FETCHOP_INCREMENT	8
+#define FETCHOP_DECREMENT	16
+#define FETCHOP_CLEAR		24
+
+#define FETCHOP_STORE		0
+#define FETCHOP_AND		24
+#define FETCHOP_OR		32
+
+#define FETCHOP_CLEAR_CACHE	56
+
+#define FETCHOP_LOAD_OP(addr, op) ( \
+	*(volatile long *)((char *)(addr) + (op)))
+
+#define FETCHOP_STORE_OP(addr, op, x) ( \
+	*(volatile long *)((char *)(addr) + (op)) = (long)(x))
+
+#ifdef __KERNEL__
+
+/*
+ * Each Atomic Memory Operation (AMO, formerly known as fetchop)
+ * variable is 64 bytes long.  Only the first 8 bytes hold the
+ * variable; the remaining 56 bytes are unaddressable because the
+ * low-order address bits select the fetchop operation.
+ *
+ * NOTE: The AMO_t _MUST_ be placed in either the first or second half
+ * of the cache line, and the cache line _MUST NOT_ be used for
+ * anything other than additional AMO_t entries.  This is because two
+ * addresses reference the same physical cache line.  One is a
+ * cacheable address with the memory type bits all set, which may be
+ * loaded into the processor cache.  The AMO_t is referenced uncached
+ * via the memory special memory type.  If any portion of the
+ * cacheable copy of the line is modified, then when that line is
+ * written back it will overwrite the uncached value in physical
+ * memory and lead to inconsistency.
+ */
+typedef struct {
+	u64 variable;
+	u64 unused[7];
+} AMO_t;
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_SN_MSPEC_H */
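
As a usage note for the header added above, here is a hedged sketch of how the FETCHOP offsets and AMO_t are driven, mirroring the pattern XPC uses for its AMO-based IPIs: the fetchop operation is encoded in the low-order address bits, so an ordinary store at (variable address + FETCHOP_OR) atomically ORs a value in, and an ordinary load at (variable address + FETCHOP_CLEAR) returns the value while clearing it. The helper names are illustrative only; TO_AMO() is assumed to come from <asm/sn/addrs.h>.

#include <linux/types.h>
#include <asm/sn/addrs.h>	/* TO_AMO() */
#include <asm/sn/mspec.h>	/* AMO_t, FETCHOP_* */

/* Illustrative helper: atomically OR a flag into an AMO variable.
 * TO_AMO() rebases the variable's address into the AMO space; the
 * FETCHOP_OR offset tells the memory controller which atomic
 * operation to perform on the store. */
static inline void amo_set_flags(AMO_t *amo, u64 flags)
{
	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flags);
}

/* Illustrative helper: atomically fetch an AMO variable and clear it,
 * e.g. to collect all flags that senders have ORed in since the last
 * read. */
static inline u64 amo_get_and_clear_flags(AMO_t *amo)
{
	return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
}

Because each AMO_t is 64 bytes and must sit in either the first or second half of a cache line, at most two AMO_t entries share a line and nothing else may be placed there (see the NOTE in the header).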