diff -uprN linux-3.13/arch/x86/Kconfig.debug linux-3.13-xpfo/arch/x86/Kconfig.debug
--- linux-3.13/arch/x86/Kconfig.debug	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/arch/x86/Kconfig.debug	2014-04-09 22:26:39.365036673 -0400
@@ -5,6 +5,24 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
+config XPFO
+	bool "Enable exclusive page frame ownership (XPFO)"
+	depends on DEBUG_KERNEL
+	depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	depends on X86
+	depends on !HIBERNATION && !DEBUG_PAGEALLOC && !KMEMCHECK
+	default n
+	---help---
+	  This option offers protection against 'ret2dir' (kernel) attacks.
+	  When enabled, every time a page frame is allocated to user space, it
+	  gets unmapped from the direct mapped RAM region in kernel space
+	  (physmap). Similarly, whenever page frames are freed/reclaimed, they
+	  are mapped back to physmap. Special care is given to minimize the
+	  impact on performance by reducing TLB shootdowns and unnecessary page
+	  zero fills.
+
+	  If in doubt, say "N".
+
 config STRICT_DEVMEM
 	bool "Filter access to /dev/mem"
 	---help---
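
A note on the help text above: every page frame has a synonym ("alias") inside the kernel's direct-mapped region (physmap), which is exactly what a ret2dir attack abuses and what XPFO unmaps while user space owns the frame. A minimal, hypothetical sketch of that alias relation, assuming x86-64 (no highmem) and a kernel-module context; the function name xpfo_alias_demo is illustrative and not part of the patch:

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/* Illustrative only: the physmap alias of a user-destined page frame. */
static void xpfo_alias_demo(void)
{
	struct page *pg = alloc_page(GFP_HIGHUSER);	/* user-destined */
	void *alias;

	if (!pg)
		return;

	/*
	 * page_address() returns the frame's address inside the direct
	 * mapped RAM region (on x86-64 every frame has one).  Without
	 * XPFO this alias remains kernel-accessible while user space
	 * owns the frame; with CONFIG_XPFO the corresponding PTE is
	 * cleared once the kernel stops using the frame (see xpfo_ctl()
	 * in arch/x86/mm/pageattr.c below).
	 */
	alias = page_address(pg);
	pr_info("pfn %#lx aliased at %p in physmap\n", page_to_pfn(pg), alias);

	__free_page(pg);
}
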
diff -uprN linux-3.13/arch/x86/mm/dump_pagetables.c linux-3.13-xpfo/arch/x86/mm/dump_pagetables.c
--- linux-3.13/arch/x86/mm/dump_pagetables.c	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/arch/x86/mm/dump_pagetables.c	2014-04-09 21:04:06.148668247 -0400
@@ -97,7 +97,7 @@ static void printk_prot(struct seq_file
 	static const char * const level_name[] =
 		{ "cr3", "pgd", "pud", "pmd", "pte" };
 
-	if (!pgprot_val(prot)) {
+	if (!(pr & _PAGE_PRESENT)) {
 		/* Not present */
 		seq_printf(m, "                          ");
 	} else {
diff -uprN linux-3.13/arch/x86/mm/highmem_32.c linux-3.13-xpfo/arch/x86/mm/highmem_32.c
--- linux-3.13/arch/x86/mm/highmem_32.c	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/arch/x86/mm/highmem_32.c	2014-04-09 21:04:06.148668247 -0400
@@ -5,9 +5,14 @@
 
 void *kmap(struct page *page)
 {
+	void *kaddr;
+
 	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
+	if (!PageHighMem(page)) {
+		kaddr = page_address(page);
+		xpfo_ctl(XPFO_CMD_KMAP, kaddr, page, 1);
+		return kaddr;
+	}
 	return kmap_high(page);
 }
 EXPORT_SYMBOL(kmap);
@@ -16,8 +21,10 @@ void kunmap(struct page *page)
 {
 	if (in_interrupt())
 		BUG();
-	if (!PageHighMem(page))
+	if (!PageHighMem(page)) {
+		xpfo_ctl(XPFO_CMD_KUNMAP, page_address(page), page, 1);
 		return;
+	}
 	kunmap_high(page);
 }
 EXPORT_SYMBOL(kunmap);
@@ -34,12 +41,16 @@ void *kmap_atomic_prot(struct page *page
 {
 	unsigned long vaddr;
 	int idx, type;
+	void *kaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 
-	if (!PageHighMem(page))
-		return page_address(page);
+	if (!PageHighMem(page)) {
+		kaddr = page_address(page);
+		xpfo_ctl(XPFO_CMD_KMAP, kaddr, page, 1);
+		return kaddr;
+	}
 
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
@@ -99,6 +110,7 @@ void __kunmap_atomic(void *kvaddr)
 	}
 #endif
 
+	xpfo_ctl(XPFO_CMD_KUNMAP, (void *)kvaddr, virt_to_page((void *)kvaddr), 1);
 	pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff -uprN linux-3.13/arch/x86/mm/init.c linux-3.13-xpfo/arch/x86/mm/init.c
--- linux-3.13/arch/x86/mm/init.c	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/arch/x86/mm/init.c	2014-04-09 21:04:06.148668247 -0400
@@ -123,7 +123,9 @@ static void __init probe_page_size_mask(
 {
 	init_gbpages();
 
-#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
+#if	!defined(CONFIG_XPFO)			&&	\
+	!defined(CONFIG_DEBUG_PAGEALLOC)	&&	\
+	!defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
diff -uprN linux-3.13/arch/x86/mm/pageattr.c linux-3.13-xpfo/arch/x86/mm/pageattr.c
--- linux-3.13/arch/x86/mm/pageattr.c	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/arch/x86/mm/pageattr.c	2014-04-09 21:04:06.148668247 -0400
@@ -1,6 +1,9 @@
 /*
  * Copyright 2002 Andi Kleen, SuSE Labs.
  * Thanks to Ben LaHaise for precious feedback.
+ *
+ * Support for XPFO was added by Vasileios P. Kemerlis <vpk@cs.columbia.edu>.
+ * Copyright (C) 2014, Columbia University, New York, NY, USA.
  */
 #include <linux/highmem.h>
 #include <linux/bootmem.h>
@@ -1350,6 +1353,186 @@ int set_pages_rw(struct page *page, int
 	return set_memory_rw(addr, numpages);
 }
 
+#ifdef CONFIG_XPFO
+
+/*
+ * Atomic update of a single, kernel page table entry.
+ *
+ * @pg:		page frame to map/unmap
+ * @kaddr:	kernel address of `pg' (in the direct-mapped memory region)
+ * @prot:	protection flags
+ */
+static inline void
+set_kpte(struct page *pg, unsigned long kaddr, pgprot_t prot) {
+	unsigned int	level;
+	pte_t		*kptep = lookup_address(kaddr, &level);
+
+	/* TODO: remove (sanity check) */
+	BUG_ON(!kptep || level != PG_LEVEL_4K);
+
+	set_pte_atomic(kptep, pfn_pte(page_to_pfn(pg), canon_pgprot(prot)));
+}
+
+/*
+ * Exclusive page frame ownership (XPFO).
+ *
+ * @act:	command/action (alloc, free, map, unmap ...)
+ * @kaddr:	kernel address (of `pg')
+ * @pg:		page frame (starting page frame if num > 1)
+ * @num:	number of (consecutive) page frames
+ */
+void
+xpfo_ctl(xpfo_cmd_t act, void *kaddr, struct page *pg, int num)
+{
+	int i, tlb_shoot = 0;
+	unsigned long __kaddr = (unsigned long)kaddr;
+
+	switch (act) {
+		/* page frame(s) allocated (destined to kernel space) */
+		case XPFO_CMD_KALLOC:
+			for (i = 0; i < num; i++)  {
+				/* TODO: remove (sanity check) */
+				WARN_ON(PageUserFp(pg + i) || PageUser(pg + i));
+			
+				/* enable XPFO on the page frame */
+				__SetPageKernel(pg + i);
+			}
+			
+			/* done */
+			break;
+
+		/* page frame(s) allocated (destined to user space) */
+		case XPFO_CMD_UALLOC:
+			for (i = 0; i < num; i++)  {
+				/* TODO: remove (sanity check) */
+				WARN_ON(PageUserFp(pg + i) || PageUser(pg + i));
+				
+				/* enable XPFO on the page frame */
+				__SetPageUserFp(pg + i);
+					
+				/* set the map counter */
+				xpfo_kmcnt_init(pg + i);
+
+				/* initialize the per-page frame lock */
+				xpfo_lock_init(pg + i);
+				
+				/*
+				 * the page frame was previously
+				 * allocated to kernel space
+				 */
+				if (__TestClearPageKernel(pg + i))
+					/* enable TLB shootdown */
+					tlb_shoot = 1;
+			}
+
+			/* perform TLB shootdown */
+			if (tlb_shoot)
+				flush_tlb_kernel_range(__kaddr,
+						__kaddr + (num * PAGE_SIZE));
+			
+			/* done */
+			break;
+
+		/* page frame(s) deallocated */
+		case XPFO_CMD_FREE:
+			for (	i = 0;
+				i < num;
+				i++, __kaddr += PAGE_SIZE, kaddr += PAGE_SIZE) {
+				/*
+				 * the page frame was previously
+				 * allocated to user space
+				 */
+				if (__TestClearPageUser(pg + i)) {
+					/* map it back to kernel space */
+					set_kpte(pg + i,
+						__kaddr,
+						__pgprot(__PAGE_KERNEL));
+				
+					/* no TLB update */
+
+					/* zap the contents of the page frame */
+					clear_page(kaddr);
+					
+					/* mark it accordingly (clean) */
+					__SetPageZap(pg + i);
+				}
+
+				/* reset XPFO */
+				__ClearPageUserFp(pg + i);
+			}
+
+			/* done */
+			break;
+
+		/* page frame (needs to be) mapped to kernel space */
+		case XPFO_CMD_KMAP:
+			/* TODO: remove (sanity check) */	
+			BUG_ON(num != 1);
+				
+			/* the page is allocated to kernel space */
+			if (PageKernel(pg))
+				/* done; fast path */
+				break;
+			
+			/* get the per-page frame lock */
+			xpfo_lock(pg);
+
+			/* the page was previously allocated to user space */
+			if (xpfo_kmcnt_get(pg) && PageUser(pg))
+				/* map it to kernel space */
+				set_kpte(pg, __kaddr, __pgprot(__PAGE_KERNEL));
+					
+			/* no TLB update */
+
+			/* release the per-page frame lock */
+			xpfo_unlock(pg);
+			
+			/* done */
+			break;
+
+		/* page frame (needs to be) unmapped from kernel space */
+		case XPFO_CMD_KUNMAP:
+			/* TODO: remove (sanity check) */
+			BUG_ON(num != 1);
+			
+			/* the page is allocated to kernel space */
+			if (PageKernel(pg))
+				/* done; fast path */
+				break;
+			
+			/* get the per-page frame lock */
+			xpfo_lock(pg);
+
+			/* the page frame is to be allocated to user space */
+			if (xpfo_kmcnt_put(pg) 	&&
+				(PageUserFp(pg) || PageUser(pg))) { 
+
+				/* unmap it from kernel space */
+				set_kpte(pg, __kaddr, __pgprot(0));
+				
+				/* local TLB update */
+				__flush_tlb_one(__kaddr);
+				
+				/* mark it accordingly (user) */
+				__SetPageUser(pg);
+			}
+			
+			/* release the per-page frame lock */
+			xpfo_unlock(pg);
+			
+			/* done */
+			break;
+
+		default:	/* sanity check */
+			BUG();
+
+			break;	/* make the compiler happy */
+	}
+}
+EXPORT_SYMBOL(xpfo_ctl);
+
+#endif /* CONFIG_XPFO */
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __set_pages_p(struct page *page, int numpages)
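
Taken together, the xpfo_ctl() cases above implement a small per-frame state machine. Below is a rough sketch of which command fires at each step of a user-destined frame's life, assuming the hooks added elsewhere in this patch (mm/page_alloc.c and the kmap/kunmap paths); the function name xpfo_lifecycle_sketch is illustrative only:

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: the XPFO commands issued over one frame's lifetime. */
static void xpfo_lifecycle_sketch(void)
{
	struct page *pg;
	void *kaddr;

	/* prep_new_page() -> XPFO_CMD_UALLOC: PG_user_fp is set; a TLB
	 * shootdown is needed only if the frame was PG_kernel before.   */
	pg = alloc_page(GFP_HIGHUSER);
	if (!pg)
		return;

	/* kmap() -> XPFO_CMD_KMAP: bumps xpfo_kmcnt; if an earlier kunmap
	 * had removed the frame from physmap (PG_user), the first mapper
	 * restores its PTE so the kernel can touch the contents.        */
	kaddr = kmap(pg);
	memset(kaddr, 0, PAGE_SIZE);

	/* kunmap() -> XPFO_CMD_KUNMAP: the last unmapper clears the
	 * physmap PTE, flushes the local TLB entry and sets PG_user.    */
	kunmap(pg);

	/* free_pages_prepare() -> XPFO_CMD_FREE: a PG_user frame is mapped
	 * back into physmap, zeroed ("zapped") and tagged PG_zap;
	 * PG_user_fp is cleared.                                        */
	__free_page(pg);
}
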
diff -uprN linux-3.13/arch/x86/mm/tlb.c linux-3.13-xpfo/arch/x86/mm/tlb.c
--- linux-3.13/arch/x86/mm/tlb.c	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/arch/x86/mm/tlb.c	2014-04-09 21:04:06.152668247 -0400
@@ -279,9 +279,13 @@ static void do_kernel_range_flush(void *
 	struct flush_tlb_info *f = info;
 	unsigned long addr;
 
+	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	
 	/* flush range by one by one 'invlpg' */
-	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) {
+		count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
 		__flush_tlb_single(addr);
+	}
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -292,6 +296,8 @@ void flush_tlb_kernel_range(unsigned lon
 	/* In modern CPU, last level tlb used for both data/ins */
 	act_entries = tlb_lld_4k[ENTRIES];
 
+	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	
 	/* Balance as user space task's flush, a bit conservative */
 	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
 		(end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
diff -uprN linux-3.13/include/linux/highmem.h linux-3.13-xpfo/include/linux/highmem.h
--- linux-3.13/include/linux/highmem.h	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/include/linux/highmem.h	2014-04-09 21:04:06.152668247 -0400
@@ -55,23 +55,33 @@ static inline struct page *kmap_to_page(
 #ifndef ARCH_HAS_KMAP
 static inline void *kmap(struct page *page)
 {
+	void *kaddr;
+	
 	might_sleep();
-	return page_address(page);
+	kaddr = page_address(page);
+	xpfo_ctl(XPFO_CMD_KMAP, kaddr, page, 1);
+	return kaddr;
 }
 
 static inline void kunmap(struct page *page)
 {
+	xpfo_ctl(XPFO_CMD_KUNMAP, page_address(page), page, 1);
 }
 
 static inline void *kmap_atomic(struct page *page)
 {
+	void *kaddr;
+
 	pagefault_disable();
-	return page_address(page);
+	kaddr = page_address(page);
+	xpfo_ctl(XPFO_CMD_KMAP, kaddr, page, 1);
+	return kaddr;
 }
 #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
 
 static inline void __kunmap_atomic(void *addr)
 {
+	xpfo_ctl(XPFO_CMD_KUNMAP, addr, virt_to_page(addr), 1);
 	pagefault_enable();
 }
 
@@ -131,8 +141,11 @@ do {
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *addr = kmap_atomic(page);
-	clear_user_page(addr, vaddr, page);
+	void *addr;
+
+	addr = kmap_atomic(page);
+	if (!__TestClearPageZap(page))
+		clear_user_page(addr, vaddr, page);
 	kunmap_atomic(addr);
 }
 #endif
@@ -184,8 +197,11 @@ alloc_zeroed_user_highpage_movable(struc
 
 static inline void clear_highpage(struct page *page)
 {
-	void *kaddr = kmap_atomic(page);
-	clear_page(kaddr);
+	void *kaddr;
+
+	kaddr = kmap_atomic(page);
+	if (!__TestClearPageZap(page))
+		clear_page(kaddr);
 	kunmap_atomic(kaddr);
 }
 
diff -uprN linux-3.13/include/linux/mm.h linux-3.13-xpfo/include/linux/mm.h
--- linux-3.13/include/linux/mm.h	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/include/linux/mm.h	2014-04-09 21:04:06.152668247 -0400
@@ -2011,5 +2011,8 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+/* XPFO */
+#include <linux/xpfo.h>
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
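
The __TestClearPageZap() checks that this patch adds to clear_user_highpage() and clear_highpage() (include/linux/highmem.h, above) are what allow XPFO to skip redundant zero fills. A rough sketch of the intended effect, assuming the mm/page_alloc.c hooks further down; the function name is illustrative:

#include <linux/mm.h>
#include <linux/gfp.h>

/*
 * Illustrative only: a frame freed from user space is zeroed ("zapped")
 * by XPFO_CMD_FREE and tagged PG_zap.  If its next owner requests a
 * zeroed page, prep_zero_page() -> clear_highpage() finds PG_zap set
 * and skips the second zero fill.
 */
static struct page *xpfo_zero_elision_sketch(void)
{
	struct page *pg = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

	/*
	 * If this frame last belonged to user space it was already
	 * cleared on free, so __TestClearPageZap() in clear_highpage()
	 * consumes the flag instead of zero-filling again; otherwise
	 * prep_new_page() clears PG_zap and clear_page() runs as usual.
	 */
	return pg;
}
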
diff -uprN linux-3.13/include/linux/mm_types.h linux-3.13-xpfo/include/linux/mm_types.h
--- linux-3.13/include/linux/mm_types.h	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/include/linux/mm_types.h	2014-04-09 22:20:34.265025237 -0400
@@ -195,6 +195,69 @@ struct page {
 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 	int _last_cpupid;
 #endif
+
+#ifdef CONFIG_XPFO
+	/*
+	 * XPFO
+	 *
+	 * `xpfo_flags':	Bit-encoded flags. We do *not* use the page
+	 * 			flags (i.e., `struct page.flags'), since this
+	 * 			field is highly congested and we currently need
+	 * 			four (4) new flags for XPFO.
+	 *   [0]: `user_fp' ->	Denotes that the page frame is destined to user
+	 *   			space. This flag is used in the fast-path, where
+	 *   			we only mark the page frame accordingly and we
+	 *   			do *not* unmap it from the direct mapped RAM
+	 *   			region. In most cases, the kernel will need to
+	 *   			mangle the page frame immediately after its
+	 *   			acquisition (e.g., COW, zero-fill) -- we avoid
+	 *   			an unnecessary map operation with `user_fp'.
+	 *   [1]: `user'    ->	Denotes that the page frame is destined to user
+	 *   			space. This flag is used in the slow-path, where
+	 *   			we need to map/unmap the page frame in/from the
+	 *   			direct mapped RAM region, every time the kernel
+	 *   			accesses the contents of the page frame (e.g.,
+	 *   			COW, file-backed mmap-ed regions). In addition, 
+	 *   			`user' is used when page frames are deallocated.
+	 *   			If the page frame was previously assigned to
+	 *   			user space, it is zapped and mapped back to the 
+	 *   			direct mapped RAM region.
+	 *   [2]: `kernel'  ->	Denotes a page frame destined to kernel space.
+	 *   			This is used for identifying page frames that
+	 *   			are first assigned to kernel space (e.g., SLUB,
+	 *   			k*alloc), and then freed and mapped to user
+	 *   			space. In such cases, an expensive TLB shootdown
+	 *   			is necessary. However, by trying to allocate
+	 *   			previously-allocated kernel frames to kernel
+	 *   			space and previously-allocated user frames to
+	 *   			user space, we minimize TLB shootdowns. Page
+	 *   			frames allocated to user space, freed, and
+	 *   			subsequently allocated to user space again
+	 *   			require only local TLB invalidation.
+	 *   [3]: `zap'	    ->	Denotes that the page frame has been zapped.
+	 *   			This flag is used to avoid zapping page frames
+	 *   			twice. Whenever a page frame that was previously
+	 *   			mapped in user space is freed, it needs to be
+	 *   			zapped before being mapped back into the direct
+	 *   			mapped RAM region. If the page frame is subsequently
+	 *   			allocated with `__GFP_ZERO', we can avoid
+	 *   			clearing it again.
+	 *
+	 * `xpfo_kmcnt':	Reference counter; used for balancing calls for
+	 * 			mapping/unmapping. Upon multiple map requests,
+	 * 			for the *same* page frame, only the first
+	 * 			request maps the page frame back to kernel
+	 * 			space. Likewise, only the last unmap request
+	 * 			will remove the page frame from the direct
+	 * 			mapped RAM region.
+	 *
+	 * `xpfo_lock':		Mutex; used for concurrent map/unmap requests
+	 * 			for the *same* page frame.
+	 */
+	unsigned long	xpfo_flags;
+	atomic_t	xpfo_kmcnt;
+	spinlock_t	xpfo_lock;
+#endif
 }
 /*
  * The struct page can be forced to be double word aligned so that atomic ops
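
The xpfo_kmcnt counter described above keeps nested map/unmap requests balanced. A short sketch of the intended behaviour for a single frame that currently carries PG_user (i.e., an earlier kunmap already removed it from physmap); the function name is illustrative:

#include <linux/highmem.h>

/* Illustrative only: how xpfo_kmcnt serializes nested mappings. */
static void xpfo_kmcnt_sketch(struct page *pg)
{
	void *a, *b;

	a = kmap_atomic(pg);	/* kmcnt 0 -> 1: physmap PTE restored      */
	b = kmap_atomic(pg);	/* kmcnt 1 -> 2: already mapped, no change */

	/* ... the kernel reads/writes the frame through a (== b) ...     */

	kunmap_atomic(b);	/* kmcnt 2 -> 1: the frame stays mapped    */
	kunmap_atomic(a);	/* kmcnt 1 -> 0: PTE cleared, local TLB
				 * entry flushed, PG_user set              */
}
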
diff -uprN linux-3.13/include/linux/xpfo.h linux-3.13-xpfo/include/linux/xpfo.h
--- linux-3.13/include/linux/xpfo.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-3.13-xpfo/include/linux/xpfo.h	2014-04-09 21:04:06.152668247 -0400
@@ -0,0 +1,138 @@
+#ifndef _LINUX_XPFO_H
+#define _LINUX_XPFO_H
+
+typedef enum {
+	XPFO_CMD_UALLOC = 1,	/* user space page frame allocation	*/
+	XPFO_CMD_KALLOC,	/* kernel space page frame allocation	*/
+	XPFO_CMD_FREE,		/* page frame deallocation		*/
+	XPFO_CMD_KMAP,		/* page frame mapping (kernel space)	*/
+	XPFO_CMD_KUNMAP		/* page frame unmapping (kernel space)	*/	
+} xpfo_cmd_t;
+
+#ifdef CONFIG_XPFO
+typedef enum {
+	PG_user_fp = 0,	/* page frame allocated to user space (fast path) */
+	PG_user,	/* page frame allocated to user space		  */
+	PG_kernel,	/* page frame allocated to kernel space		  */
+	PG_zap,		/* clean page frame				  */
+} xpfo_gpf_t; 
+
+/* get the value of `PG_user_fp' */
+static inline int PageUserFp(struct page *page)
+{
+	return test_bit(PG_user_fp, &page->xpfo_flags);
+}
+
+/* assert `PG_user_fp' */
+static inline void __SetPageUserFp(struct page *page)
+{
+	__set_bit(PG_user_fp, &page->xpfo_flags);
+}
+
+/* clear `PG_user_fp' */
+static inline void __ClearPageUserFp(struct page *page)
+{
+	__clear_bit(PG_user_fp, &page->xpfo_flags);
+}
+
+/* get the value of `PG_user' */
+static inline int PageUser(struct page *page)
+{
+	return test_bit(PG_user, &page->xpfo_flags);
+}
+
+/* assert `PG_user' */
+static inline void __SetPageUser(struct page *page)
+{
+	__set_bit(PG_user, &page->xpfo_flags);
+}
+
+/* get the value `PG_user' and clear it afterwards */
+static inline int __TestClearPageUser(struct page *page)
+{
+	return __test_and_clear_bit(PG_user, &page->xpfo_flags);
+}
+
+/* get the value of `PG_kernel' */
+static inline int PageKernel(struct page *page)
+{
+	return test_bit(PG_kernel, &page->xpfo_flags);
+}
+
+/* assert `PG_kernel' */
+static inline void __SetPageKernel(struct page *page)
+{
+	__set_bit(PG_kernel, &page->xpfo_flags);
+}
+
+/* get the value `PG_kernel' and clear it afterwards */
+static inline int __TestClearPageKernel(struct page *page)
+{
+	return __test_and_clear_bit(PG_kernel, &page->xpfo_flags);
+}
+
+/* get the value of `PG_zap' */
+static inline int PageZap(struct page *page)
+{
+	return test_bit(PG_zap, &page->xpfo_flags);
+}
+
+/* assert `PG_zap' */
+static inline void __SetPageZap(struct page *page)
+{
+	__set_bit(PG_zap, &page->xpfo_flags);
+}
+
+/* clear `PG_zap' */
+static inline void __ClearPageZap(struct page *page)
+{
+	__clear_bit(PG_zap, &page->xpfo_flags);
+}
+
+/* get the value `PG_zap' and clear it afterwards */
+static inline int __TestClearPageZap(struct page *page)
+{
+	return __test_and_clear_bit(PG_zap, &page->xpfo_flags);
+}
+
+static inline void xpfo_kmcnt_init(struct page *page)
+{
+	atomic_set(&page->xpfo_kmcnt, 0);
+}
+
+static inline int xpfo_kmcnt_get(struct page *page)
+{
+	return (atomic_inc_return(&page->xpfo_kmcnt) == 1);
+}
+
+static inline int xpfo_kmcnt_put(struct page *page)
+{
+	return (atomic_dec_return(&page->xpfo_kmcnt) == 0);
+}
+
+static inline void xpfo_lock_init(struct page *page)
+{
+	spin_lock_init(&page->xpfo_lock);
+}
+
+static inline void xpfo_lock(struct page *page)
+{
+	spin_lock(&page->xpfo_lock);
+}
+
+static inline void xpfo_unlock(struct page *page)
+{
+	spin_unlock(&page->xpfo_lock);
+}
+
+extern void xpfo_ctl(xpfo_cmd_t act, void *kaddr, struct page *pg, int num);
+#else
+static inline int PageKernel(struct page *page) { return 0; }
+static inline int PageZap(struct page *page) { return 0; }
+static inline void __ClearPageZap(struct page *page) { }
+static inline int __TestClearPageZap(struct page *page) { return 0; }
+static
+inline void xpfo_ctl(xpfo_cmd_t act, void *kaddr, struct page *pg, int num) { }
+#endif
+
+#endif	/* _LINUX_XPFO_H */
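
The helpers above operate on independent bits, so the ownership invariants they are meant to uphold stay implicit. The following hypothetical debugging helper (not part of the patch, and assuming CONFIG_XPFO=y) spells out the invariants that the xpfo_ctl() state machine maintains between the flags:

#include <linux/mm.h>

/* Hypothetical helper, for illustration only. */
static inline void xpfo_check_invariants(struct page *pg)
{
	/* a frame is never owned by kernel and user space at the same
	 * time (transient states inside xpfo_ctl() itself aside)      */
	WARN_ON(PageKernel(pg) && (PageUserFp(pg) || PageUser(pg)));

	/* PG_user (slow path) is only ever set on frames that already
	 * took the user fast path (PG_user_fp) at allocation time     */
	WARN_ON(PageUser(pg) && !PageUserFp(pg));
}
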
diff -uprN linux-3.13/mm/page_alloc.c linux-3.13-xpfo/mm/page_alloc.c
--- linux-3.13/mm/page_alloc.c	2014-01-19 21:40:07.000000000 -0500
+++ linux-3.13-xpfo/mm/page_alloc.c	2014-04-09 21:04:06.152668247 -0400
@@ -12,6 +12,7 @@
  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
+ *  Support for XPFO added by Vasileios P. Kemerlis, Feb 2014         
  */
 
 #include <linux/stddef.h>
@@ -731,6 +732,8 @@ static bool free_pages_prepare(struct pa
 	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
 
+	xpfo_ctl(XPFO_CMD_FREE, page_address(page), page, 1 << order);
+	
 	return true;
 }
 
@@ -870,8 +873,17 @@ static int prep_new_page(struct page *pa
 	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 
+	if ((gfp_flags & GFP_HIGHUSER) == GFP_HIGHUSER)
+		xpfo_ctl(XPFO_CMD_UALLOC, page_address(page), page, 1 << order);
+	else
+		xpfo_ctl(XPFO_CMD_KALLOC, page_address(page), page, 1 << order);
+
 	if (gfp_flags & __GFP_ZERO)
 		prep_zero_page(page, order, gfp_flags);
+	else {
+		for (i = 0; i < (1 << order); i++) 
+			__ClearPageZap(page + i);
+	}
 
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
@@ -1364,7 +1376,7 @@ void free_hot_cold_page(struct page *pag
 	}
 
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	if (cold)
+	if (cold || PageKernel(page))
 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	else
 		list_add(&page->lru, &pcp->lists[migratetype]);
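
The `(gfp_flags & GFP_HIGHUSER) == GFP_HIGHUSER' test in prep_new_page() above is how the allocator decides which frames are user-destined. A small sketch of how common GFP masks classify under that test, assuming the 3.13 definitions (GFP_USER = __GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL, GFP_HIGHUSER = GFP_USER | __GFP_HIGHMEM); the helper name is illustrative:

#include <linux/gfp.h>

/* Illustrative only: which allocations prep_new_page() treats as
 * user-destined (XPFO_CMD_UALLOC) versus kernel-destined (KALLOC). */
static inline bool xpfo_is_user_alloc(gfp_t gfp_flags)
{
	return (gfp_flags & GFP_HIGHUSER) == GFP_HIGHUSER;
}

/*
 *   xpfo_is_user_alloc(GFP_KERNEL)            -> false (XPFO_CMD_KALLOC)
 *   xpfo_is_user_alloc(GFP_ATOMIC)            -> false (XPFO_CMD_KALLOC)
 *   xpfo_is_user_alloc(GFP_HIGHUSER)          -> true  (XPFO_CMD_UALLOC)
 *   xpfo_is_user_alloc(GFP_HIGHUSER_MOVABLE)  -> true  (XPFO_CMD_UALLOC)
 *
 * i.e., anonymous and page-cache memory (GFP_HIGHUSER_MOVABLE is a
 * superset of GFP_HIGHUSER) takes the UALLOC path, while typical kernel
 * allocations lack __GFP_HIGHMEM and stay on the KALLOC path.
 */
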
