 #define RCP_GR_BIT	50
 #define RCP_GC_BIT	49
+/* User dirty bit for KVM's migration feature */
+#define KVM_UD_BIT	47
+
 #ifndef __s390x__
 /* Bits in the segment table address-space-control-element */
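Everywhere below, the pgste for a pte is reached as (unsigned long *)(ptep + PTRS_PER_PTE): with CONFIG_PGSTE the page-status table is allocated directly behind the page table, one full table length past each pte. A minimal sketch of that addressing, not part of the patch; the helper name pgste_of is illustrative only:

static inline unsigned long *pgste_of(pte_t *ptep)
{
	/* the status word for ptep sits one full table length past it */
	return (unsigned long *) (ptep + PTRS_PER_PTE);
}

KVM_UD_BIT is then simply a bit number inside that status word, manipulated with the non-atomic *_bit_simple() helpers while rcp_lock() is held.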
 	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 	skey = page_get_storage_key(page_to_phys(page));
-	if (skey & _PAGE_CHANGED)
+	if (skey & _PAGE_CHANGED) {
 		set_bit_simple(RCP_GC_BIT, pgste);
+		set_bit_simple(KVM_UD_BIT, pgste);
+	}
 	if (skey & _PAGE_REFERENCED)
 		set_bit_simple(RCP_GR_BIT, pgste);
-	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
+	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
 		SetPageDirty(page);
+		set_bit_simple(KVM_UD_BIT, pgste);
+	}
 	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
 		SetPageReferenced(page);
 #endif
 	return pte;
 }
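The change above latches dirtiness into KVM_UD_BIT from both of its sources: the storage key's changed bit and RCP_HC_BIT. The extra bit is needed because SetPageDirty() consumes the host information, so a later reader would otherwise find nothing left to report. A toy user-space model of that latching, assuming nothing from the kernel; the bitmask names are illustrative, not the real pgste layout:

#include <stdio.h>

#define GC (1u << 0)	/* guest changed (folded in from the storage key) */
#define HC (1u << 1)	/* host changed */
#define UD (1u << 2)	/* user dirty, kept for the migration code */

static unsigned int fold_dirty(unsigned int status, int key_changed)
{
	if (key_changed)
		status |= GC | UD;	/* guest write: record and latch */
	if (status & HC) {
		status &= ~HC;		/* host bit is consumed here ... */
		status |= UD;		/* ... but user dirtiness survives */
	}
	return status;
}

int main(void)
{
	/* page dirtied by the host only: HC is gone, UD remains */
	printf("%#x\n", fold_dirty(HC, 0));	/* prints 0x4 */
	return 0;
}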
+#ifdef CONFIG_PGSTE
+/*
+ * Get (and clear) the user dirty bit for a PTE.  Returns 1 if the
+ * page was dirty, 0 if it was clean, and -EINVAL if the address
+ * space has no page-status-table entries (pgstes) at all.
+ */
+static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
+						     pte_t *ptep)
+{
+	int dirty;
+	unsigned long *pgste;
+	struct page *page;
+	unsigned int skey;
+
+	if (!mm->context.pgstes)
+		return -EINVAL;
+	rcp_lock(ptep);
+	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+	page = virt_to_page(pte_val(*ptep));
+	skey = page_get_storage_key(page_to_phys(page));
+	if (skey & _PAGE_CHANGED) {
+		set_bit_simple(RCP_GC_BIT, pgste);
+		set_bit_simple(KVM_UD_BIT, pgste);
+	}
+	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
+		SetPageDirty(page);
+		set_bit_simple(KVM_UD_BIT, pgste);
+	}
+	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
+	if (skey & _PAGE_CHANGED)
+		page_clear_dirty(page);
+	rcp_unlock(ptep);
+	return dirty;
+}
+#endif
+
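For completeness, a hedged sketch of the consumer side: migration code would call the new helper once per guest page and move the result into a dirty bitmap. Only kvm_s390_test_and_clear_page_dirty() comes from this patch; the wrapper, its parameters, and the bitmap handling are assumptions:

/* hypothetical caller, not part of the patch */
static int note_page_dirty(struct mm_struct *mm, pte_t *ptep,
			   unsigned long *bitmap, unsigned long nr)
{
	int dirty = kvm_s390_test_and_clear_page_dirty(mm, ptep);

	if (dirty < 0)
		return dirty;		/* -EINVAL: mm has no pgstes */
	if (dirty)
		set_bit(nr, bitmap);	/* page nr must be transferred again */
	return 0;
}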
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)