8229258: Backport patch from JDK 14 to allow building with Clang 13

This patch backports changes from JDK 14 commit ae5615c6142a4dc0d9033462f4880d7b3c127e26. Unlike the original patch, it does not rename the class to markWord; instead it converts markOop into a markWord-equivalent value class in place. This allows the JDK to be built with Clang 13 by avoiding undefined behavior: the old markOopDesc encoded the mark word bits in the object pointer itself and invoked member functions through that fake pointer. See https://bugs.openjdk.java.net/browse/JDK-8229258
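
The change is shaped roughly like the following simplified sketch (illustrative only, not the actual HotSpot declarations; see markOop.hpp for the real ones):

  #include <cstdint>

  namespace before {
  // markOop was a pointer typedef whose bits *are* the mark word. Calling a
  // member function through a pointer that does not address a real object is
  // undefined behavior, which newer compilers such as Clang 13 exploit.
  class markOopDesc {
   public:
    enum { lock_mask = 3, unlocked_value = 1 };
    uintptr_t value() const { return (uintptr_t) this; } // 'this' holds raw bits
    bool is_unlocked() const { return (value() & lock_mask) == unlocked_value; }
  };
  typedef markOopDesc* markOop;
  }  // namespace before

  namespace after {
  // markOop becomes a plain value carrier: the bits live in a member and no
  // bogus pointer is ever formed. The static constants move with the class,
  // which is why every markOopDesc:: reference below becomes markOop::.
  class markOop {
    uintptr_t _value;
   public:
    enum { lock_mask = 3, unlocked_value = 1 };
    explicit markOop(uintptr_t v) : _value(v) {}
    uintptr_t value() const { return _value; }
    bool is_unlocked() const { return (_value & lock_mask) == unlocked_value; }
  };
  }  // namespace after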

Bug: 197273045
Test: builds
Change-Id: Ie7501cf66eb03e4bc5f30ea4fb0af9fd8fe16d38
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index e8a63ff..382800f 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -3397,11 +3397,11 @@
 
     // Check for existing monitor
     if ((EmitSync & 0x02) == 0) {
-      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
+      __ tbnz(disp_hdr, exact_log2(markOop::monitor_value), object_has_monitor);
     }
 
     // Set tmp to be (markOop of object | UNLOCK_VALUE).
-    __ orr(tmp, disp_hdr, markOopDesc::unlocked_value);
+    __ orr(tmp, disp_hdr, markOop::unlocked_value);
 
     // Initialize the box. (Must happen before we update the object mark!)
     __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
@@ -3425,7 +3425,7 @@
     // markOop of object (disp_hdr) with the stack pointer.
     __ mov(rscratch1, sp);
     __ sub(disp_hdr, disp_hdr, rscratch1);
-    __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOopDesc::lock_mask_in_place));
+    __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOop::lock_mask_in_place));
     // If condition is true we are cont and hence we can store 0 as the
     // displaced header in the box, which indicates that it is a recursive lock.
     __ ands(tmp/*==0?*/, disp_hdr, tmp);   // Sets flags for result
@@ -3440,15 +3440,15 @@
       // otherwise m->owner may contain a thread or a stack address.
       //
       // Try to CAS m->owner from NULL to current thread.
-      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
+      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOop::monitor_value));
     __ cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
                /*release*/ true, /*weak*/ false, noreg); // Sets flags for result
 
       // Store a non-null value into the box to avoid looking like a re-entrant
       // lock. The fast-path monitor unlock code checks for
-      // markOopDesc::monitor_value so use markOopDesc::unused_mark which has the
+      // markOop::monitor_value so use markOop::unused_mark which has the
       // relevant bit set, and also matches ObjectSynchronizer::slow_enter.
-      __ mov(tmp, (address)markOopDesc::unused_mark());
+      __ mov(tmp, (address)markOop::unused_mark());
       __ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
     }
 
@@ -3488,7 +3488,7 @@
     // Handle existing monitor.
     if ((EmitSync & 0x02) == 0) {
       __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
-      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
+      __ tbnz(disp_hdr, exact_log2(markOop::monitor_value), object_has_monitor);
     }
 
     // Check if it is still a light weight lock, this is true if we
@@ -3504,7 +3504,7 @@
     // Handle existing monitor.
     if ((EmitSync & 0x02) == 0) {
       __ bind(object_has_monitor);
-      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
+      __ add(tmp, tmp, -markOop::monitor_value); // monitor
       __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
       __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
       __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index 218113b..647aa78 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -82,7 +82,7 @@
   // Load object header
   ldr(hdr, Address(obj, hdr_offset));
   // and mark it as unlocked
-  orr(hdr, hdr, markOopDesc::unlocked_value);
+  orr(hdr, hdr, markOop::unlocked_value);
   // save unlocked object header into the displaced header location on the stack
   str(hdr, Address(disp_hdr, 0));
   // test if object header is still the same (i.e. unlocked), and if so, store the
@@ -176,7 +176,7 @@
     ldr(t1, Address(klass, Klass::prototype_header_offset()));
   } else {
     // This assumes that all prototype bits fit in an int32_t
-    mov(t1, (int32_t)(intptr_t)markOopDesc::prototype());
+    mov(t1, (int32_t)(intptr_t)markOop::prototype());
   }
   str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
 
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
index abc689f..90bc16a 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
@@ -215,9 +215,9 @@
   Label done;
   __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
   __ eon(tmp, tmp, zr);
-  __ ands(zr, tmp, markOopDesc::lock_mask_in_place);
+  __ ands(zr, tmp, markOop::lock_mask_in_place);
   __ br(Assembler::NE, done);
-  __ orr(tmp, tmp, markOopDesc::marked_value);
+  __ orr(tmp, tmp, markOop::marked_value);
   __ eon(dst, tmp, zr);
   __ bind(done);
 
@@ -616,11 +616,11 @@
   Label slow_path;
   __ ldr(tmp1, Address(res, oopDesc::mark_offset_in_bytes()));
   __ eon(tmp1, tmp1, zr);
-  __ ands(zr, tmp1, markOopDesc::lock_mask_in_place);
+  __ ands(zr, tmp1, markOop::lock_mask_in_place);
   __ br(Assembler::NE, slow_path);
 
   // Decode forwarded object.
-  __ orr(tmp1, tmp1, markOopDesc::marked_value);
+  __ orr(tmp1, tmp1, markOop::marked_value);
   __ eon(res, tmp1, zr);
   __ b(*stub->continuation());
 
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 4bab77d..9ab255d 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -475,7 +475,7 @@
     counters = BiasedLocking::counters();
 
   assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1, rscratch2, noreg);
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+  assert(markOop::age_shift == markOop::lock_bits + markOop::biased_lock_bits, "biased locking makes assumptions about bit layout");
   Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
   Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
   Address saved_mark_addr(lock_reg, 0);
@@ -492,15 +492,15 @@
     null_check_offset = offset();
     ldr(swap_reg, mark_addr);
   }
-  andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
-  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+  andr(tmp_reg, swap_reg, markOop::biased_lock_mask_in_place);
+  cmp(tmp_reg, markOop::biased_lock_pattern);
   br(Assembler::NE, cas_label);
   // The bias pattern is present in the object's header. Need to check
   // whether the bias owner and the epoch are both still current.
   load_prototype_header(tmp_reg, obj_reg);
   orr(tmp_reg, tmp_reg, rthread);
   eor(tmp_reg, swap_reg, tmp_reg);
-  andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
+  andr(tmp_reg, tmp_reg, ~((int) markOop::age_mask_in_place));
   if (counters != NULL) {
     Label around;
     cbnz(tmp_reg, around);
@@ -523,7 +523,7 @@
   // If the low three bits in the xor result aren't clear, that means
   // the prototype header is no longer biased and we have to revoke
   // the bias on this object.
-  andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
+  andr(rscratch1, tmp_reg, markOop::biased_lock_mask_in_place);
   cbnz(rscratch1, try_revoke_bias);
 
   // Biasing is still enabled for this data type. See whether the
@@ -535,7 +535,7 @@
   // that the current epoch is invalid in order to do this because
   // otherwise the manipulations it performs on the mark word are
   // illegal.
-  andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
+  andr(rscratch1, tmp_reg, markOop::epoch_mask_in_place);
   cbnz(rscratch1, try_rebias);
 
   // The epoch of the current bias is still valid but we know nothing
@@ -546,7 +546,7 @@
   // don't accidentally blow away another thread's valid bias.
   {
     Label here;
-    mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+    mov(rscratch1, markOop::biased_lock_mask_in_place | markOop::age_mask_in_place | markOop::epoch_mask_in_place);
     andr(swap_reg, swap_reg, rscratch1);
     orr(tmp_reg, swap_reg, rthread);
     cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
@@ -631,8 +631,8 @@
   // lock, the object could not be rebiased toward another thread, so
   // the bias bit would be clear.
   ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-  andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
-  cmp(temp_reg, markOopDesc::biased_lock_pattern);
+  andr(temp_reg, temp_reg, markOop::biased_lock_mask_in_place);
+  cmp(temp_reg, markOop::biased_lock_pattern);
   br(Assembler::EQ, done);
 }
 
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index 6fd216b..8c5a27d 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -3611,7 +3611,7 @@
     if (UseBiasedLocking) {
       __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
     } else {
-      __ mov(rscratch1, (intptr_t)markOopDesc::prototype());
+      __ mov(rscratch1, (intptr_t)markOop::prototype());
     }
     __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
     __ store_klass_gap(r0, zr);  // zero klass gap for compressed oops
diff --git a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
index facfbdd..a6de7c0 100644
--- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
@@ -101,7 +101,7 @@
   if(UseBiasedLocking && !len->is_valid()) {
     ldr(tmp, Address(klass, Klass::prototype_header_offset()));
   } else {
-    mov(tmp, (intptr_t)markOopDesc::prototype());
+    mov(tmp, (intptr_t)markOop::prototype());
   }
 
 #ifdef AARCH64
@@ -282,8 +282,8 @@
   ldr(hdr, obj);
 
   // Test if object is already locked
-  assert(markOopDesc::unlocked_value == 1, "adjust this code");
-  tbnz(hdr, exact_log2(markOopDesc::unlocked_value), fast_lock);
+  assert(markOop::unlocked_value == 1, "adjust this code");
+  tbnz(hdr, exact_log2(markOop::unlocked_value), fast_lock);
 
   // Check for recursive locking
   // See comments in InterpreterMacroAssembler::lock_object for
@@ -312,7 +312,7 @@
   ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
 
   str(obj, Address(disp_hdr, obj_offset));
-  tst(hdr, markOopDesc::unlocked_value);
+  tst(hdr, markOop::unlocked_value);
   b(fast_lock, ne);
 
   // Check for recursive locking
diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp
index 273f92b..ceb5deb 100644
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp
@@ -995,8 +995,8 @@
     ldr(Rmark, Robj);
 
     // Test if object is already locked
-    assert(markOopDesc::unlocked_value == 1, "adjust this code");
-    tbz(Rmark, exact_log2(markOopDesc::unlocked_value), already_locked);
+    assert(markOop::unlocked_value == 1, "adjust this code");
+    tbz(Rmark, exact_log2(markOop::unlocked_value), already_locked);
 
 #else // AARCH64
 
@@ -1009,7 +1009,7 @@
     ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
 
     // Test if object is already locked
-    tst(Rmark, markOopDesc::unlocked_value);
+    tst(Rmark, markOop::unlocked_value);
     b(already_locked, eq);
 
 #endif // !AARCH64
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
index 9e22fd1..87245ff 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
@@ -1933,7 +1933,7 @@
   }
 #endif
 
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+  assert(markOop::age_shift == markOop::lock_bits + markOop::biased_lock_bits, "biased locking makes assumptions about bit layout");
   Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
 
   // Biased locking
@@ -1955,8 +1955,8 @@
   // On MP platform loads could return 'stale' values in some cases.
   // That is acceptable since either CAS or slow case path is taken in the worst case.
 
-  andr(tmp_reg, swap_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
-  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+  andr(tmp_reg, swap_reg, (uintx)markOop::biased_lock_mask_in_place);
+  cmp(tmp_reg, markOop::biased_lock_pattern);
 
   b(cas_label, ne);
 
@@ -1968,9 +1968,9 @@
   eor(tmp_reg, tmp_reg, swap_reg);
 
 #ifdef AARCH64
-  ands(tmp_reg, tmp_reg, ~((uintx) markOopDesc::age_mask_in_place));
+  ands(tmp_reg, tmp_reg, ~((uintx) markOop::age_mask_in_place));
 #else
-  bics(tmp_reg, tmp_reg, ((int) markOopDesc::age_mask_in_place));
+  bics(tmp_reg, tmp_reg, ((int) markOop::age_mask_in_place));
 #endif // AARCH64
 
 #ifndef PRODUCT
@@ -1993,7 +1993,7 @@
   // If the low three bits in the xor result aren't clear, that means
   // the prototype header is no longer biased and we have to revoke
   // the bias on this object.
-  tst(tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
+  tst(tmp_reg, (uintx)markOop::biased_lock_mask_in_place);
   b(try_revoke_bias, ne);
 
   // Biasing is still enabled for this data type. See whether the
@@ -2005,7 +2005,7 @@
   // that the current epoch is invalid in order to do this because
   // otherwise the manipulations it performs on the mark word are
   // illegal.
-  tst(tmp_reg, (uintx)markOopDesc::epoch_mask_in_place);
+  tst(tmp_reg, (uintx)markOop::epoch_mask_in_place);
   b(try_rebias, ne);
 
   // tmp_reg has the age, epoch and pattern bits cleared
@@ -2024,12 +2024,12 @@
 #ifdef AARCH64
   // Bit mask biased_lock + age + epoch is not a valid AArch64 logical immediate, as it has
   // cleared bit in the middle (cms bit). So it is loaded with separate instruction.
-  mov(tmp2, (markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place));
+  mov(tmp2, (markOop::biased_lock_mask_in_place | markOop::age_mask_in_place | markOop::epoch_mask_in_place));
   andr(swap_reg, swap_reg, tmp2);
 #else
   // until the assembler can be made smarter, we need to make some assumptions about the values
   // so we can optimize this:
-  assert((markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
+  assert((markOop::biased_lock_mask_in_place | markOop::age_mask_in_place | markOop::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
 
   mov(swap_reg, AsmOperand(swap_reg, lsl, 23));
   mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markOop with thread bits cleared (for CAS)
@@ -2062,7 +2062,7 @@
 
   // owner bits 'random'. Set them to Rthread.
 #ifdef AARCH64
-  mov(tmp2, (markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place));
+  mov(tmp2, (markOop::biased_lock_mask_in_place | markOop::age_mask_in_place | markOop::epoch_mask_in_place));
   andr(tmp_reg, tmp_reg, tmp2);
 #else
   mov(tmp_reg, AsmOperand(tmp_reg, lsl, 23));
@@ -2097,7 +2097,7 @@
 
   // owner bits 'random'. Clear them
 #ifdef AARCH64
-  mov(tmp2, (markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place));
+  mov(tmp2, (markOop::biased_lock_mask_in_place | markOop::age_mask_in_place | markOop::epoch_mask_in_place));
   andr(tmp_reg, tmp_reg, tmp2);
 #else
   mov(tmp_reg, AsmOperand(tmp_reg, lsl, 23));
@@ -2128,8 +2128,8 @@
   // the bias bit would be clear.
   ldr(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 
-  andr(tmp_reg, tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
-  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+  andr(tmp_reg, tmp_reg, (uintx)markOop::biased_lock_mask_in_place);
+  cmp(tmp_reg, markOop::biased_lock_pattern);
   b(done, eq);
 }
 
@@ -3018,7 +3018,7 @@
   // Invariant: Rmark loaded below does not contain biased lock pattern
 
   ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
-  tst(Rmark, markOopDesc::unlocked_value);
+  tst(Rmark, markOop::unlocked_value);
   b(fast_lock, ne);
 
   // Check for recursive lock
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index 25366a8..6288177 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -1228,22 +1228,22 @@
 
     __ ldr(Rtemp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 
-    assert(markOopDesc::unlocked_value == 1, "adjust this code");
-    __ tbz(Rtemp, exact_log2(markOopDesc::unlocked_value), slow_case);
+    assert(markOop::unlocked_value == 1, "adjust this code");
+    __ tbz(Rtemp, exact_log2(markOop::unlocked_value), slow_case);
 
     if (UseBiasedLocking) {
-      assert(is_power_of_2(markOopDesc::biased_lock_bit_in_place), "adjust this code");
-      __ tbnz(Rtemp, exact_log2(markOopDesc::biased_lock_bit_in_place), slow_case);
+      assert(is_power_of_2(markOop::biased_lock_bit_in_place), "adjust this code");
+      __ tbnz(Rtemp, exact_log2(markOop::biased_lock_bit_in_place), slow_case);
     }
 
 #ifdef AARCH64
-    __ ands(Rtemp, Rtemp, (uintx)markOopDesc::hash_mask_in_place);
+    __ ands(Rtemp, Rtemp, (uintx)markOop::hash_mask_in_place);
     __ b(slow_case, eq);
-    __ logical_shift_right(R0, Rtemp, markOopDesc::hash_shift);
+    __ logical_shift_right(R0, Rtemp, markOop::hash_shift);
     __ ret();
 #else
-    __ bics(Rtemp, Rtemp, ~markOopDesc::hash_mask_in_place);
-    __ mov(R0, AsmOperand(Rtemp, lsr, markOopDesc::hash_shift), ne);
+    __ bics(Rtemp, Rtemp, ~markOop::hash_mask_in_place);
+    __ mov(R0, AsmOperand(Rtemp, lsr, markOop::hash_shift), ne);
     __ bx(LR, ne);
 #endif // AARCH64
 
@@ -1583,8 +1583,8 @@
     __ ldr(mark, sync_obj);
 
     // Test if object is already locked
-    assert(markOopDesc::unlocked_value == 1, "adjust this code");
-    __ tbnz(mark, exact_log2(markOopDesc::unlocked_value), fast_lock);
+    assert(markOop::unlocked_value == 1, "adjust this code");
+    __ tbnz(mark, exact_log2(markOop::unlocked_value), fast_lock);
 
     // Check for recursive lock
     // See comments in InterpreterMacroAssembler::lock_object for
@@ -1610,7 +1610,7 @@
 
     __ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
     __ sub(disp_hdr, FP, lock_slot_fp_offset);
-    __ tst(mark, markOopDesc::unlocked_value);
+    __ tst(mark, markOop::unlocked_value);
     __ b(fast_lock, ne);
 
     // Check for recursive lock
diff --git a/src/hotspot/cpu/arm/templateTable_arm.cpp b/src/hotspot/cpu/arm/templateTable_arm.cpp
index 3adf9fb..2899100 100644
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp
@@ -4607,7 +4607,7 @@
     if (UseBiasedLocking) {
       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
     } else {
-      __ mov_slow(Rtemp, (intptr_t)markOopDesc::prototype());
+      __ mov_slow(Rtemp, (intptr_t)markOop::prototype());
     }
     // mark
     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 0be7290..c6f6007 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -110,7 +110,7 @@
   }
 
   // ... and mark it unlocked.
-  ori(Rmark, Rmark, markOopDesc::unlocked_value);
+  ori(Rmark, Rmark, markOop::unlocked_value);
 
   // Save unlocked object header into the displaced header location on the stack.
   std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
@@ -137,7 +137,7 @@
   bind(cas_failed);
   // We did not find an unlocked object so see if this is a recursive case.
   sub(Rscratch, Rscratch, R1_SP);
-  load_const_optimized(R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+  load_const_optimized(R0, (~(os::vm_page_size()-1) | markOop::lock_mask_in_place));
   and_(R0/*==0?*/, Rscratch, R0);
   std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
   bne(CCR0, slow_int);
@@ -215,7 +215,7 @@
   if (UseBiasedLocking && !len->is_valid()) {
     ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
   } else {
-    load_const_optimized(t1, (intx)markOopDesc::prototype());
+    load_const_optimized(t1, (intx)markOop::prototype());
   }
   std(t1, oopDesc::mark_offset_in_bytes(), obj);
   store_klass(obj, klass);
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index e800e7d..728f3c9 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -901,7 +901,7 @@
     }
 
     // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
-    ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+    ori(displaced_header, displaced_header, markOop::unlocked_value);
 
     // monitor->lock()->set_displaced_header(displaced_header);
 
@@ -942,7 +942,7 @@
     sub(current_header, current_header, R1_SP);
 
     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
-    load_const_optimized(tmp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
+    load_const_optimized(tmp, ~(os::vm_page_size()-1) | markOop::lock_mask_in_place);
 
     and_(R0/*==0?*/, current_header, tmp);
     // If condition is true we are done and hence we can store 0 in the displaced
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index be0125d..112e7ce 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -2089,7 +2089,7 @@
   // whether the epoch is still valid
   // Note that the runtime guarantees sufficient alignment of JavaThread
   // pointers to allow age to be placed into low bits
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+  assert(markOop::age_shift == markOop::lock_bits + markOop::biased_lock_bits,
          "biased locking makes assumptions about bit layout");
 
   if (PrintBiasedLockingStatistics) {
@@ -2099,13 +2099,13 @@
     stwx(temp_reg, temp2_reg);
   }
 
-  andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
-  cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+  andi(temp_reg, mark_reg, markOop::biased_lock_mask_in_place);
+  cmpwi(cr_reg, temp_reg, markOop::biased_lock_pattern);
   bne(cr_reg, cas_label);
 
   load_klass(temp_reg, obj_reg);
 
-  load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+  load_const_optimized(temp2_reg, ~((int) markOop::age_mask_in_place));
   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
   orr(temp_reg, R16_thread, temp_reg);
   xorr(temp_reg, mark_reg, temp_reg);
@@ -2136,7 +2136,7 @@
   // If the low three bits in the xor result aren't clear, that means
   // the prototype header is no longer biased and we have to revoke
   // the bias on this object.
-  andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
+  andi(temp2_reg, temp_reg, markOop::biased_lock_mask_in_place);
   cmpwi(cr_reg, temp2_reg, 0);
   bne(cr_reg, try_revoke_bias);
 
@@ -2150,10 +2150,10 @@
   // otherwise the manipulations it performs on the mark word are
   // illegal.
 
-  int shift_amount = 64 - markOopDesc::epoch_shift;
+  int shift_amount = 64 - markOop::epoch_shift;
   // rotate epoch bits to right (little) end and set other bits to 0
   // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
-  rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
+  rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOop::epoch_bits);
   // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
   bne(CCR0, try_rebias);
 
@@ -2163,9 +2163,9 @@
   // fails we will go in to the runtime to revoke the object's bias.
   // Note that we first construct the presumed unbiased header so we
   // don't accidentally blow away another thread's valid bias.
-  andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
-                                markOopDesc::age_mask_in_place |
-                                markOopDesc::epoch_mask_in_place));
+  andi(mark_reg, mark_reg, (markOop::biased_lock_mask_in_place |
+                                markOop::age_mask_in_place |
+                                markOop::epoch_mask_in_place));
   orr(temp_reg, R16_thread, mark_reg);
 
   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -2198,7 +2198,7 @@
   // bias in the current epoch. In other words, we allow transfer of
   // the bias from one thread to another directly in this situation.
   load_klass(temp_reg, obj_reg);
-  andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+  andi(temp2_reg, mark_reg, markOop::age_mask_in_place);
   orr(temp2_reg, R16_thread, temp2_reg);
   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
   orr(temp_reg, temp2_reg, temp_reg);
@@ -2235,7 +2235,7 @@
   // normal locking code.
   load_klass(temp_reg, obj_reg);
   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
-  andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+  andi(temp2_reg, mark_reg, markOop::age_mask_in_place);
   orr(temp_reg, temp_reg, temp2_reg);
 
   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -2275,9 +2275,9 @@
   // the bias bit would be clear.
 
   ld(temp_reg, 0, mark_addr);
-  andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
+  andi(temp_reg, temp_reg, markOop::biased_lock_mask_in_place);
 
-  cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+  cmpwi(cr_reg, temp_reg, markOop::biased_lock_pattern);
   beq(cr_reg, done);
 }
 
@@ -2698,7 +2698,7 @@
     load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
     bind(L_rtm_retry);
   }
-  andi_(R0, mark_word, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
+  andi_(R0, mark_word, markOop::monitor_value);  // inflated vs stack-locked|neutral|biased
   bne(CCR0, IsInflated);
 
   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@@ -2717,8 +2717,8 @@
   tbegin_();
   beq(CCR0, L_on_abort);
   ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);         // Reload in transaction, conflicts need to be tracked.
-  andi(R0, mark_word, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
-  cmpwi(flag, R0, markOopDesc::unlocked_value);                // bits = 001 unlocked
+  andi(R0, mark_word, markOop::biased_lock_mask_in_place); // look at 3 lock bits
+  cmpwi(flag, R0, markOop::unlocked_value);                // bits = 001 unlocked
   beq(flag, DONE_LABEL);                                       // all done if unlocked
 
   if (UseRTMXendForLockBusy) {
@@ -2755,9 +2755,9 @@
   assert(UseRTMLocking, "why call this otherwise?");
   Label L_rtm_retry, L_decrement_retry, L_on_abort;
   // Clean monitor_value bit to get valid pointer.
-  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOop::monitor_value;
 
-  // Store non-null, using boxReg instead of (intptr_t)markOopDesc::unused_mark().
+  // Store non-null, using boxReg instead of (intptr_t)markOop::unused_mark().
   std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg);
   const Register tmpReg = boxReg;
   const Register owner_addr_Reg = mark_word;
@@ -2802,7 +2802,7 @@
     // Restore owner_addr_Reg
     ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);
 #ifdef ASSERT
-    andi_(R0, mark_word, markOopDesc::monitor_value);
+    andi_(R0, mark_word, markOop::monitor_value);
     asm_assert_ne("must be inflated", 0xa754); // Deflating only allowed at safepoint.
 #endif
     addi(owner_addr_Reg, mark_word, owner_offset);
@@ -2869,12 +2869,12 @@
   // Handle existing monitor.
   if ((EmitSync & 0x02) == 0) {
     // The object has an existing monitor iff (mark & monitor_value) != 0.
-    andi_(temp, displaced_header, markOopDesc::monitor_value);
+    andi_(temp, displaced_header, markOop::monitor_value);
     bne(CCR0, object_has_monitor);
   }
 
   // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
-  ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+  ori(displaced_header, displaced_header, markOop::unlocked_value);
 
   // Load Compare Value application register.
 
@@ -2905,7 +2905,7 @@
   // Check if the owner is self by comparing the value in the markOop of object
   // (current_header) with the stack pointer.
   sub(current_header, current_header, R1_SP);
-  load_const_optimized(temp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
+  load_const_optimized(temp, ~(os::vm_page_size()-1) | markOop::lock_mask_in_place);
 
   and_(R0/*==0?*/, current_header, temp);
   // If condition is true we are cont and hence we can store 0 as the
@@ -2930,7 +2930,7 @@
 #endif // INCLUDE_RTM_OPT
 
     // Try to CAS m->owner from NULL to current thread.
-    addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
+    addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOop::monitor_value);
     cmpxchgd(/*flag=*/flag,
              /*current_value=*/current_header,
              /*compare_value=*/(intptr_t)0,
@@ -2985,8 +2985,8 @@
     assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
     Label L_regular_unlock;
     ld(current_header, oopDesc::mark_offset_in_bytes(), oop);         // fetch markword
-    andi(R0, current_header, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
-    cmpwi(flag, R0, markOopDesc::unlocked_value);                     // bits = 001 unlocked
+    andi(R0, current_header, markOop::biased_lock_mask_in_place); // look at 3 lock bits
+    cmpwi(flag, R0, markOop::unlocked_value);                     // bits = 001 unlocked
     bne(flag, L_regular_unlock);                                      // else RegularLock
     tend_();                                                          // otherwise end...
     b(cont);                                                          // ... and we're done
@@ -3006,7 +3006,7 @@
     // The object has an existing monitor iff (mark & monitor_value) != 0.
     RTM_OPT_ONLY( if (!(UseRTMForStackLocks && use_rtm)) ) // skip load if already done
     ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
-    andi_(R0, current_header, markOopDesc::monitor_value);
+    andi_(R0, current_header, markOop::monitor_value);
     bne(CCR0, object_has_monitor);
   }
 
@@ -3030,7 +3030,7 @@
     b(cont);
 
     bind(object_has_monitor);
-    addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
+    addi(current_header, current_header, -markOop::monitor_value); // monitor
     ld(temp,             ObjectMonitor::owner_offset_in_bytes(), current_header);
 
     // It's inflated.
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index b1411b0..5e59270 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -3806,7 +3806,7 @@
     if (UseBiasedLocking) {
       __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
     } else {
-      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
+      __ load_const_optimized(Rscratch, markOop::prototype(), R0);
     }
     __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
 
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index 3f6d7ee..5c71e7b 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -96,7 +96,7 @@
   }
 
   // and mark it as unlocked.
-  z_oill(hdr, markOopDesc::unlocked_value);
+  z_oill(hdr, markOop::unlocked_value);
   // Save unlocked object header into the displaced header location on the stack.
   z_stg(hdr, Address(disp_hdr, (intptr_t)0));
   // Test if object header is still the same (i.e. unlocked), and if so, store the
@@ -115,19 +115,19 @@
   // If the object header was not the same, it is now in the hdr register.
   // => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
   //
-  // 1) (hdr & markOopDesc::lock_mask_in_place) == 0
+  // 1) (hdr & markOop::lock_mask_in_place) == 0
   // 2) rsp <= hdr
   // 3) hdr <= rsp + page_size
   //
   // These 3 tests can be done by evaluating the following expression:
   //
-  // (hdr - Z_SP) & (~(page_size-1) | markOopDesc::lock_mask_in_place)
+  // (hdr - Z_SP) & (~(page_size-1) | markOop::lock_mask_in_place)
   //
   // assuming both the stack pointer and page_size have their least
   // significant 2 bits cleared and page_size is a power of 2
   z_sgr(hdr, Z_SP);
 
-  load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+  load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markOop::lock_mask_in_place));
   z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
   // For recursive locking, the result is zero. => Save it in the displaced header
   // location (NULL in the displaced hdr location indicates recursive locking).
@@ -192,7 +192,7 @@
     z_lg(t1, Address(klass, Klass::prototype_header_offset()));
   } else {
     // This assumes that all prototype bits fit in an int32_t.
-    load_const_optimized(t1, (intx)markOopDesc::prototype());
+    load_const_optimized(t1, (intx)markOop::prototype());
   }
   z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
 
diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp
index 5143dc4..e36f397 100644
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp
@@ -989,7 +989,7 @@
   }
 
   // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
-  z_oill(displaced_header, markOopDesc::unlocked_value);
+  z_oill(displaced_header, markOop::unlocked_value);
 
   // monitor->lock()->set_displaced_header(displaced_header);
 
@@ -1021,7 +1021,7 @@
 
   // The prior sequence "LGR, NGR, LTGR" can be done better
   // (Z_R1 is temp and not used after here).
-  load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+  load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markOop::lock_mask_in_place));
   z_ngr(Z_R0, current_header); // AND sets CC (result eq/ne 0)
 
   // If condition is true we are done and hence we can store 0 in the displaced
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 58ed963..ad74844 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -3200,15 +3200,15 @@
   // whether the epoch is still valid.
   // Note that the runtime guarantees sufficient alignment of JavaThread
   // pointers to allow age to be placed into low bits.
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+  assert(markOop::age_shift == markOop::lock_bits + markOop::biased_lock_bits,
          "biased locking makes assumptions about bit layout");
   z_lr(temp_reg, mark_reg);
-  z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
-  z_chi(temp_reg, markOopDesc::biased_lock_pattern);
+  z_nilf(temp_reg, markOop::biased_lock_mask_in_place);
+  z_chi(temp_reg, markOop::biased_lock_pattern);
   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
 
   load_prototype_header(temp_reg, obj_reg);
-  load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+  load_const_optimized(temp2_reg, ~((int) markOop::age_mask_in_place));
 
   z_ogr(temp_reg, Z_thread);
   z_xgr(temp_reg, mark_reg);
@@ -3234,7 +3234,7 @@
   // If the low three bits in the xor result aren't clear, that means
   // the prototype header is no longer biased and we have to revoke
   // the bias on this object.
-  z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
+  z_tmll(temp_reg, markOop::biased_lock_mask_in_place);
   z_brnaz(try_revoke_bias);
 
   // Biasing is still enabled for this data type. See whether the
@@ -3246,7 +3246,7 @@
   // that the current epoch is invalid in order to do this because
   // otherwise the manipulations it performs on the mark word are
   // illegal.
-  z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
+  z_tmll(temp_reg, markOop::epoch_mask_in_place);
   z_brnaz(try_rebias);
 
   //----------------------------------------------------------------------------
@@ -3256,8 +3256,8 @@
   // fails we will go in to the runtime to revoke the object's bias.
   // Note that we first construct the presumed unbiased header so we
   // don't accidentally blow away another thread's valid bias.
-  z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
-         markOopDesc::epoch_mask_in_place);
+  z_nilf(mark_reg, markOop::biased_lock_mask_in_place | markOop::age_mask_in_place |
+         markOop::epoch_mask_in_place);
   z_lgr(temp_reg, Z_thread);
   z_llgfr(mark_reg, mark_reg);
   z_ogr(temp_reg, mark_reg);
@@ -3289,7 +3289,7 @@
   // bias in the current epoch. In other words, we allow transfer of
   // the bias from one thread to another directly in this situation.
 
-  z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+  z_nilf(mark_reg, markOop::biased_lock_mask_in_place | markOop::age_mask_in_place | markOop::epoch_mask_in_place);
   load_prototype_header(temp_reg, obj_reg);
   z_llgfr(mark_reg, mark_reg);
 
@@ -3350,9 +3350,9 @@
   BLOCK_COMMENT("biased_locking_exit {");
 
   z_lg(temp_reg, 0, mark_addr);
-  z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
+  z_nilf(temp_reg, markOop::biased_lock_mask_in_place);
 
-  z_chi(temp_reg, markOopDesc::biased_lock_pattern);
+  z_chi(temp_reg, markOop::biased_lock_pattern);
   z_bre(done);
   BLOCK_COMMENT("} biased_locking_exit");
 }
@@ -3375,14 +3375,14 @@
   // Handle existing monitor.
   if ((EmitSync & 0x01) == 0) {
     // The object has an existing monitor iff (mark & monitor_value) != 0.
-    guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
+    guarantee(Immediate::is_uimm16(markOop::monitor_value), "must be half-word");
     z_lr(temp, displacedHeader);
-    z_nill(temp, markOopDesc::monitor_value);
+    z_nill(temp, markOop::monitor_value);
     z_brne(object_has_monitor);
   }
 
-  // Set mark to markOop | markOopDesc::unlocked_value.
-  z_oill(displacedHeader, markOopDesc::unlocked_value);
+  // Set mark to markOop | markOop::unlocked_value.
+  z_oill(displacedHeader, markOop::unlocked_value);
 
   // Load Compare Value application register.
 
@@ -3401,7 +3401,7 @@
   // We did not see an unlocked object so try the fast recursive case.
 
   z_sgr(currentHeader, Z_SP);
-  load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+  load_const_optimized(temp, (~(os::vm_page_size()-1) | markOop::lock_mask_in_place));
 
   z_ngr(currentHeader, temp);
   //   z_brne(done);
@@ -3412,7 +3412,7 @@
 
   if ((EmitSync & 0x01) == 0) {
     Register zero = temp;
-    Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
+    Register monitor_tagged = displacedHeader; // Tagged with markOop::monitor_value.
     bind(object_has_monitor);
     // The object's monitor m is unlocked iff m->owner == NULL,
     // otherwise m->owner may contain a thread or a stack address.
@@ -3463,8 +3463,8 @@
   if ((EmitSync & 0x02) == 0) {
     // The object has an existing monitor iff (mark & monitor_value) != 0.
     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
-    guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
-    z_nill(currentHeader, markOopDesc::monitor_value);
+    guarantee(Immediate::is_uimm16(markOop::monitor_value), "must be half-word");
+    z_nill(currentHeader, markOop::monitor_value);
     z_brne(object_has_monitor);
   }
 
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index 5f6c7f2..f095cc5 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -3865,7 +3865,7 @@
       __ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
     } else {
       __ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
-                     (long)markOopDesc::prototype());
+                     (long)markOop::prototype());
     }
 
     __ store_klass_gap(Rzero, RallocatedObject);  // Zero klass gap for compressed oops.
diff --git a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp
index 27c525a..d9bbafd 100644
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp
@@ -97,7 +97,7 @@
   mov(Rbox, Rscratch);
 
   // and mark it unlocked
-  or3(Rmark, markOopDesc::unlocked_value, Rmark);
+  or3(Rmark, markOop::unlocked_value, Rmark);
 
   // save unlocked object header into the displaced header location on the stack
   st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
@@ -179,7 +179,7 @@
   if (UseBiasedLocking && !len->is_valid()) {
     ld_ptr(klass, in_bytes(Klass::prototype_header_offset()), t1);
   } else {
-    set((intx)markOopDesc::prototype(), t1);
+    set((intx)markOop::prototype(), t1);
   }
   st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
   if (UseCompressedClassPointers) {
diff --git a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp
index d6ff62c..eaf8afa 100644
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp
@@ -1212,7 +1212,7 @@
     // (cas clobbers the destination register)
     mov(lock_reg, temp_reg);
     // set mark reg to be (markOop of object | UNLOCK_VALUE)
-    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
+    or3(mark_reg, markOop::unlocked_value, mark_reg);
     // initialize the box  (Must happen before we update the object mark!)
     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
     // compare and exchange object_addr, markOop | 1, stack address of basicLock
@@ -1234,7 +1234,7 @@
     // (a) %sp -vs- markword proximity check, and,
     // (b) verify mark word LSBs == 0 (Stack-locked).
     //
-    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
+    // FFFFF003/FFFFFFFFFFFF003 is (markOop::lock_mask_in_place | -os::vm_page_size())
     // Note that the page size used for %sp proximity testing is arbitrary and is
     // unrelated to the actual MMU page size.  We use a 'logical' page size of
     // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
diff --git a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
index 497991b..17bacfb 100644
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
@@ -2469,15 +2469,15 @@
   // whether the epoch is still valid
   // Note that the runtime guarantees sufficient alignment of JavaThread
   // pointers to allow age to be placed into low bits
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
-  and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
-  cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
+  assert(markOop::age_shift == markOop::lock_bits + markOop::biased_lock_bits, "biased locking makes assumptions about bit layout");
+  and3(mark_reg, markOop::biased_lock_mask_in_place, temp_reg);
+  cmp_and_brx_short(temp_reg, markOop::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
 
   load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
   or3(G2_thread, temp_reg, temp_reg);
   xor3(mark_reg, temp_reg, temp_reg);
-  andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
+  andcc(temp_reg, ~((int) markOop::age_mask_in_place), temp_reg);
   if (counters != NULL) {
     cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
     // Reload mark_reg as we may need it later
@@ -2500,7 +2500,7 @@
   // If the low three bits in the xor result aren't clear, that means
   // the prototype header is no longer biased and we have to revoke
   // the bias on this object.
-  btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
+  btst(markOop::biased_lock_mask_in_place, temp_reg);
   brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
 
   // Biasing is still enabled for this data type. See whether the
@@ -2512,7 +2512,7 @@
   // that the current epoch is invalid in order to do this because
   // otherwise the manipulations it performs on the mark word are
   // illegal.
-  delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
+  delayed()->btst(markOop::epoch_mask_in_place, temp_reg);
   brx(Assembler::notZero, false, Assembler::pn, try_rebias);
 
   // The epoch of the current bias is still valid but we know nothing
@@ -2522,7 +2522,7 @@
   // Note that we first construct the presumed unbiased header so we
   // don't accidentally blow away another thread's valid bias.
   delayed()->and3(mark_reg,
-                  markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
+                  markOop::biased_lock_mask_in_place | markOop::age_mask_in_place | markOop::epoch_mask_in_place,
                   mark_reg);
   or3(G2_thread, mark_reg, temp_reg);
   cas_ptr(mark_addr.base(), mark_reg, temp_reg);
@@ -2603,8 +2603,8 @@
   // lock, the object could not be rebiased toward another thread, so
   // the bias bit would be clear.
   ld_ptr(mark_addr, temp_reg);
-  and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
-  cmp(temp_reg, markOopDesc::biased_lock_pattern);
+  and3(temp_reg, markOop::biased_lock_mask_in_place, temp_reg);
+  cmp(temp_reg, markOop::biased_lock_pattern);
   brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
   delayed();
   if (!allow_delay_slot_filling) {
@@ -2620,12 +2620,12 @@
 // box->dhw disposition - post-conditions at DONE_LABEL.
 // -   Successful inflated lock:  box->dhw != 0.
 //     Any non-zero value suffices.
-//     Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
+//     Consider G2_thread, rsp, boxReg, or markOop::unused_mark()
 // -   Successful Stack-lock: box->dhw == mark.
 //     box->dhw must contain the displaced mark word value
 // -   Failure -- icc.ZFlag == 0 and box->dhw is undefined.
 //     The slow-path fast_enter() and slow_enter() operators
-//     are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
+//     are responsible for setting box->dhw = NonZero (typically markOop::unused_mark()).
 // -   Biased: box->dhw is undefined
 //
 // SPARC refworkload performance - specifically jetstream and scimark - are
@@ -2667,8 +2667,8 @@
      // Save Rbox in Rscratch to be used for the cas operation
      mov(Rbox, Rscratch);
 
-     // set Rmark to markOop | markOopDesc::unlocked_value
-     or3(Rmark, markOopDesc::unlocked_value, Rmark);
+     // set Rmark to markOop | markOop::unlocked_value
+     or3(Rmark, markOop::unlocked_value, Rmark);
 
      // Initialize the box.  (Must happen before we update the object mark!)
      st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
@@ -2717,7 +2717,7 @@
       // Try stack-lock acquisition.
       // Beware: the 1st instruction is in a delay slot
       mov(Rbox,  Rscratch);
-      or3(Rmark, markOopDesc::unlocked_value, Rmark);
+      or3(Rmark, markOop::unlocked_value, Rmark);
       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
       cas_ptr(mark_addr.base(), Rmark, Rscratch);
       cmp(Rmark, Rscratch);
@@ -2779,7 +2779,7 @@
       // This presumes TSO, of course.
 
       mov(0, Rscratch);
-      or3(Rmark, markOopDesc::unlocked_value, Rmark);
+      or3(Rmark, markOop::unlocked_value, Rmark);
       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
       cas_ptr(mark_addr.base(), Rmark, Rscratch);
 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
@@ -2833,7 +2833,7 @@
       // set icc.zf : 1=success 0=failure
       // ST box->displaced_header = NonZero.
       // Any non-zero value suffices:
-      //    markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
+      //    markOop::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
       st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
       // Intentional fall-through into done
    }
diff --git a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp
index aa95c38..fafbdec 100644
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp
@@ -1835,19 +1835,19 @@
     // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
     // vm: see markOop.hpp.
     __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
-    __ sethi(markOopDesc::hash_mask, mask);
-    __ btst(markOopDesc::unlocked_value, header);
+    __ sethi(markOop::hash_mask, mask);
+    __ btst(markOop::unlocked_value, header);
     __ br(Assembler::zero, false, Assembler::pn, slowCase);
     if (UseBiasedLocking) {
       // Check if biased and fall through to runtime if so
       __ delayed()->nop();
-      __ btst(markOopDesc::biased_lock_bit_in_place, header);
+      __ btst(markOop::biased_lock_bit_in_place, header);
       __ br(Assembler::notZero, false, Assembler::pn, slowCase);
     }
-    __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
+    __ delayed()->or3(mask, markOop::hash_mask & 0x3ff, mask);
 
     // Check for a valid (non-zero) hash code and get its value.
-    __ srlx(header, markOopDesc::hash_shift, hash);
+    __ srlx(header, markOop::hash_shift, hash);
     __ andcc(hash, mask, hash);
     __ br(Assembler::equal, false, Assembler::pn, slowCase);
     __ delayed()->nop();
diff --git a/src/hotspot/cpu/sparc/templateTable_sparc.cpp b/src/hotspot/cpu/sparc/templateTable_sparc.cpp
index 67792cf..4b90eda 100644
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp
@@ -3518,7 +3518,7 @@
   if (UseBiasedLocking) {
     __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
   } else {
-    __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
+    __ set((intptr_t)markOop::prototype(), G4_scratch);
   }
   __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());       // mark
   __ store_klass_gap(G0, RallocatedObject);         // klass gap if compressed
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index 2535344..eea1552 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -59,7 +59,7 @@
   // Load object header
   movptr(hdr, Address(obj, hdr_offset));
   // and mark it as unlocked
-  orptr(hdr, markOopDesc::unlocked_value);
+  orptr(hdr, markOop::unlocked_value);
   // save unlocked object header into the displaced header location on the stack
   movptr(Address(disp_hdr, 0), hdr);
   // test if object header is still the same (i.e. unlocked), and if so, store the
@@ -154,7 +154,7 @@
     movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
   } else {
     // This assumes that all prototype bits fit in an int32_t
-    movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
+    movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOop::prototype().value());
   }
 #ifdef _LP64
   if (UseCompressedClassPointers) { // Take care not to kill klass
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
index cffa789..769002a 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
@@ -653,7 +653,7 @@
 
   // Decode offending in-memory value.
   // Test if-forwarded
-  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markOopDesc::marked_value);
+  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markOop::marked_value);
   __ jcc(Assembler::noParity, L_failure);  // When odd number of bits, then not forwarded
   __ jcc(Assembler::zero, L_failure);      // When it is 00, then also not forwarded
 
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index d3d62ea..c72e950 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -1118,7 +1118,7 @@
   assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
   assert(tmp_reg != noreg, "tmp_reg must be supplied");
   assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+  assert(markOop::age_shift == markOop::lock_bits + markOop::biased_lock_bits, "biased locking makes assumptions about bit layout");
   Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
   NOT_LP64( Address saved_mark_addr(lock_reg, 0); )
 
@@ -1138,8 +1138,8 @@
     movptr(swap_reg, mark_addr);
   }
   movptr(tmp_reg, swap_reg);
-  andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
-  cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
+  andptr(tmp_reg, markOop::biased_lock_mask_in_place);
+  cmpptr(tmp_reg, markOop::biased_lock_pattern);
   jcc(Assembler::notEqual, cas_label);
   // The bias pattern is present in the object's header. Need to check
   // whether the bias owner and the epoch are both still current.
@@ -1165,7 +1165,7 @@
   xorptr(swap_reg, tmp_reg);
   Register header_reg = swap_reg;
 #endif
-  andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
+  andptr(header_reg, ~((int) markOop::age_mask_in_place));
   if (counters != NULL) {
     cond_inc32(Assembler::zero,
                ExternalAddress((address) counters->biased_lock_entry_count_addr()));
@@ -1184,7 +1184,7 @@
   // If the low three bits in the xor result aren't clear, that means
   // the prototype header is no longer biased and we have to revoke
   // the bias on this object.
-  testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
+  testptr(header_reg, markOop::biased_lock_mask_in_place);
   jcc(Assembler::notZero, try_revoke_bias);
 
   // Biasing is still enabled for this data type. See whether the
@@ -1196,7 +1196,7 @@
   // that the current epoch is invalid in order to do this because
   // otherwise the manipulations it performs on the mark word are
   // illegal.
-  testptr(header_reg, markOopDesc::epoch_mask_in_place);
+  testptr(header_reg, markOop::epoch_mask_in_place);
   jccb(Assembler::notZero, try_rebias);
 
   // The epoch of the current bias is still valid but we know nothing
@@ -1207,7 +1207,7 @@
   // don't accidentally blow away another thread's valid bias.
   NOT_LP64( movptr(swap_reg, saved_mark_addr); )
   andptr(swap_reg,
-         markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+         markOop::biased_lock_mask_in_place | markOop::age_mask_in_place | markOop::epoch_mask_in_place);
 #ifdef _LP64
   movptr(tmp_reg, swap_reg);
   orptr(tmp_reg, r15_thread);
@@ -1307,8 +1307,8 @@
   // lock, the object could not be rebiased toward another thread, so
   // the bias bit would be clear.
   movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
-  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
+  andptr(temp_reg, markOop::biased_lock_mask_in_place);
+  cmpptr(temp_reg, markOop::biased_lock_pattern);
   jcc(Assembler::equal, done);
 }
 
@@ -1499,7 +1499,7 @@
     bind(L_rtm_retry);
   }
   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
-  testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
+  testptr(tmpReg, markOop::monitor_value);  // inflated vs stack-locked|neutral|biased
   jcc(Assembler::notZero, IsInflated);
 
   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@@ -1514,8 +1514,8 @@
   }
   xbegin(L_on_abort);
   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));       // fetch markword
-  andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
-  cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
+  andptr(tmpReg, markOop::biased_lock_mask_in_place); // look at 3 lock bits
+  cmpptr(tmpReg, markOop::unlocked_value);            // bits = 001 unlocked
   jcc(Assembler::equal, DONE_LABEL);        // all done if unlocked
 
   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
@@ -1541,7 +1541,7 @@
 // Use RTM for inflating locks
 // inputs: objReg (object to lock)
 //         boxReg (on-stack box address (displaced header location) - KILLED)
-//         tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
+//         tmpReg (ObjectMonitor address + markOop::monitor_value)
 void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
                                           Register scrReg, Register retry_on_busy_count_Reg,
                                           Register retry_on_abort_count_Reg,
@@ -1555,7 +1555,7 @@
   int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
 
   // Without cast to int32_t a movptr will destroy r10 which is typically obj
-  movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+  movptr(Address(boxReg, 0), (int32_t)intptr_t(markOop::unused_mark().value()));
   movptr(boxReg, tmpReg); // Save ObjectMonitor address
 
   if (RTMRetryCount > 0) {
@@ -1724,9 +1724,9 @@
     atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
   }
   if (EmitSync & 1) {
-      // set box->dhw = markOopDesc::unused_mark()
+      // set box->dhw = markOop::unused_mark()
       // Force all sync thru slow-path: slow_enter() and slow_exit()
-      movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+      movptr (Address(boxReg, 0), (int32_t)intptr_t(markOop::unused_mark().value()));
       cmpptr (rsp, (int32_t)NULL_WORD);
   } else {
     // Possible cases that we'll encounter in fast_lock
@@ -1768,11 +1768,11 @@
 #endif // INCLUDE_RTM_OPT
 
     movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));          // [FETCH]
-    testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+    testptr(tmpReg, markOop::monitor_value); // inflated vs stack-locked|neutral|biased
     jccb(Assembler::notZero, IsInflated);
 
     // Attempt stack-locking ...
-    orptr (tmpReg, markOopDesc::unlocked_value);
+    orptr (tmpReg, markOop::unlocked_value);
     movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
     if (os::is_MP()) {
       lock();
@@ -1798,7 +1798,7 @@
     jmp(DONE_LABEL);
 
     bind(IsInflated);
-    // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value
+    // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOop::monitor_value
 
 #if INCLUDE_RTM_OPT
     // Use the same RTM locking code in 32- and 64-bit VM.
@@ -1813,7 +1813,7 @@
 
     // boxReg refers to the on-stack BasicLock in the current frame.
     // We'd like to write:
-    //   set box->_displaced_header = markOopDesc::unused_mark().  Any non-0 value suffices.
+    //   set box->_displaced_header = markOop::unused_mark().  Any non-0 value suffices.
    // This is convenient but results in a ST-before-CAS penalty.  The following CAS suffers
     // additional latency as we have another ST in the store buffer that must drain.
 
@@ -1878,7 +1878,7 @@
        //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
        // Intentional fall-through into DONE_LABEL ...
     } else {
-       movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark()));  // results in ST-before-CAS penalty
+       movptr(Address(boxReg, 0), intptr_t(markOop::unused_mark().value()));  // results in ST-before-CAS penalty
        movptr(boxReg, tmpReg);
 
        // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
@@ -1923,9 +1923,9 @@
       lock();
     }
     cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
-    // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
+    // Unconditionally set box->_displaced_header = markOop::unused_mark().
     // Without cast to int32_t movptr will destroy r10 which is typically obj.
-    movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+    movptr(Address(boxReg, 0), (int32_t)intptr_t(markOop::unused_mark().value()));
     // Intentional fall-through into DONE_LABEL ...
     // Propagate ICC.ZF from CAS above into DONE_LABEL.
 #endif // _LP64
@@ -1999,8 +1999,8 @@
       assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
       Label L_regular_unlock;
       movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));           // fetch markword
-      andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
-      cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
+      andptr(tmpReg, markOop::biased_lock_mask_in_place); // look at 3 lock bits
+      cmpptr(tmpReg, markOop::unlocked_value);            // bits = 001 unlocked
       jccb(Assembler::notEqual, L_regular_unlock);  // if !HLE RegularLock
       xend();                                       // otherwise end...
       jmp(DONE_LABEL);                              // ... and we're done
@@ -2011,7 +2011,7 @@
     cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
     jcc   (Assembler::zero, DONE_LABEL);            // 0 indicates recursive stack-lock
     movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));             // Examine the object's markword
-    testptr(tmpReg, markOopDesc::monitor_value);    // Inflated?
+    testptr(tmpReg, markOop::monitor_value);    // Inflated?
     jccb  (Assembler::zero, Stacked);
 
     // It's inflated.
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp
index a68cbe8..973d6c4 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp
@@ -59,12 +59,12 @@
   __ movptr(result, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 
   // check if locked
-  __ testptr(result, markOopDesc::unlocked_value);
+  __ testptr(result, markOop::unlocked_value);
   __ jcc(Assembler::zero, slowCase);
 
   if (UseBiasedLocking) {
     // Check if biased and fall through to runtime if so
-    __ testptr(result, markOopDesc::biased_lock_bit_in_place);
+    __ testptr(result, markOop::biased_lock_bit_in_place);
     __ jcc(Assembler::notZero, slowCase);
   }
 
@@ -73,16 +73,16 @@
   // Read the header and build a mask to get its hash field.
   // Depend on hash_mask being at most 32 bits and avoid the use of hash_mask_in_place
   // because it could be larger than 32 bits in a 64-bit vm. See markOop.hpp.
-  __ shrptr(result, markOopDesc::hash_shift);
-  __ andptr(result, markOopDesc::hash_mask);
+  __ shrptr(result, markOop::hash_shift);
+  __ andptr(result, markOop::hash_mask);
 #else
-  __ andptr(result, markOopDesc::hash_mask_in_place);
+  __ andptr(result, markOop::hash_mask_in_place);
 #endif //_LP64
 
   // test if hashCode exists
   __ jcc(Assembler::zero, slowCase);
 #ifndef _LP64
-  __ shrptr(result, markOopDesc::hash_shift);
+  __ shrptr(result, markOop::hash_shift);
 #endif
   __ ret(0);
   __ bind(slowCase);
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
index 311a54f..b726023 100644
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -4103,7 +4103,7 @@
       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
     } else {
       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
-                (intptr_t)markOopDesc::prototype()); // header
+                (intptr_t)markOop::prototype().value()); // header
       __ pop(rcx);   // get saved klass back in the register.
     }
 #ifdef _LP64
diff --git a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp
index 9331d02..c944ac6 100644
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp
@@ -274,7 +274,7 @@
   if (method->is_synchronized()) {
     monitor = (BasicObjectLock*) istate->stack_base();
     oop lockee = monitor->obj();
-    markOop disp = lockee->mark()->set_unlocked();
+    markOop disp = lockee->mark().set_unlocked();
 
     monitor->lock()->set_displaced_header(disp);
     if (lockee->cas_set_mark((markOop)monitor, disp) != disp) {
@@ -421,7 +421,7 @@
     monitor->set_obj(NULL);
 
     if (header != NULL) {
-      markOop old_header = markOopDesc::encode(lock);
+      markOop old_header = markOop::encode(lock);
       if (rcvr->cas_set_mark(header, old_header) != old_header) {
         monitor->set_obj(rcvr); {
           HandleMark hm(thread);
diff --git a/src/hotspot/share/classfile/altHashing.cpp b/src/hotspot/share/classfile/altHashing.cpp
index 25d2728..4bc17bc 100644
--- a/src/hotspot/share/classfile/altHashing.cpp
+++ b/src/hotspot/share/classfile/altHashing.cpp
@@ -57,8 +57,8 @@
 // objects.  We don't want to call the synchronizer hash code to install
 // this value because it may safepoint.
 static intptr_t object_hash(Klass* k) {
-  intptr_t hc = k->java_mirror()->mark()->hash();
-  return hc != markOopDesc::no_hash ? hc : os::random();
+  intptr_t hc = k->java_mirror()->mark().hash();
+  return hc != markOop::no_hash ? hc : os::random();
 }
 
 // Seed value used for each alternative hash calculated.
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 0360de2..629c9f6 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -2181,7 +2181,7 @@
       // NOTE that we must only do this when the class is initially
       // defined, not each time it is referenced from a new class loader
       if (k->class_loader() == class_loader()) {
-        k->set_prototype_header(markOopDesc::biased_locking_prototype());
+        k->set_prototype_header(markOop::biased_locking_prototype());
       }
     }
 
diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
index c98691e..08866e1 100644
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
@@ -7755,10 +7755,10 @@
   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
   size_t i = num;
   oop  cur = _overflow_list;
-  const markOop proto = markOopDesc::prototype();
+  const markOop proto = markOop::prototype();
   NOT_PRODUCT(ssize_t n = 0;)
   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
-    next = oop(cur->mark_raw());
+    next = oop(cur->mark_raw().to_pointer());
     cur->set_mark_raw(proto);   // until proven otherwise
     assert(oopDesc::is_oop(cur), "Should be an oop");
     bool res = stack->push(cur);
@@ -7842,8 +7842,8 @@
   size_t i = num;
   oop cur = prefix;
   // Walk down the first "num" objects, unless we reach the end.
-  for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
-  if (cur->mark_raw() == NULL) {
+  for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
+  if (cur->mark_raw().to_pointer() == NULL) {
     // We have "num" or fewer elements in the list, so there
     // is nothing to return to the global list.
     // Write back the NULL in lieu of the BUSY we wrote
@@ -7853,9 +7853,9 @@
     }
   } else {
     // Chop off the suffix and return it to the global list.
-    assert(cur->mark_raw() != BUSY, "Error");
-    oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
-    cur->set_mark_raw(NULL);           // break off suffix
+    assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
+    oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
+    cur->set_mark_raw(markOop::from_pointer(NULL));           // break off suffix
     // It's possible that the list is still in the empty(busy) state
     // we left it in a short while ago; in that case we may be
     // able to place back the suffix without incurring the cost
@@ -7875,18 +7875,18 @@
       // Too bad, someone else sneaked in (at least) an element; we'll need
       // to do a splice. Find tail of suffix so we can prepend suffix to global
       // list.
-      for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
+      for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
       oop suffix_tail = cur;
-      assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
+      assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
              "Tautology");
       observed_overflow_list = _overflow_list;
       do {
         cur_overflow_list = observed_overflow_list;
         if (cur_overflow_list != BUSY) {
           // Do the splice ...
-          suffix_tail->set_mark_raw(markOop(cur_overflow_list));
+          suffix_tail->set_mark_raw(markOop::from_pointer((void*)cur_overflow_list));
         } else { // cur_overflow_list == BUSY
-          suffix_tail->set_mark_raw(NULL);
+          suffix_tail->set_mark_raw(markOop::from_pointer(NULL));
         }
         // ... and try to place spliced list back on overflow_list ...
         observed_overflow_list =
@@ -7898,11 +7898,11 @@
 
   // Push the prefix elements on work_q
   assert(prefix != NULL, "control point invariant");
-  const markOop proto = markOopDesc::prototype();
+  const markOop proto = markOop::prototype();
   oop next;
   NOT_PRODUCT(ssize_t n = 0;)
   for (cur = prefix; cur != NULL; cur = next) {
-    next = oop(cur->mark_raw());
+    next = oop(cur->mark_raw().to_pointer());
     cur->set_mark_raw(proto);   // until proven otherwise
     assert(oopDesc::is_oop(cur), "Should be an oop");
     bool res = work_q->push(cur);
@@ -7921,7 +7921,7 @@
   NOT_PRODUCT(_num_par_pushes++;)
   assert(oopDesc::is_oop(p), "Not an oop");
   preserve_mark_if_necessary(p);
-  p->set_mark_raw((markOop)_overflow_list);
+  p->set_mark_raw(markOop::from_pointer(_overflow_list));
   _overflow_list = p;
 }
 
@@ -7935,9 +7935,9 @@
   do {
     cur_overflow_list = observed_overflow_list;
     if (cur_overflow_list != BUSY) {
-      p->set_mark_raw(markOop(cur_overflow_list));
+      p->set_mark_raw(markOop::from_pointer((void*)cur_overflow_list));
     } else {
-      p->set_mark_raw(NULL);
+      p->set_mark_raw(markOop::from_pointer(NULL));
     }
     observed_overflow_list =
       Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
@@ -7970,14 +7970,14 @@
 // Single threaded
 void CMSCollector::preserve_mark_if_necessary(oop p) {
   markOop m = p->mark_raw();
-  if (m->must_be_preserved(p)) {
+  if (m.must_be_preserved(p)) {
     preserve_mark_work(p, m);
   }
 }
 
 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
   markOop m = p->mark_raw();
-  if (m->must_be_preserved(p)) {
+  if (m.must_be_preserved(p)) {
     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
     // Even though we read the mark word without holding
     // the lock, we are assured that it will not change
@@ -8017,7 +8017,7 @@
     oop p = _preserved_oop_stack.pop();
     assert(oopDesc::is_oop(p), "Should be an oop");
     assert(_span.contains(p), "oop should be in _span");
-    assert(p->mark_raw() == markOopDesc::prototype(),
+    assert(p->mark_raw() == markOop::prototype(),
            "Set when taken from overflow list");
     markOop m = _preserved_mark_stack.pop();
     p->set_mark_raw(m);
diff --git a/src/hotspot/share/gc/cms/freeChunk.hpp b/src/hotspot/share/gc/cms/freeChunk.hpp
index a48a6dd..2e0b7cc 100644
--- a/src/hotspot/share/gc/cms/freeChunk.hpp
+++ b/src/hotspot/share/gc/cms/freeChunk.hpp
@@ -62,8 +62,8 @@
   FreeChunk* _prev;
   FreeChunk* _next;
 
-  markOop mark()     const volatile { return (markOop)_size; }
-  void set_mark(markOop m)          { _size = (size_t)m; }
+  markOop mark()     const volatile { return markOop((uintptr_t)_size); }
+  void set_mark(markOop m)          { _size = (size_t)m.value(); }
 
  public:
   NOT_PRODUCT(static const size_t header_size();)
@@ -79,7 +79,7 @@
   }
 
   bool is_free() const volatile {
-    LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
+    LP64_ONLY(if (UseCompressedOops) return mark().is_cms_free_chunk(); else)
     return (((intptr_t)_prev) & 0x1) == 0x1;
   }
   bool cantCoalesce() const {
@@ -100,11 +100,11 @@
   debug_only(void* size_addr() const { return (void*)&_size; })
 
   size_t size() const volatile {
-    LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
+    LP64_ONLY(if (UseCompressedOops) return mark().get_size(); else )
     return _size;
   }
   void set_size(size_t sz) {
-    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
+    LP64_ONLY(if (UseCompressedOops) set_mark(markOop::set_size_and_free(sz)); else )
     _size = sz;
   }
 
@@ -126,7 +126,7 @@
 #ifdef _LP64
     if (UseCompressedOops) {
       OrderAccess::storestore();
-      set_mark(markOopDesc::prototype());
+      set_mark(markOop::prototype());
     }
 #endif
     assert(!is_free(), "Error");
diff --git a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp
index 609a148..e736591 100644
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp
@@ -47,7 +47,7 @@
     OrderAccess::loadload();
     markOop m = obj->mark_raw();
     oop new_obj;
-    if (m->is_marked()) { // Contains forwarding pointer.
+    if (m.is_marked()) { // Contains forwarding pointer.
       new_obj = ParNewGeneration::real_forwardee(obj);
     } else {
       size_t obj_sz = obj->size_given_klass(objK);
@@ -112,7 +112,7 @@
       OrderAccess::loadload();
       markOop m = obj->mark_raw();
       oop new_obj;
-      if (m->is_marked()) { // Contains forwarding pointer.
+      if (m.is_marked()) { // Contains forwarding pointer.
         new_obj = ParNewGeneration::real_forwardee(obj);
         RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
         log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
diff --git a/src/hotspot/share/gc/cms/promotionInfo.cpp b/src/hotspot/share/gc/cms/promotionInfo.cpp
index 971368c..776e273 100644
--- a/src/hotspot/share/gc/cms/promotionInfo.cpp
+++ b/src/hotspot/share/gc/cms/promotionInfo.cpp
@@ -95,13 +95,13 @@
   // make a copy of header as it may need to be spooled
   markOop mark = oop(trackOop)->mark_raw();
   trackOop->clear_next();
-  if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
+  if (mark.must_be_preserved_for_cms_scavenge(klassOfOop)) {
     // save non-prototypical header, and mark oop
     saveDisplacedHeader(mark);
     trackOop->setDisplacedMark();
   } else {
     // we'd like to assert something like the following:
-    // assert(mark == markOopDesc::prototype(), "consistency check");
+    // assert(mark == markOop::prototype(), "consistency check");
     // ... but the above won't work because the age bits have not (yet) been
     // cleared. The remainder of the check would be identical to the
     // condition checked in must_be_preserved() above, so we don't really
diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
index 76e9e77..bfc3581 100644
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
@@ -116,11 +116,11 @@
     } else {
       // Make sure object has the correct mark-word set or that it will be
       // fixed when restoring the preserved marks.
-      assert(object->mark_raw() == markOopDesc::prototype_for_object(object) || // Correct mark
-             object->mark_raw()->must_be_preserved(object) || // Will be restored by PreservedMarksSet
+      assert(object->mark_raw() == markOop::prototype_for_object(object) || // Correct mark
+             object->mark_raw().must_be_preserved(object) || // Will be restored by PreservedMarksSet
              (UseBiasedLocking && object->has_bias_pattern_raw()), // Will be restored by BiasedLocking
              "should have correct prototype obj: " PTR_FORMAT " mark: " PTR_FORMAT " prototype: " PTR_FORMAT,
-             p2i(object), p2i(object->mark_raw()), p2i(markOopDesc::prototype_for_object(object)));
+             p2i(object), p2i(object->mark_raw()), p2i(markOop::prototype_for_object(object)));
     }
     assert(object->forwardee() == NULL, "should be forwarded to NULL");
   }
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
index 98a2fe7..e0976fd 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
@@ -51,7 +51,7 @@
 
   // Marked by us, preserve if needed.
   markOop mark = obj->mark_raw();
-  if (mark->must_be_preserved(obj) &&
+  if (mark.must_be_preserved(obj) &&
       !G1ArchiveAllocator::is_open_archive_object(obj)) {
     preserved_stack()->push(obj, mark);
   }
diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
index f19a578..88e2a4e 100644
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
@@ -76,11 +76,11 @@
   oop forwardee = obj->forwardee();
   if (forwardee == NULL) {
     // Not forwarded, return current reference.
-    assert(obj->mark_raw() == markOopDesc::prototype_for_object(obj) || // Correct mark
-           obj->mark_raw()->must_be_preserved(obj) || // Will be restored by PreservedMarksSet
+    assert(obj->mark_raw() == markOop::prototype_for_object(obj) || // Correct mark
+           obj->mark_raw().must_be_preserved(obj) || // Will be restored by PreservedMarksSet
            (UseBiasedLocking && obj->has_bias_pattern_raw()), // Will be restored by BiasedLocking
            "Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
-           p2i(obj), p2i(obj->mark_raw()), p2i(markOopDesc::prototype_for_object(obj)));
+           p2i(obj), p2i(obj->mark_raw()), p2i(markOop::prototype_for_object(obj)));
     return;
   }
 
diff --git a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
index c254913..81f9e1d 100644
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
@@ -250,8 +250,8 @@
   if (state.is_in_cset()) {
     oop forwardee;
     markOop m = obj->mark_raw();
-    if (m->is_marked()) {
-      forwardee = (oop) m->decode_pointer();
+    if (m.is_marked()) {
+      forwardee = (oop) m.decode_pointer();
     } else {
       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
     }
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
index 72a6071..d081551 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -188,8 +188,8 @@
 
 InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
   if (state.is_young()) {
-    age = !m->has_displaced_mark_helper() ? m->age()
-                                          : m->displaced_mark_helper()->age();
+    age = !m.has_displaced_mark_helper() ? m.age()
+                                          : m.displaced_mark_helper().age();
     if (age < _tenuring_threshold) {
       return state;
     }
@@ -271,18 +271,18 @@
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
 
     if (dest_state.is_young()) {
-      if (age < markOopDesc::max_age) {
+      if (age < markOop::max_age) {
         age++;
       }
-      if (old_mark->has_displaced_mark_helper()) {
+      if (old_mark.has_displaced_mark_helper()) {
         // In this case, we have to install the mark word first,
         // otherwise obj looks to be forwarded (the old mark word,
         // which contains the forward pointer, was copied)
         obj->set_mark_raw(old_mark);
-        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
-        old_mark->set_displaced_mark_helper(new_mark);
+        markOop new_mark = old_mark.displaced_mark_helper().set_age(age);
+        old_mark.set_displaced_mark_helper(new_mark);
       } else {
-        obj->set_mark_raw(old_mark->set_age(age));
+        obj->set_mark_raw(old_mark.set_age(age));
       }
       _age_table.add(age, word_sz);
     } else {
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
index f1fba8e..b8e2100 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
@@ -42,8 +42,8 @@
   const InCSetState in_cset_state = _g1h->in_cset_state(obj);
   if (in_cset_state.is_in_cset()) {
     markOop m = obj->mark_raw();
-    if (m->is_marked()) {
-      obj = (oop) m->decode_pointer();
+    if (m.is_marked()) {
+      obj = (oop) m.decode_pointer();
     } else {
       obj = copy_to_survivor_space(in_cset_state, obj, m);
     }
diff --git a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp b/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp
index eed0fa5..43c3368 100644
--- a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp
+++ b/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp
@@ -113,8 +113,8 @@
   const intx interval = PrefetchScanIntervalInBytes;
 
   while (q < t) {
-    assert(oop(q)->mark_raw()->is_marked() || oop(q)->mark_raw()->is_unlocked() ||
-           oop(q)->mark_raw()->has_bias_pattern(),
+    assert(oop(q)->mark_raw().is_marked() || oop(q)->mark_raw().is_unlocked() ||
+           oop(q)->mark_raw().has_bias_pattern(),
            "these are the only valid states during a mark sweep");
     if (oop(q)->is_gc_marked()) {
       /* prefetch beyond q */
@@ -259,7 +259,7 @@
   if (allowed_deadspace_words >= deadlength) {
     allowed_deadspace_words -= deadlength;
     CollectedHeap::fill_with_object(q, deadlength);
-    oop(q)->set_mark_raw(oop(q)->mark_raw()->set_marked());
+    oop(q)->set_mark_raw(oop(q)->mark_raw().set_marked());
     assert((int) deadlength == oop(q)->size(), "bad filler object size");
     // Recall that we required "q == compaction_top".
     return true;
@@ -350,7 +350,7 @@
       q = t;
     } else {
       // $$$ Funky
-      q = (HeapWord*) oop(_first_dead)->mark_raw()->decode_pointer();
+      q = (HeapWord*) oop(_first_dead)->mark_raw().decode_pointer();
     }
   }
 
@@ -361,7 +361,7 @@
     if (!oop(q)->is_gc_marked()) {
       // mark is pointer to next marked oop
       debug_only(prev_q = q);
-      q = (HeapWord*) oop(q)->mark_raw()->decode_pointer();
+      q = (HeapWord*) oop(q)->mark_raw().decode_pointer();
       assert(q > prev_q, "we should be moving forward through memory");
     } else {
       // prefetch beyond q
diff --git a/src/hotspot/share/gc/parallel/psPromotionLAB.cpp b/src/hotspot/share/gc/parallel/psPromotionLAB.cpp
index a36350d..e39588f 100644
--- a/src/hotspot/share/gc/parallel/psPromotionLAB.cpp
+++ b/src/hotspot/share/gc/parallel/psPromotionLAB.cpp
@@ -82,7 +82,7 @@
   // so they can always fill with an array.
   HeapWord* tlab_end = end() + filler_header_size;
   typeArrayOop filler_oop = (typeArrayOop) top();
-  filler_oop->set_mark_raw(markOopDesc::prototype());
+  filler_oop->set_mark_raw(markOop::prototype());
   filler_oop->set_klass(Universe::intArrayKlassObj());
   const size_t array_length =
     pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
index 1ef9007..15c95e3 100644
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
@@ -119,13 +119,13 @@
   markOop test_mark = o->mark_raw();
 
   // The same test as "o->is_forwarded()"
-  if (!test_mark->is_marked()) {
+  if (!test_mark.is_marked()) {
     bool new_obj_is_tenured = false;
     size_t new_obj_size = o->size();
 
    // Find the object's age, MT safe.
-    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
-      test_mark->displaced_mark_helper()->age() : test_mark->age();
+    uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+      test_mark.displaced_mark_helper().age() : test_mark.age();
 
     if (!promote_immediately) {
       // Try allocating obj in to-space (unless too old)
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index 1353c95..70f84d0 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -276,7 +276,7 @@
   heap->print_heap_before_gc();
   heap->trace_heap_before_gc(&_gc_tracer);
 
-  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
+  assert(!NeverTenure || _tenuring_threshold == markOop::max_age + 1, "Sanity");
   assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
 
   // Fill in TLABs
@@ -728,8 +728,8 @@
   // Arguments must have been parsed
 
   if (AlwaysTenure || NeverTenure) {
-    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
-           "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
+    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOop::max_age + 1,
+           "MaxTenuringThreshold should be 0 or markOop::max_age + 1, but is %d", (int) MaxTenuringThreshold);
     _tenuring_threshold = MaxTenuringThreshold;
   } else {
     // We want to smooth out our startup times for the AdaptiveSizePolicy
diff --git a/src/hotspot/share/gc/serial/markSweep.cpp b/src/hotspot/share/gc/serial/markSweep.cpp
index fcc4f40..c799afb 100644
--- a/src/hotspot/share/gc/serial/markSweep.cpp
+++ b/src/hotspot/share/gc/serial/markSweep.cpp
@@ -131,7 +131,7 @@
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if (!obj->mark_raw()->is_marked()) {
+    if (!obj->mark_raw().is_marked()) {
       mark_object(obj);
       follow_object(obj);
     }
diff --git a/src/hotspot/share/gc/serial/markSweep.inline.hpp b/src/hotspot/share/gc/serial/markSweep.inline.hpp
index 25a5a31..1de9ce1 100644
--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp
+++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp
@@ -39,9 +39,9 @@
   // some marks may contain information we need to preserve so we store them away
   // and overwrite the mark.  We'll restore it at the end of markSweep.
   markOop mark = obj->mark_raw();
-  obj->set_mark_raw(markOopDesc::prototype()->set_marked());
+  obj->set_mark_raw(markOop::prototype().set_marked());
 
-  if (mark->must_be_preserved(obj)) {
+  if (mark.must_be_preserved(obj)) {
     preserve_mark(obj, mark);
   }
 }
@@ -50,7 +50,7 @@
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if (!obj->mark_raw()->is_marked()) {
+    if (!obj->mark_raw().is_marked()) {
       mark_object(obj);
       _marking_stack.push(obj);
     }
@@ -79,11 +79,11 @@
     oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(Universe::heap()->is_in(obj), "should be in heap");
 
-    oop new_obj = oop(obj->mark_raw()->decode_pointer());
+    oop new_obj = oop(obj->mark_raw().decode_pointer());
 
     assert(new_obj != NULL ||                         // is forwarding ptr?
-           obj->mark_raw() == markOopDesc::prototype() || // not gc marked?
-           (UseBiasedLocking && obj->mark_raw()->has_bias_pattern()),
+           obj->mark_raw() == markOop::prototype() || // not gc marked?
+           (UseBiasedLocking && obj->mark_raw().has_bias_pattern()),
            // not gc marked?
            "should be forwarded");
 
diff --git a/src/hotspot/share/gc/shared/ageTable.cpp b/src/hotspot/share/gc/shared/ageTable.cpp
index 14cac1b..f27e7dc 100644
--- a/src/hotspot/share/gc/shared/ageTable.cpp
+++ b/src/hotspot/share/gc/shared/ageTable.cpp
@@ -79,8 +79,8 @@
   uint result;
 
   if (AlwaysTenure || NeverTenure) {
-    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
-           "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
+    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOop::max_age + 1,
+           "MaxTenuringThreshold should be 0 or markOop::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
     result = MaxTenuringThreshold;
   } else {
     size_t total = 0;
diff --git a/src/hotspot/share/gc/shared/ageTable.hpp b/src/hotspot/share/gc/shared/ageTable.hpp
index 4d65fca..65deab3 100644
--- a/src/hotspot/share/gc/shared/ageTable.hpp
+++ b/src/hotspot/share/gc/shared/ageTable.hpp
@@ -41,7 +41,7 @@
 
  public:
   // constants
-  enum { table_size = markOopDesc::max_age + 1 };
+  enum { table_size = markOop::max_age + 1 };
 
   // instance variables
   size_t sizes[table_size];
diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp
index a77964e..1ebe571 100644
--- a/src/hotspot/share/gc/shared/gc_globals.hpp
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp
@@ -816,12 +816,12 @@
                                                                             \
   product(uintx, MaxTenuringThreshold,    15,                               \
           "Maximum value for tenuring threshold")                           \
-          range(0, markOopDesc::max_age + 1)                                \
+          range(0, markOop::max_age + 1)                                    \
           constraint(MaxTenuringThresholdConstraintFunc,AfterErgo)          \
                                                                             \
   product(uintx, InitialTenuringThreshold,    7,                            \
           "Initial value for tenuring threshold")                           \
-          range(0, markOopDesc::max_age + 1)                                \
+          range(0, markOop::max_age + 1)                                    \
           constraint(InitialTenuringThresholdConstraintFunc,AfterErgo)      \
                                                                             \
   product(uintx, TargetSurvivorRatio,    50,                                \
diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp
index e84af11..5b8d78c 100644
--- a/src/hotspot/share/gc/shared/memAllocator.cpp
+++ b/src/hotspot/share/gc/shared/memAllocator.cpp
@@ -400,7 +400,7 @@
     oopDesc::set_mark_raw(mem, _klass->prototype_header());
   } else {
     // May be bootstrapping
-    oopDesc::set_mark_raw(mem, markOopDesc::prototype());
+    oopDesc::set_mark_raw(mem, markOop::prototype());
   }
   // Need a release store to ensure array/class length, mark word, and
   // object zeroing are visible before setting the klass non-NULL, for
diff --git a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
index e246a8a..42691d6 100644
--- a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
+++ b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
@@ -31,7 +31,7 @@
 #include "utilities/stack.inline.hpp"
 
 inline bool PreservedMarks::should_preserve_mark(oop obj, markOop m) const {
-  return m->must_be_preserved_for_promotion_failure(obj);
+  return m.must_be_preserved_for_promotion_failure(obj);
 }
 
 inline void PreservedMarks::push(oop obj, markOop m) {
diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp
index 5b6b2b1..a46bac0 100644
--- a/src/hotspot/share/gc/shared/space.cpp
+++ b/src/hotspot/share/gc/shared/space.cpp
@@ -651,14 +651,14 @@
     // allocate uninitialized int array
     typeArrayOop t = (typeArrayOop) allocate(size);
     assert(t != NULL, "allocation should succeed");
-    t->set_mark_raw(markOopDesc::prototype());
+    t->set_mark_raw(markOop::prototype());
     t->set_klass(Universe::intArrayKlassObj());
     t->set_length((int)length);
   } else {
     assert(size == CollectedHeap::min_fill_size(),
            "size for smallest fake object doesn't match");
     instanceOop obj = (instanceOop) allocate(size);
-    obj->set_mark_raw(markOopDesc::prototype());
+    obj->set_mark_raw(markOop::prototype());
     obj->set_klass_gap(0);
     obj->set_klass(SystemDictionary::Object_klass());
   }
diff --git a/src/hotspot/share/gc/shared/space.inline.hpp b/src/hotspot/share/gc/shared/space.inline.hpp
index ad22b6c..3732606 100644
--- a/src/hotspot/share/gc/shared/space.inline.hpp
+++ b/src/hotspot/share/gc/shared/space.inline.hpp
@@ -118,7 +118,7 @@
       _allowed_deadspace_words -= dead_length;
       CollectedHeap::fill_with_object(dead_start, dead_length);
       oop obj = oop(dead_start);
-      obj->set_mark_raw(obj->mark_raw()->set_marked());
+      obj->set_mark_raw(obj->mark_raw().set_marked());
 
       assert(dead_length == (size_t)obj->size(), "bad filler object size");
       log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
@@ -165,8 +165,8 @@
 
   while (cur_obj < scan_limit) {
     assert(!space->scanned_block_is_obj(cur_obj) ||
-           oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
-           oop(cur_obj)->mark_raw()->has_bias_pattern(),
+           oop(cur_obj)->mark_raw().is_marked() || oop(cur_obj)->mark_raw().is_unlocked() ||
+           oop(cur_obj)->mark_raw().has_bias_pattern(),
            "these are the only valid states during a mark sweep");
     if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
       // prefetch beyond cur_obj
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
index 1f8b79d..d9ecdc8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
@@ -60,7 +60,7 @@
   r->print_on(&ss);
 
   stringStream mw_ss;
-  obj->mark()->print_on(&mw_ss);
+  obj->mark().print_on(&mw_ss);
 
   ShenandoahMarkingContext* const ctx = heap->marking_context();
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp
index 50d9c50..de79a38 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp
@@ -40,8 +40,8 @@
   // fwdptr. That object is still not forwarded, and we need to return
   // the object itself.
   markOop mark = obj->mark_raw();
-  if (mark->is_marked()) {
-    HeapWord* fwdptr = (HeapWord*) mark->clear_lock_bits();
+  if (mark.is_marked()) {
+    HeapWord* fwdptr = (HeapWord*) mark.clear_lock_bits();
     if (fwdptr != NULL) {
       return fwdptr;
     }
@@ -55,8 +55,8 @@
   assert(Thread::current()->is_Java_thread(), "Must be a mutator thread");
 
   markOop mark = obj->mark_raw();
-  if (mark->is_marked()) {
-    HeapWord* fwdptr = (HeapWord*)mark->clear_lock_bits();
+  if (mark.is_marked()) {
+    HeapWord* fwdptr = (HeapWord*)mark.clear_lock_bits();
     assert(fwdptr != NULL, "Forwarding pointer is never null here");
     return oop(fwdptr);
   } else {
@@ -70,21 +70,21 @@
 }
 
 inline bool ShenandoahForwarding::is_forwarded(oop obj) {
-  return obj->mark_raw()->is_marked();
+  return obj->mark_raw().is_marked();
 }
 
 inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) {
   markOop old_mark = obj->mark_raw();
-  if (old_mark->is_marked()) {
-    return (oop) old_mark->clear_lock_bits();
+  if (old_mark.is_marked()) {
+    return (oop) old_mark.clear_lock_bits();
   }
 
-  markOop new_mark = markOopDesc::encode_pointer_as_mark(update);
+  markOop new_mark = markOop::encode_pointer_as_mark(update);
   markOop prev_mark = obj->cas_set_mark_raw(new_mark, old_mark);
   if (prev_mark == old_mark) {
     return update;
   } else {
-    return (oop) prev_mark->clear_lock_bits();
+    return (oop) prev_mark.clear_lock_bits();
   }
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp
index edcb772..3a72358 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStringDedup.cpp
@@ -50,14 +50,14 @@
     const markOop mark = java_string->mark();
 
    // Having/had displaced header, too risky to deal with them, skip
-    if (mark == markOopDesc::INFLATING() || mark->has_displaced_mark_helper()) {
+    if (mark == markOop::INFLATING() || mark.has_displaced_mark_helper()) {
       return;
     }
 
    // Increase string age and enqueue it when it reaches the age threshold
-    markOop new_mark = mark->incr_age();
+    markOop new_mark = mark.incr_age();
     if (mark == java_string->cas_set_mark(new_mark, mark)) {
-      if (mark->age() == StringDeduplicationAgeThreshold) {
+      if (mark.age() == StringDeduplicationAgeThreshold) {
         StringDedupQueue::push(ShenandoahWorkerSession::worker_id(), java_string);
       }
     }
diff --git a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp
index bdb1dae..c039514 100644
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp
@@ -665,17 +665,17 @@
         BasicObjectLock* mon = &istate->monitor_base()[-1];
         mon->set_obj(rcvr);
         bool success = false;
-        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+        uintptr_t epoch_mask_in_place = (uintptr_t)markOop::epoch_mask_in_place;
         markOop mark = rcvr->mark();
-        intptr_t hash = (intptr_t) markOopDesc::no_hash;
+        intptr_t hash = (intptr_t) markOop::no_hash;
         // Implies UseBiasedLocking.
-        if (mark->has_bias_pattern()) {
+        if (mark.has_bias_pattern()) {
           uintptr_t thread_ident;
           uintptr_t anticipated_bias_locking_value;
           thread_ident = (uintptr_t)istate->thread();
           anticipated_bias_locking_value =
             (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
-            ~((uintptr_t) markOopDesc::age_mask_in_place);
+            ~((uintptr_t) markOop::age_mask_in_place);
 
           if (anticipated_bias_locking_value == 0) {
             // Already biased towards this thread, nothing to do.
@@ -683,10 +683,10 @@
               (* BiasedLocking::biased_lock_entry_count_addr())++;
             }
             success = true;
-          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+          } else if ((anticipated_bias_locking_value & markOop::biased_lock_mask_in_place) != 0) {
             // Try to revoke bias.
             markOop header = rcvr->klass()->prototype_header();
-            if (hash != markOopDesc::no_hash) {
+            if (hash != markOop::no_hash) {
               header = header->copy_set_hash(hash);
             }
             if (rcvr->cas_set_mark(header, mark) == mark) {
@@ -696,7 +696,7 @@
           } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
             // Try to rebias.
             markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
-            if (hash != markOopDesc::no_hash) {
+            if (hash != markOop::no_hash) {
               new_header = new_header->copy_set_hash(hash);
             }
             if (rcvr->cas_set_mark(new_header, mark) == mark) {
@@ -710,9 +710,9 @@
           } else {
             // Try to bias towards thread in case object is anonymously biased.
             markOop header = (markOop) ((uintptr_t) mark &
-                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
-                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
-            if (hash != markOopDesc::no_hash) {
+                                        ((uintptr_t)markOop::biased_lock_mask_in_place |
+                                         (uintptr_t)markOop::age_mask_in_place | epoch_mask_in_place));
+            if (hash != markOop::no_hash) {
               header = header->copy_set_hash(hash);
             }
             markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
@@ -731,7 +731,7 @@
 
         // Traditional lightweight locking.
         if (!success) {
-          markOop displaced = rcvr->mark()->set_unlocked();
+          markOop displaced = rcvr->mark().set_unlocked();
           mon->lock()->set_displaced_header(displaced);
           bool call_vm = UseHeavyMonitors;
           if (call_vm || rcvr->cas_set_mark((markOop)mon, displaced) != displaced) {
@@ -850,18 +850,18 @@
       assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
       entry->set_obj(lockee);
       bool success = false;
-      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+      uintptr_t epoch_mask_in_place = (uintptr_t)markOop::epoch_mask_in_place;
 
       markOop mark = lockee->mark();
-      intptr_t hash = (intptr_t) markOopDesc::no_hash;
+      intptr_t hash = (intptr_t) markOop::no_hash;
       // implies UseBiasedLocking
-      if (mark->has_bias_pattern()) {
+      if (mark.has_bias_pattern()) {
         uintptr_t thread_ident;
         uintptr_t anticipated_bias_locking_value;
         thread_ident = (uintptr_t)istate->thread();
         anticipated_bias_locking_value =
           (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
-          ~((uintptr_t) markOopDesc::age_mask_in_place);
+          ~((uintptr_t) markOop::age_mask_in_place);
 
         if  (anticipated_bias_locking_value == 0) {
           // already biased towards this thread, nothing to do
@@ -869,10 +869,10 @@
             (* BiasedLocking::biased_lock_entry_count_addr())++;
           }
           success = true;
-        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+        } else if ((anticipated_bias_locking_value & markOop::biased_lock_mask_in_place) != 0) {
           // try revoke bias
           markOop header = lockee->klass()->prototype_header();
-          if (hash != markOopDesc::no_hash) {
+          if (hash != markOop::no_hash) {
             header = header->copy_set_hash(hash);
           }
           if (lockee->cas_set_mark(header, mark) == mark) {
@@ -883,7 +883,7 @@
         } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
           // try rebias
           markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
-          if (hash != markOopDesc::no_hash) {
+          if (hash != markOop::no_hash) {
                 new_header = new_header->copy_set_hash(hash);
           }
           if (lockee->cas_set_mark(new_header, mark) == mark) {
@@ -896,9 +896,9 @@
           success = true;
         } else {
           // try to bias towards thread in case object is anonymously biased
-          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
-                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
-          if (hash != markOopDesc::no_hash) {
+          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOop::biased_lock_mask_in_place |
+                                                          (uintptr_t)markOop::age_mask_in_place | epoch_mask_in_place));
+          if (hash != markOop::no_hash) {
             header = header->copy_set_hash(hash);
           }
           markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
@@ -917,7 +917,7 @@
 
       // traditional lightweight locking
       if (!success) {
-        markOop displaced = lockee->mark()->set_unlocked();
+        markOop displaced = lockee->mark().set_unlocked();
         entry->lock()->set_displaced_header(displaced);
         bool call_vm = UseHeavyMonitors;
         if (call_vm || lockee->cas_set_mark((markOop)entry, displaced) != displaced) {
@@ -1790,18 +1790,18 @@
         if (entry != NULL) {
           entry->set_obj(lockee);
           int success = false;
-          uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+          uintptr_t epoch_mask_in_place = (uintptr_t)markOop::epoch_mask_in_place;
 
           markOop mark = lockee->mark();
-          intptr_t hash = (intptr_t) markOopDesc::no_hash;
+          intptr_t hash = (intptr_t) markOop::no_hash;
           // implies UseBiasedLocking
-          if (mark->has_bias_pattern()) {
+          if (mark.has_bias_pattern()) {
             uintptr_t thread_ident;
             uintptr_t anticipated_bias_locking_value;
             thread_ident = (uintptr_t)istate->thread();
             anticipated_bias_locking_value =
               (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
-              ~((uintptr_t) markOopDesc::age_mask_in_place);
+              ~((uintptr_t) markOop::age_mask_in_place);
 
             if  (anticipated_bias_locking_value == 0) {
               // already biased towards this thread, nothing to do
@@ -1810,10 +1810,10 @@
               }
               success = true;
             }
-            else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+            else if ((anticipated_bias_locking_value & markOop::biased_lock_mask_in_place) != 0) {
               // try revoke bias
               markOop header = lockee->klass()->prototype_header();
-              if (hash != markOopDesc::no_hash) {
+              if (hash != markOop::no_hash) {
                 header = header->copy_set_hash(hash);
               }
               if (lockee->cas_set_mark(header, mark) == mark) {
@@ -1824,7 +1824,7 @@
             else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
               // try rebias
               markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
-              if (hash != markOopDesc::no_hash) {
+              if (hash != markOop::no_hash) {
                 new_header = new_header->copy_set_hash(hash);
               }
               if (lockee->cas_set_mark(new_header, mark) == mark) {
@@ -1838,10 +1838,10 @@
             }
             else {
               // try to bias towards thread in case object is anonymously biased
-              markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
-                                                              (uintptr_t)markOopDesc::age_mask_in_place |
+              markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOop::biased_lock_mask_in_place |
+                                                              (uintptr_t)markOop::age_mask_in_place |
                                                               epoch_mask_in_place));
-              if (hash != markOopDesc::no_hash) {
+              if (hash != markOop::no_hash) {
                 header = header->copy_set_hash(hash);
               }
               markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
@@ -1860,7 +1860,7 @@
 
           // traditional lightweight locking
           if (!success) {
-            markOop displaced = lockee->mark()->set_unlocked();
+            markOop displaced = lockee->mark().set_unlocked();
             entry->lock()->set_displaced_header(displaced);
             bool call_vm = UseHeavyMonitors;
             if (call_vm || lockee->cas_set_mark((markOop)entry, displaced) != displaced) {
@@ -1891,11 +1891,11 @@
             BasicLock* lock = most_recent->lock();
             markOop header = lock->displaced_header();
             most_recent->set_obj(NULL);
-            if (!lockee->mark()->has_bias_pattern()) {
+            if (!lockee->mark().has_bias_pattern()) {
               bool call_vm = UseHeavyMonitors;
               // If it isn't recursive we either must swap old header or call the runtime
               if (header != NULL || call_vm) {
-                markOop old_header = markOopDesc::encode(lock);
+                markOop old_header = markOop::encode(lock);
                 if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
                   // restore object for the slow case
                   most_recent->set_obj(lockee);
@@ -2181,7 +2181,7 @@
               if (UseBiasedLocking) {
                 result->set_mark(ik->prototype_header());
               } else {
-                result->set_mark(markOopDesc::prototype());
+                result->set_mark(markOop::prototype());
               }
               result->set_klass_gap(0);
               result->set_klass(ik);
@@ -3031,10 +3031,10 @@
           markOop header = lock->displaced_header();
           end->set_obj(NULL);
 
-          if (!lockee->mark()->has_bias_pattern()) {
+          if (!lockee->mark().has_bias_pattern()) {
             // If it isn't recursive we either must swap old header or call the runtime
             if (header != NULL) {
-              markOop old_header = markOopDesc::encode(lock);
+              markOop old_header = markOop::encode(lock);
               if (lockee->cas_set_mark(header, old_header) != old_header) {
                 // restore object for the slow case
                 end->set_obj(lockee);
@@ -3106,11 +3106,11 @@
             markOop header = lock->displaced_header();
             base->set_obj(NULL);
 
-            if (!rcvr->mark()->has_bias_pattern()) {
+            if (!rcvr->mark().has_bias_pattern()) {
               base->set_obj(NULL);
               // If it isn't recursive we either must swap old header or call the runtime
               if (header != NULL) {
-                markOop old_header = markOopDesc::encode(lock);
+                markOop old_header = markOop::encode(lock);
                 if (rcvr->cas_set_mark(header, old_header) != old_header) {
                   // restore object for the slow case
                   base->set_obj(rcvr);
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp
index e436746..b484ece 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp
@@ -131,7 +131,7 @@
   if (!_mark_bits->is_marked(pointee)) {
     _mark_bits->mark_obj(pointee);
     // is the pointee a sample object?
-    if (NULL == pointee->mark()) {
+    if (NULL == pointee->mark().to_pointer()) {
       add_chain(reference, pointee);
     }
 
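The leak profiler tags a sample object by giving it an all-zero mark word. With a
pointer typedef the test could compare against NULL directly; the value class
exposes the raw word instead. A sketch of the test, assuming the markOop value
type introduced later in this patch (is_sample_object is a hypothetical helper,
not part of the patch):

    static bool is_sample_object(const oop obj) {
      // An all-zero mark cannot occur for a live object outside a safepoint.
      return obj->mark().to_pointer() == NULL;  // equivalently: mark().value() == 0
    }
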
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp
index c347901..b1a1fca 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp
@@ -121,7 +121,7 @@
   assert(_mark_bits->is_marked(pointee), "invariant");
 
   // is the pointee a sample object?
-  if (NULL == pointee->mark()) {
+  if (NULL == pointee->mark().to_pointer()) {
     add_chain();
   }
 
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp
index ad549f4..b9f09fa 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp
@@ -237,8 +237,8 @@
   StoredEdge* const leak_context_edge = put(edge->reference());
   oop sample_object = edge->pointee();
   assert(sample_object != NULL, "invariant");
-  assert(NULL == sample_object->mark(), "invariant");
-  sample_object->set_mark(markOop(leak_context_edge));
+  assert(NULL == sample_object->mark().to_pointer(), "invariant");
+  sample_object->set_mark(markOop::from_pointer(leak_context_edge));
   return leak_context_edge;
 }
 
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp
index 699fd4a..f4c9250 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp
@@ -36,7 +36,7 @@
 #include "runtime/handles.inline.hpp"
 
 bool EdgeUtils::is_leak_edge(const Edge& edge) {
-  return (const Edge*)edge.pointee()->mark() == &edge;
+  return (const Edge*)edge.pointee()->mark().to_pointer() == &edge;
 }
 
 static bool is_static_field(const oop ref_owner, const InstanceKlass* ik, int offset) {
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp
index 9a97a37..3d51edf 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp
@@ -45,7 +45,7 @@
                         const markOop mark_oop) : _obj(obj),
                                                   _mark_oop(mark_oop) {}
    public:
-    ObjectSampleMarkOop() : _obj(NULL), _mark_oop(NULL) {}
+    ObjectSampleMarkOop() : _obj(NULL), _mark_oop(markOop::zero()) {}
   };
 
   GrowableArray<ObjectSampleMarkOop>* _store;
@@ -72,8 +72,8 @@
     // This is an "impossible" state during a safepoint,
     // hence we will use it to quickly identify sample objects
     // during the reachability search from gc roots.
-    assert(NULL == markOopDesc::INFLATING(), "invariant");
-    obj->set_mark(markOopDesc::INFLATING());
-    assert(NULL == obj->mark(), "invariant");
+    assert(markOop::zero() == markOop::INFLATING(), "invariant");
+    obj->set_mark(markOop::INFLATING());
+    assert(markOop::zero() == obj->mark(), "invariant");
   }
 };
diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp
index 02d9c2d..438f917 100644
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp
@@ -111,7 +111,7 @@
   traceid gc_root_id = 0;
   const Edge* edge = NULL;
   if (SafepointSynchronize::is_at_safepoint()) {
-    edge = (const Edge*)(*object_addr)->mark();
+    edge = (const Edge*)(*object_addr)->mark().to_pointer();
   }
   if (edge == NULL) {
     // In order to dump out a representation of the event
diff --git a/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp b/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp
index 3cfff5a..d5b7d71 100644
--- a/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp
@@ -27,7 +27,7 @@
 #include "jfr/leakprofiler/utilities/saveRestore.hpp"
 #include "oops/oop.inline.hpp"
 
-MarkOopContext::MarkOopContext() : _obj(NULL), _mark_oop(NULL) {}
+MarkOopContext::MarkOopContext() : _obj(NULL), _mark_oop(markOop::zero()) {}
 
 MarkOopContext::MarkOopContext(const oop obj) : _obj(obj), _mark_oop(obj->mark()) {
   assert(_obj->mark() == _mark_oop, "invariant");
@@ -36,8 +36,8 @@
   // This is an "impossible" state during a safepoint,
   // hence we will use it to quickly identify objects
   // during the reachability search from gc roots.
-  assert(NULL == markOopDesc::INFLATING(), "invariant");
-  _obj->set_mark(markOopDesc::INFLATING());
-  assert(NULL == obj->mark(), "invariant");
+  assert(markOop::zero() == markOop::INFLATING(), "invariant");
+  _obj->set_mark(markOop::INFLATING());
+  assert(markOop::zero() == obj->mark(), "invariant");
 }
 
@@ -48,7 +48,7 @@
   }
 }
 
-MarkOopContext::MarkOopContext(const MarkOopContext& rhs) : _obj(NULL), _mark_oop(NULL) {
+MarkOopContext::MarkOopContext(const MarkOopContext& rhs) : _obj(NULL), _mark_oop(markOop::zero()) {
   swap(const_cast<MarkOopContext&>(rhs));
 }
 
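Both mark-saving utilities rely on the same sentinel: INFLATING() and zero() name
the identical all-zero bit pattern, which is why the asserts above are phrased
against markOop::zero(). A short sketch of the invariant (illustrative only):

    markOop m = markOop::INFLATING();
    assert(m == markOop::zero(), "INFLATING is the all-zero mark");
    assert(m.value() == 0, "same check expressed on the raw bits");
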
diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
index 7467eb7..33496d4 100644
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
@@ -566,7 +566,7 @@
   declare_constant(Klass::_lh_array_tag_type_value)                       \
   declare_constant(Klass::_lh_array_tag_obj_value)                        \
                                                                           \
-  declare_constant(markOopDesc::no_hash)                                  \
+  declare_constant(markOop::no_hash)                                      \
                                                                           \
   declare_constant(Method::_caller_sensitive)                             \
   declare_constant(Method::_force_inline)                                 \
@@ -598,19 +598,19 @@
   declare_constant(InvocationCounter::count_increment)                    \
   declare_constant(InvocationCounter::count_shift)                        \
                                                                           \
-  declare_constant(markOopDesc::hash_shift)                               \
+  declare_constant(markOop::hash_shift)                                   \
                                                                           \
-  declare_constant(markOopDesc::biased_lock_mask_in_place)                \
-  declare_constant(markOopDesc::age_mask_in_place)                        \
-  declare_constant(markOopDesc::epoch_mask_in_place)                      \
-  declare_constant(markOopDesc::hash_mask)                                \
-  declare_constant(markOopDesc::hash_mask_in_place)                       \
+  declare_constant(markOop::biased_lock_mask_in_place)                    \
+  declare_constant(markOop::age_mask_in_place)                            \
+  declare_constant(markOop::epoch_mask_in_place)                          \
+  declare_constant(markOop::hash_mask)                                    \
+  declare_constant(markOop::hash_mask_in_place)                           \
                                                                           \
-  declare_constant(markOopDesc::unlocked_value)                           \
-  declare_constant(markOopDesc::biased_lock_pattern)                      \
+  declare_constant(markOop::unlocked_value)                               \
+  declare_constant(markOop::biased_lock_pattern)                          \
                                                                           \
-  declare_constant(markOopDesc::no_hash_in_place)                         \
-  declare_constant(markOopDesc::no_lock_in_place)                         \
+  declare_constant(markOop::no_hash_in_place)                             \
+  declare_constant(markOop::no_lock_in_place)                             \
 
 #define VM_ADDRESSES(declare_address, declare_preprocessor_address, declare_function) \
   declare_function(SharedRuntime::register_finalizer)                     \
diff --git a/src/hotspot/share/memory/metaspaceShared.cpp b/src/hotspot/share/memory/metaspaceShared.cpp
index 46ebf3d..81e45dc 100644
--- a/src/hotspot/share/memory/metaspaceShared.cpp
+++ b/src/hotspot/share/memory/metaspaceShared.cpp
@@ -1898,7 +1898,7 @@
 }
 
 unsigned MetaspaceShared::obj_hash(oop const& p) {
-  assert(!p->mark()->has_bias_pattern(),
+  assert(!p->mark().has_bias_pattern(),
          "this object should never have been locked");  // so identity_hash won't safepoin
   unsigned hash = (unsigned)p->identity_hash();
   return hash;
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index eab9576..7969582 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -1310,12 +1310,12 @@
 }
 
 uintptr_t Universe::verify_mark_mask() {
-  return markOopDesc::lock_mask_in_place;
+  return markOop::lock_mask_in_place;
 }
 
 uintptr_t Universe::verify_mark_bits() {
   intptr_t mask = verify_mark_mask();
-  intptr_t bits = (intptr_t)markOopDesc::prototype();
+  intptr_t bits = (intptr_t)markOop::prototype().value();
   assert((bits & ~mask) == 0, "no stray header bits");
   return bits;
 }
diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp
index d67f624..ab83062 100644
--- a/src/hotspot/share/memory/virtualspace.cpp
+++ b/src/hotspot/share/memory/virtualspace.cpp
@@ -628,9 +628,9 @@
     initialize(size, alignment, large, NULL, false);
   }
 
-  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
+  assert(markOop::encode_pointer_as_mark(_base).decode_pointer() == _base,
          "area must be distinguishable from marks for mark-sweep");
-  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
+  assert(markOop::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
          "area must be distinguishable from marks for mark-sweep");
 
   if (base() != NULL) {
diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp
index 01c5ed5..2cc6543 100644
--- a/src/hotspot/share/oops/klass.cpp
+++ b/src/hotspot/share/oops/klass.cpp
@@ -195,7 +195,7 @@
 // Need to set the _java_mirror field explicitly to not hit an assert that the field
 // should be NULL before setting it.
 Klass::Klass(KlassID id) : _id(id),
-                           _prototype_header(markOopDesc::prototype()),
+                           _prototype_header(markOop::prototype()),
                            _shared_class_path_index(-1),
                            _java_mirror(NULL) {
   CDS_ONLY(_shared_class_flags = 0;)
@@ -682,7 +682,7 @@
 
   if (WizardMode) {
      // print header
-     obj->mark()->print_on(st);
+     obj->mark().print_on(st);
   }
 
   // print class
diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp
index 1d1c524..d534715 100644
--- a/src/hotspot/share/oops/klass.hpp
+++ b/src/hotspot/share/oops/klass.hpp
@@ -28,6 +28,7 @@
 #include "classfile/classLoaderData.hpp"
 #include "memory/iterator.hpp"
 #include "memory/memRegion.hpp"
+#include "oops/markOop.hpp"
 #include "oops/metadata.hpp"
 #include "oops/oop.hpp"
 #include "oops/oopHandle.hpp"
diff --git a/src/hotspot/share/oops/markOop.cpp b/src/hotspot/share/oops/markOop.cpp
index 0224ca7..62cb157 100644
--- a/src/hotspot/share/oops/markOop.cpp
+++ b/src/hotspot/share/oops/markOop.cpp
@@ -27,7 +27,7 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 
-void markOopDesc::print_on(outputStream* st) const {
+void markOop::print_on(outputStream* st) const {
   if (is_marked()) {
     st->print(" marked(" INTPTR_FORMAT ")", value());
   } else if (has_monitor()) {
diff --git a/src/hotspot/share/oops/markOop.hpp b/src/hotspot/share/oops/markOop.hpp
index 649217c..d056166 100644
--- a/src/hotspot/share/oops/markOop.hpp
+++ b/src/hotspot/share/oops/markOop.hpp
@@ -101,10 +101,9 @@
 class ObjectMonitor;
 class JavaThread;
 
-class markOopDesc: public oopDesc {
+class markOop {
  private:
-  // Conversion
-  uintptr_t value() const { return (uintptr_t) this; }
+  uintptr_t _value;
 
  public:
   // Constants
@@ -117,6 +116,30 @@
          epoch_bits               = 2
   };
 
+  explicit markOop(uintptr_t value) : _value(value) {}
+
+  markOop() { /* uninitialized */ }
+
+  // It is critical for performance that this class be trivially
+  // destructible, copyable, and assignable.
+
+  static markOop from_pointer(void* ptr) {
+    return markOop((uintptr_t)ptr);
+  }
+  void* to_pointer() const {
+    return (void*)_value;
+  }
+
+  bool operator==(const markOop& other) const {
+    return _value == other._value;
+  }
+  bool operator!=(const markOop& other) const {
+    return !operator==(other);
+  }
+
+  // Conversion
+  uintptr_t value() const { return _value; }
+
   // The biased locking code currently requires that the age bits be
   // contiguous to the lock bits.
   enum { lock_shift               = 0,
@@ -164,6 +187,9 @@
 
   enum { max_bias_epoch           = epoch_mask };
 
+  // Creates a markOop with all bits set to zero.
+  static markOop zero() { return markOop(uintptr_t(0)); }
+
   // Biased Locking accessors.
   // These must be checked by all code which calls into the
   // ObjectSynchronizer and other code. The biasing is not understood
@@ -218,13 +244,13 @@
-  // Code that looks at mark outside a lock need to take this into account.
+  // Code that looks at mark outside a lock needs to take this into account.
   bool is_being_inflated() const { return (value() == 0); }
 
-  // Distinguished markword value - used when inflating over
-  // an existing stacklock.  0 indicates the markword is "BUSY".
+  // Distinguished markOop value - used when inflating over
+  // an existing stacklock.  0 indicates the markOop is "BUSY".
   // Lockword mutators that use a LD...CAS idiom should always
   // check for and avoid overwriting a 0 value installed by some
   // other thread.  (They should spin or block instead.  The 0 value
   // is transient and *should* be short-lived).
-  static markOop INFLATING() { return (markOop) 0; }    // inflate-in-progress
+  static markOop INFLATING() { return zero(); }    // inflate-in-progress
 
   // Should this header be preserved during GC?
   inline bool must_be_preserved(oop obj_containing_mark) const;
@@ -294,17 +320,17 @@
   markOop copy_set_hash(intptr_t hash) const {
     intptr_t tmp = value() & (~hash_mask_in_place);
     tmp |= ((hash & hash_mask) << hash_shift);
-    return (markOop)tmp;
+    return markOop(tmp);
   }
   // it is only used to be stored into BasicLock as the
   // indicator that the lock is using heavyweight monitor
   static markOop unused_mark() {
-    return (markOop) marked_value;
+    return markOop(marked_value);
   }
   // the following two functions create the markOop to be
   // stored into object header, it encodes monitor info
   static markOop encode(BasicLock* lock) {
-    return (markOop) lock;
+    return from_pointer(lock);
   }
   static markOop encode(ObjectMonitor* monitor) {
     intptr_t tmp = (intptr_t) monitor;
@@ -330,7 +356,7 @@
     assert((v & ~age_mask) == 0, "shouldn't overflow age field");
     return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
   }
-  markOop incr_age()          const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
+  markOop incr_age()          const { return age() == max_age ? markOop(_value) : set_age(age() + 1); }
 
   // hash operations
   intptr_t hash() const {
@@ -353,10 +379,10 @@
   void print_on(outputStream* st) const;
 
   // Prepare address of oop for placement into mark
-  inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
+  inline static markOop encode_pointer_as_mark(void* p) { return from_pointer(p).set_marked(); }
 
   // Recover address of oop from encoded form used in mark
-  inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
+  inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return (void*)clear_lock_bits().value(); }
 
   // These markOops indicate cms free chunk blocks and not objects.
   // In 64 bit, the markOop is set to distinguish them from oops.
@@ -375,7 +401,7 @@
 
 #ifdef _LP64
   static markOop cms_free_prototype() {
-    return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
+    return markOop(((intptr_t)prototype().value() & ~cms_mask_in_place) |
                    ((cms_free_chunk_pattern & cms_mask) << cms_shift));
   }
   uintptr_t cms_encoding() const {
@@ -389,10 +415,20 @@
   size_t get_size() const       { return (size_t)(value() >> size_shift); }
   static markOop set_size_and_free(size_t size) {
     assert((size & ~size_mask) == 0, "shouldn't overflow size field");
-    return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
+    return markOop(((intptr_t)cms_free_prototype().value() & ~size_mask_in_place) |
                    (((intptr_t)size & size_mask) << size_shift));
   }
 #endif // _LP64
 };
 
+// Support atomic operations.
+template<>
+struct PrimitiveConversions::Translate<markOop> : public TrueType {
+  typedef markOop Value;
+  typedef uintptr_t Decayed;
+
+  static Decayed decay(const Value& x) { return x.value(); }
+  static Value recover(Decayed x) { return Value(x); }
+};
+
 #endif // SHARE_VM_OOPS_MARKOOP_HPP
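
This file is the heart of the change. Previously markOop was a typedef for
markOopDesc*, and the mark bits were stored in the pointer value itself, so
every accessor was a member function invoked through a pointer that never
pointed at a real object. Calling a member function with such an invalid "this"
is undefined behavior, which Clang 13 started to exploit. With the value class
the bits live in the _value member and no bogus dereference ever happens. A
minimal sketch of the old and new shape (illustrative, not code from this
patch):

    // Old shape: bits smuggled into a pointer, then used as an object.
    //   markOop m = (markOop)0x5;   // 0x5 is not the address of any object
    //   m->is_unlocked();           // UB: member call with an invalid this
    // New shape: a trivially copyable value.
    markOop m(0x5);
    bool unlocked = m.is_unlocked(); // plain read of the _value member

The PrimitiveConversions::Translate specialization is what keeps the lock-free
paths working: Atomic decays a markOop to its uintptr_t representation, performs
the word-sized operation, and recovers a markOop from the result. A usage sketch
under those assumptions:

    volatile markOop slot = markOop::zero();
    Atomic::store(markOop::prototype(), &slot);  // stores the decayed word
    markOop observed = Atomic::load(&slot);      // recovers a markOop value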
diff --git a/src/hotspot/share/oops/markOop.inline.hpp b/src/hotspot/share/oops/markOop.inline.hpp
index 77d2fee..d6e653d 100644
--- a/src/hotspot/share/oops/markOop.inline.hpp
+++ b/src/hotspot/share/oops/markOop.inline.hpp
@@ -31,7 +31,7 @@
 #include "runtime/globals.hpp"
 
 // Should this header be preserved during GC (when biased locking is enabled)?
-inline bool markOopDesc::must_be_preserved_with_bias(oop obj_containing_mark) const {
+inline bool markOop::must_be_preserved_with_bias(oop obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   if (has_bias_pattern()) {
     // Will reset bias at end of collection
@@ -39,7 +39,7 @@
     return false;
   }
   markOop prototype_header = prototype_for_object(obj_containing_mark);
-  if (prototype_header->has_bias_pattern()) {
+  if (prototype_header.has_bias_pattern()) {
     // Individual instance which has its bias revoked; must return
     // true for correctness
     return true;
@@ -48,7 +48,7 @@
 }
 
 // Should this header be preserved during GC?
-inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
+inline bool markOop::must_be_preserved(oop obj_containing_mark) const {
   if (!UseBiasedLocking)
     return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias(obj_containing_mark);
@@ -56,7 +56,7 @@
 
 // Should this header be preserved in the case of a promotion failure
 // during scavenge (when biased locking is enabled)?
-inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
+inline bool markOop::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   // We don't explicitly save off the mark words of biased and
   // currently-locked objects during scavenges, so if during a
@@ -68,7 +68,7 @@
   // BiasedLocking::preserve_marks() / restore_marks() in the middle
   // of a scavenge when a promotion failure has first been detected.
   if (has_bias_pattern() ||
-      prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
+      prototype_for_object(obj_containing_mark).has_bias_pattern()) {
     return true;
   }
   return (!is_unlocked() || !has_no_hash());
@@ -76,7 +76,7 @@
 
 // Should this header be preserved in the case of a promotion failure
 // during scavenge?
-inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
+inline bool markOop::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
   if (!UseBiasedLocking)
     return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
@@ -85,11 +85,11 @@
 
 // Same as must_be_preserved_with_bias_for_promotion_failure() except that
 // it takes a Klass* argument, instead of the object of which this is the mark word.
-inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
+inline bool markOop::must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   // CMS scavenges preserve mark words in similar fashion to promotion failures; see above
   if (has_bias_pattern() ||
-      klass_of_obj_containing_mark->prototype_header()->has_bias_pattern()) {
+      klass_of_obj_containing_mark->prototype_header().has_bias_pattern()) {
     return true;
   }
   return (!is_unlocked() || !has_no_hash());
@@ -97,16 +97,16 @@
 
 // Same as must_be_preserved_for_promotion_failure() except that
 // it takes a Klass* argument, instead of the object of which this is the mark word.
-inline bool markOopDesc::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
+inline bool markOop::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
   if (!UseBiasedLocking)
     return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
 }
 
-inline markOop markOopDesc::prototype_for_object(oop obj) {
+inline markOop markOop::prototype_for_object(oop obj) {
 #ifdef ASSERT
   markOop prototype_header = obj->klass()->prototype_header();
-  assert(prototype_header == prototype() || prototype_header->has_bias_pattern(), "corrupt prototype header");
+  assert(prototype_header == prototype() || prototype_header.has_bias_pattern(), "corrupt prototype header");
 #endif
   return obj->klass()->prototype_header();
 }
diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp
index 0470cbb..a7bbd6b 100644
--- a/src/hotspot/share/oops/oop.cpp
+++ b/src/hotspot/share/oops/oop.cpp
@@ -130,7 +130,7 @@
   if (ignore_mark_word) {
     return true;
   }
-  if (obj->mark_raw() != NULL) {
+  if (obj->mark_raw().value() != 0) {
     return true;
   }
   return !SafepointSynchronize::is_at_safepoint();
diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp
index 4430530..6311ee9 100644
--- a/src/hotspot/share/oops/oop.hpp
+++ b/src/hotspot/share/oops/oop.hpp
@@ -28,6 +28,7 @@
 #include "memory/iterator.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/access.hpp"
+#include "oops/markOop.hpp"
 #include "oops/metadata.hpp"
 #include "runtime/atomic.hpp"
 #include "utilities/macros.hpp"
diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp
index 6c631f5..9eefc62 100644
--- a/src/hotspot/share/oops/oop.inline.hpp
+++ b/src/hotspot/share/oops/oop.inline.hpp
@@ -44,23 +44,24 @@
 // We need a separate file to avoid circular references
 
 markOop  oopDesc::mark()      const {
-  return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
+  uintptr_t v = HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
+  return markOop(v);
 }
 
 markOop  oopDesc::mark_raw()  const {
-  return _mark;
+  return Atomic::load(&_mark);
 }
 
 markOop* oopDesc::mark_addr_raw() const {
   return (markOop*) &_mark;
 }
 
-void oopDesc::set_mark(volatile markOop m) {
-  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
+void oopDesc::set_mark(markOop m) {
+  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
 }
 
-void oopDesc::set_mark_raw(volatile markOop m) {
-  _mark = m;
+void oopDesc::set_mark_raw(markOop m) {
+  Atomic::store(m, &_mark);
 }
 
 void oopDesc::set_mark_raw(HeapWord* mem, markOop m) {
@@ -68,11 +69,12 @@
 }
 
 void oopDesc::release_set_mark(markOop m) {
-  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
+  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
 }
 
 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
-  return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
+  uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(new_mark.value(), as_oop(), mark_offset_in_bytes(), old_mark.value());
+  return markOop(v);
 }
 
 markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order) {
@@ -80,11 +82,11 @@
 }
 
 void oopDesc::init_mark() {
-  set_mark(markOopDesc::prototype_for_object(this));
+  set_mark(markOop::prototype_for_object(this));
 }
 
 void oopDesc::init_mark_raw() {
-  set_mark_raw(markOopDesc::prototype_for_object(this));
+  set_mark_raw(markOop::prototype_for_object(this));
 }
 
 Klass* oopDesc::klass() const {
@@ -318,31 +320,31 @@
 inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
 
 bool oopDesc::is_locked() const {
-  return mark()->is_locked();
+  return mark().is_locked();
 }
 
 bool oopDesc::is_unlocked() const {
-  return mark()->is_unlocked();
+  return mark().is_unlocked();
 }
 
 bool oopDesc::has_bias_pattern() const {
-  return mark()->has_bias_pattern();
+  return mark().has_bias_pattern();
 }
 
 bool oopDesc::has_bias_pattern_raw() const {
-  return mark_raw()->has_bias_pattern();
+  return mark_raw().has_bias_pattern();
 }
 
 // Used only for markSweep, scavenging
 bool oopDesc::is_gc_marked() const {
-  return mark_raw()->is_marked();
+  return mark_raw().is_marked();
 }
 
 // Used by scavengers
 bool oopDesc::is_forwarded() const {
   // The extra heap check is needed since the obj might be locked, in which case the
   // mark would point to a stack location and have the sentinel bit cleared
-  return mark_raw()->is_marked();
+  return mark_raw().is_marked();
 }
 
 // Used by scavengers
@@ -354,8 +356,8 @@
   assert(!MetaspaceShared::is_archive_object(oop(this)) &&
          !MetaspaceShared::is_archive_object(p),
          "forwarding archive object");
-  markOop m = markOopDesc::encode_pointer_as_mark(p);
-  assert(m->decode_pointer() == p, "encoding must be reversable");
+  markOop m = markOop::encode_pointer_as_mark(p);
+  assert(m.decode_pointer() == p, "encoding must be reversible");
   set_mark_raw(m);
 }
 
@@ -365,20 +367,20 @@
          "forwarding to something not aligned");
   assert(Universe::heap()->is_in_reserved(p),
          "forwarding to something not in heap");
-  markOop m = markOopDesc::encode_pointer_as_mark(p);
-  assert(m->decode_pointer() == p, "encoding must be reversable");
+  markOop m = markOop::encode_pointer_as_mark(p);
+  assert(m.decode_pointer() == p, "encoding must be reversible");
   return cas_set_mark_raw(m, compare, order) == compare;
 }
 
 oop oopDesc::forward_to_atomic(oop p, atomic_memory_order order) {
   markOop oldMark = mark_raw();
-  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
+  markOop forwardPtrMark = markOop::encode_pointer_as_mark(p);
   markOop curMark;
 
-  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
+  assert(forwardPtrMark.decode_pointer() == p, "encoding must be reversible");
   assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
 
-  while (!oldMark->is_marked()) {
+  while (!oldMark.is_marked()) {
     curMark = cas_set_mark_raw(forwardPtrMark, oldMark, order);
     assert(is_forwarded(), "object should have been forwarded");
     if (curMark == oldMark) {
@@ -396,33 +398,32 @@
 // The forwardee is used when copying during scavenge and mark-sweep.
 // It does need to clear the low two locking- and GC-related bits.
 oop oopDesc::forwardee() const {
-  return (oop) mark_raw()->decode_pointer();
+  return (oop) mark_raw().decode_pointer();
 }
 
 // Note that the forwardee is not the same thing as the displaced_mark.
 // The forwardee is used when copying during scavenge and mark-sweep.
 // It does need to clear the low two locking- and GC-related bits.
 oop oopDesc::forwardee_acquire() const {
-  markOop m = OrderAccess::load_acquire(&_mark);
-  return (oop) m->decode_pointer();
+  return (oop) OrderAccess::load_acquire(&_mark).decode_pointer();
 }
 
 // The following method needs to be MT safe.
 uint oopDesc::age() const {
   assert(!is_forwarded(), "Attempt to read age from forwarded mark");
   if (has_displaced_mark_raw()) {
-    return displaced_mark_raw()->age();
+    return displaced_mark_raw().age();
   } else {
-    return mark_raw()->age();
+    return mark_raw().age();
   }
 }
 
 void oopDesc::incr_age() {
   assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
   if (has_displaced_mark_raw()) {
-    set_displaced_mark_raw(displaced_mark_raw()->incr_age());
+    set_displaced_mark_raw(displaced_mark_raw().incr_age());
   } else {
-    set_mark_raw(mark_raw()->incr_age());
+    set_mark_raw(mark_raw().incr_age());
   }
 }
 
@@ -489,25 +490,25 @@
   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
   // Note: The mark must be read into local variable to avoid concurrent updates.
   markOop mrk = mark();
-  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
-    return mrk->hash();
-  } else if (mrk->is_marked()) {
-    return mrk->hash();
+  if (mrk.is_unlocked() && !mrk.has_no_hash()) {
+    return mrk.hash();
+  } else if (mrk.is_marked()) {
+    return mrk.hash();
   } else {
     return slow_identity_hash();
   }
 }
 
 bool oopDesc::has_displaced_mark_raw() const {
-  return mark_raw()->has_displaced_mark_helper();
+  return mark_raw().has_displaced_mark_helper();
 }
 
 markOop oopDesc::displaced_mark_raw() const {
-  return mark_raw()->displaced_mark_helper();
+  return mark_raw().displaced_mark_helper();
 }
 
 void oopDesc::set_displaced_mark_raw(markOop m) {
-  mark_raw()->set_displaced_mark_helper(m);
+  mark_raw().set_displaced_mark_helper(m);
 }
 
 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP
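
All of the oopDesc accessors now move a raw uintptr_t through the Access API (or
Atomic) and re-wrap it, so the memory-ordering behavior is unchanged; only the
C++ type of the mark is different. For example, the scavenger's forwarding
install reduces to a CAS whose witness is compared by value. A sketch of that
pattern (install_forwardee is a hypothetical helper, not part of this patch):

    oop install_forwardee(oop obj, oop fwd) {
      markOop old_mark = obj->mark_raw();
      markOop fwd_mark = markOop::encode_pointer_as_mark(fwd);
      markOop witness  = obj->cas_set_mark_raw(fwd_mark, old_mark);
      // Equal witness means our mark was installed; otherwise another thread
      // won the race and the witness already encodes its forwardee.
      return witness == old_mark ? fwd : (oop)witness.decode_pointer();
    }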
diff --git a/src/hotspot/share/oops/oopsHierarchy.hpp b/src/hotspot/share/oops/oopsHierarchy.hpp
index d419aa1..f5ae408 100644
--- a/src/hotspot/share/oops/oopsHierarchy.hpp
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp
@@ -40,7 +40,6 @@
 typedef juint  narrowKlass;
 
 typedef void* OopOrNarrowOopStar;
-typedef class   markOopDesc*                markOop;
 
 #ifndef CHECK_UNHANDLED_OOPS
 
@@ -120,7 +119,6 @@
   operator oopDesc* () const volatile { return obj(); }
   operator intptr_t* () const         { return (intptr_t*)obj(); }
   operator PromotedObject* () const   { return (PromotedObject*)obj(); }
-  operator markOop () const volatile  { return markOop(obj()); }
   operator address   () const         { return (address)obj(); }
 
   // from javaCalls.cpp
diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp
index 900e956..f243739 100644
--- a/src/hotspot/share/opto/library_call.cpp
+++ b/src/hotspot/share/opto/library_call.cpp
@@ -3961,9 +3961,9 @@
   Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
 
   // Test the header to see if it is unlocked.
-  Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
+  Node *lock_mask      = _gvn.MakeConX(markOop::biased_lock_mask_in_place);
   Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
-  Node *unlocked_val   = _gvn.MakeConX(markOopDesc::unlocked_value);
+  Node *unlocked_val   = _gvn.MakeConX(markOop::unlocked_value);
   Node *chk_unlocked   = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
   Node *test_unlocked  = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
 
@@ -3973,8 +3973,8 @@
   // We depend on hash_mask being at most 32 bits and avoid the use of
   // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
   // vm: see markOop.hpp.
-  Node *hash_mask      = _gvn.intcon(markOopDesc::hash_mask);
-  Node *hash_shift     = _gvn.intcon(markOopDesc::hash_shift);
+  Node *hash_mask      = _gvn.intcon(markOop::hash_mask);
+  Node *hash_shift     = _gvn.intcon(markOop::hash_shift);
   Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
   // This hack lets the hash bits live anywhere in the mark object now, as long
   // as the shift drops the relevant bits into the low 32 bits.  Note that
@@ -3983,7 +3983,7 @@
   hshifted_header      = ConvX2I(hshifted_header);
   Node *hash_val       = _gvn.transform(new AndINode(hshifted_header, hash_mask));
 
-  Node *no_hash_val    = _gvn.intcon(markOopDesc::no_hash);
+  Node *no_hash_val    = _gvn.intcon(markOop::no_hash);
   Node *chk_assigned   = _gvn.transform(new CmpINode( hash_val, no_hash_val));
   Node *test_assigned  = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
 
diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp
index b66982f..9183fe9 100644
--- a/src/hotspot/share/opto/macro.cpp
+++ b/src/hotspot/share/opto/macro.cpp
@@ -1773,7 +1773,7 @@
   if (UseBiasedLocking && (length == NULL)) {
     mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
   } else {
-    mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
+    mark_node = makecon(TypeRawPtr::make((address)markOop::prototype().value()));
   }
   rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
 
@@ -2330,8 +2330,8 @@
 
     // Get fast path - mark word has the biased lock pattern.
     ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node,
-                         markOopDesc::biased_lock_mask_in_place,
-                         markOopDesc::biased_lock_pattern, true);
+                         markOop::biased_lock_mask_in_place,
+                         markOop::biased_lock_pattern, true);
     // fast_lock_region->in(1) is set to slow path.
     fast_lock_mem_phi->init_req(1, mem);
 
@@ -2360,7 +2360,7 @@
 
     // Get slow path - mark word does NOT match the value.
     Node* not_biased_ctrl =  opt_bits_test(ctrl, region, 3, x_node,
-                                      (~markOopDesc::age_mask_in_place), 0);
+                                      (~markOop::age_mask_in_place), 0);
     // region->in(3) is set to fast path - the object is biased to the current thread.
     mem_phi->init_req(3, mem);
 
@@ -2371,7 +2371,7 @@
     // First, check biased pattern.
     // Get fast path - _prototype_header has the same biased lock pattern.
     ctrl =  opt_bits_test(not_biased_ctrl, fast_lock_region, 2, x_node,
-                          markOopDesc::biased_lock_mask_in_place, 0, true);
+                          markOop::biased_lock_mask_in_place, 0, true);
 
     not_biased_ctrl = fast_lock_region->in(2); // Slow path
     // fast_lock_region->in(2) - the prototype header is no longer biased
@@ -2393,7 +2393,7 @@
 
     // Get slow path - mark word does NOT match epoch bits.
     Node* epoch_ctrl =  opt_bits_test(ctrl, rebiased_region, 1, x_node,
-                                      markOopDesc::epoch_mask_in_place, 0);
+                                      markOop::epoch_mask_in_place, 0);
     // The epoch of the current bias is not valid, attempt to rebias the object
     // toward the current thread.
     rebiased_region->init_req(2, epoch_ctrl);
@@ -2403,9 +2403,9 @@
     // rebiased_region->in(1) is set to fast path.
     // The epoch of the current bias is still valid but we know
     // nothing about the owner; it might be set or it might be clear.
-    Node* cmask   = MakeConX(markOopDesc::biased_lock_mask_in_place |
-                             markOopDesc::age_mask_in_place |
-                             markOopDesc::epoch_mask_in_place);
+    Node* cmask   = MakeConX(markOop::biased_lock_mask_in_place |
+                             markOop::age_mask_in_place |
+                             markOop::epoch_mask_in_place);
     Node* old = transform_later(new AndXNode(mark_node, cmask));
     cast_thread = transform_later(new CastP2XNode(ctrl, thread));
     Node* new_mark = transform_later(new OrXNode(cast_thread, old));
@@ -2520,8 +2520,8 @@
 
     Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
     ctrl = opt_bits_test(ctrl, region, 3, mark_node,
-                         markOopDesc::biased_lock_mask_in_place,
-                         markOopDesc::biased_lock_pattern);
+                         markOop::biased_lock_mask_in_place,
+                         markOop::biased_lock_pattern);
   } else {
     region  = new RegionNode(3);
     // create a Phi for the memory state
diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp
index 093c790..28ef478 100644
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp
@@ -965,16 +965,16 @@
     {
       markOop mark = hobj()->mark();
 
-      if (!mark->has_monitor()) {
+      if (!mark.has_monitor()) {
         // this object has a lightweight monitor
 
-        if (mark->has_locker()) {
-          owner = (address)mark->locker(); // save the address of the Lock word
+        if (mark.has_locker()) {
+          owner = (address)mark.locker(); // save the address of the Lock word
         }
         // implied else: no owner
       } else {
         // this object has a heavyweight monitor
-        mon = mark->monitor();
+        mon = mark.monitor();
 
         // The owner field of a heavyweight monitor may be NULL for no
         // owner, a JavaThread * or it may still be the address of the
diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp
index 1baa32c..bd861a5 100644
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp
@@ -1645,7 +1645,7 @@
   void do_object(oop o) {
     if (o != NULL) {
       markOop mark = o->mark();
-      if (mark->is_marked()) {
+      if (mark.is_marked()) {
         o->init_mark();
       }
     }
@@ -1723,23 +1723,23 @@
 // mark an object
 inline void ObjectMarker::mark(oop o) {
   assert(Universe::heap()->is_in(o), "sanity check");
-  assert(!o->mark()->is_marked(), "should only mark an object once");
+  assert(!o->mark().is_marked(), "should only mark an object once");
 
   // object's mark word
   markOop mark = o->mark();
 
-  if (mark->must_be_preserved(o)) {
+  if (mark.must_be_preserved(o)) {
     _saved_mark_stack->push(mark);
     _saved_oop_stack->push(o);
   }
 
   // mark the object
-  o->set_mark(markOopDesc::prototype()->set_marked());
+  o->set_mark(markOop::prototype().set_marked());
 }
 
 // return true if object is marked
 inline bool ObjectMarker::visited(oop o) {
-  return o->mark()->is_marked();
+  return o->mark().is_marked();
 }
 
 // Stack allocated class to help ensure that ObjectMarker is used
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 4485234..f4d967f 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -1666,7 +1666,7 @@
 
 WB_ENTRY(jboolean, WB_IsMonitorInflated(JNIEnv* env, jobject wb, jobject obj))
   oop obj_oop = JNIHandles::resolve(obj);
-  return (jboolean) obj_oop->mark()->has_monitor();
+  return (jboolean) obj_oop->mark().has_monitor();
 WB_END
 
 WB_ENTRY(void, WB_ForceSafepoint(JNIEnv* env, jobject wb))
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index cc9366d..20855a5 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -2879,7 +2879,7 @@
       if (FLAG_SET_CMDLINE(bool, AlwaysTenure, false) != JVMFlag::SUCCESS) {
         return JNI_EINVAL;
       }
-      if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, markOopDesc::max_age + 1) != JVMFlag::SUCCESS) {
+      if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, markOop::max_age + 1) != JVMFlag::SUCCESS) {
         return JNI_EINVAL;
       }
     } else if (match_option(option, "-XX:+AlwaysTenure")) {
diff --git a/src/hotspot/share/runtime/basicLock.cpp b/src/hotspot/share/runtime/basicLock.cpp
index 5d69b30..2090e49 100644
--- a/src/hotspot/share/runtime/basicLock.cpp
+++ b/src/hotspot/share/runtime/basicLock.cpp
@@ -29,8 +29,8 @@
 void BasicLock::print_on(outputStream* st) const {
   st->print("monitor");
   markOop moop = displaced_header();
-  if (moop != NULL)
-    moop->print_on(st);
+  if (moop.value() != 0)
+    moop.print_on(st);
 }
 
 void BasicLock::move_to(oop obj, BasicLock* dest) {
@@ -62,7 +62,7 @@
   // is small (given the support for inflated fast-path locking in the fast_lock, etc)
   // we'll leave that optimization for another time.
 
-  if (displaced_header()->is_neutral()) {
+  if (displaced_header().is_neutral()) {
     ObjectSynchronizer::inflate_helper(obj);
     // WARNING: We can not put check here, because the inflation
     // will not update the displaced header. Once BasicLock is inflated,
@@ -75,6 +75,6 @@
     // we can find any flavor mark in the displaced mark.
   }
 // [RGV] The next line appears to do nothing!
-  intptr_t dh = (intptr_t) displaced_header();
+  intptr_t dh = (intptr_t) displaced_header().value();
   dest->set_displaced_header(displaced_header());
 }
diff --git a/src/hotspot/share/runtime/basicLock.hpp b/src/hotspot/share/runtime/basicLock.hpp
index 68fd6c3..3887417 100644
--- a/src/hotspot/share/runtime/basicLock.hpp
+++ b/src/hotspot/share/runtime/basicLock.hpp
@@ -26,6 +26,7 @@
 #define SHARE_VM_RUNTIME_BASICLOCK_HPP
 
 #include "oops/markOop.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.hpp"
 
 class BasicLock {
@@ -34,8 +35,13 @@
  private:
   volatile markOop _displaced_header;
  public:
-  markOop      displaced_header() const               { return _displaced_header; }
-  void         set_displaced_header(markOop header)   { _displaced_header = header; }
+  markOop displaced_header() const {
+    return Atomic::load(&_displaced_header);
+  }
+
+  void set_displaced_header(markOop header) {
+    Atomic::store(header, &_displaced_header);
+  }
 
   void print_on(outputStream* st) const;
 
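The displaced header moves behind Atomic::load/store because a volatile read or
write of a class type is not guaranteed to compile to a single word access; the
Translate specialization in markOop.hpp is what allows Atomic to treat markOop
as its underlying uintptr_t. A small usage sketch, assuming a stack-allocated
BasicLock (illustrative only):

    BasicLock lock;
    lock.set_displaced_header(markOop::unused_mark()); // atomic store of the raw word
    if (lock.displaced_header().value() != 0) {        // atomic load, then bit test
      lock.print_on(tty);
    }
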
diff --git a/src/hotspot/share/runtime/biasedLocking.cpp b/src/hotspot/share/runtime/biasedLocking.cpp
index a324120..9e15ea3 100644
--- a/src/hotspot/share/runtime/biasedLocking.cpp
+++ b/src/hotspot/share/runtime/biasedLocking.cpp
@@ -49,7 +49,7 @@
 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
 
 static void enable_biased_locking(InstanceKlass* k) {
-  k->set_prototype_header(markOopDesc::biased_locking_prototype());
+  k->set_prototype_header(markOop::biased_locking_prototype());
 }
 
 class VM_EnableBiasedLocking: public VM_Operation {
@@ -150,27 +150,27 @@
   return info;
 }
 
-// After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL,
+// After the call, *biased_locker will be set to obj->mark().biased_locker() if biased_locker != NULL,
 // AND it is a living thread. Otherwise it will not be updated, (i.e. the caller is responsible for initialization).
 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
   markOop mark = obj->mark();
-  if (!mark->has_bias_pattern()) {
+  if (!mark.has_bias_pattern()) {
     if (log_is_enabled(Info, biasedlocking)) {
       ResourceMark rm;
       log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
                               ", mark " INTPTR_FORMAT ", type %s"
                               ", requesting thread " INTPTR_FORMAT
                               " because it's no longer biased)",
-                              p2i((void *)obj), (intptr_t) mark,
+                              p2i((void *)obj), (intptr_t) mark.value(),
                               obj->klass()->external_name(),
                               (intptr_t) requesting_thread);
     }
     return BiasedLocking::NOT_BIASED;
   }
 
-  uint age = mark->age();
-  markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
-  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
+  uint age = mark.age();
+  markOop   biased_prototype = markOop::biased_locking_prototype().set_age(age);
+  markOop unbiased_prototype = markOop::prototype().set_age(age);
 
   // Log at "info" level if not bulk, else "trace" level
   if (!is_bulk) {
@@ -179,9 +179,9 @@
                             INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                             ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
-                            (intptr_t) mark,
+                            (intptr_t) mark.value(),
                             obj->klass()->external_name(),
-                            (intptr_t) obj->klass()->prototype_header(),
+                            (intptr_t) obj->klass()->prototype_header().value(),
                             (allow_rebias ? 1 : 0),
                             (intptr_t) requesting_thread);
   } else {
@@ -190,14 +190,14 @@
                              INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                              " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                              p2i((void *)obj),
-                             (intptr_t) mark,
+                             (intptr_t) mark.value(),
                              obj->klass()->external_name(),
-                             (intptr_t) obj->klass()->prototype_header(),
+                             (intptr_t) obj->klass()->prototype_header().value(),
                              (allow_rebias ? 1 : 0),
                              (intptr_t) requesting_thread);
   }
 
-  JavaThread* biased_thread = mark->biased_locker();
+  JavaThread* biased_thread = mark.biased_locker();
   if (biased_thread == NULL) {
     // Object is anonymously biased. We can get here if, for
     // example, we revoke the bias due to an identity hash code
@@ -262,7 +262,7 @@
                                p2i((void *) mon_info->owner()),
                                p2i((void *) obj));
       // Assume recursive case and fix up highest lock later
-      markOop mark = markOopDesc::encode((BasicLock*) NULL);
+      markOop mark = markOop::encode((BasicLock*) NULL);
       highest_lock = mon_info->lock();
       highest_lock->set_displaced_header(mark);
     } else {
@@ -278,8 +278,8 @@
     // Reset object header to point to displaced mark.
     // Must release storing the lock address for platforms without TSO
     // ordering (e.g. ppc).
-    obj->release_set_mark(markOopDesc::encode(highest_lock));
-    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
+    obj->release_set_mark(markOop::encode(highest_lock));
+    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
     // Log at "info" level if not bulk, else "trace" level
     if (!is_bulk) {
       log_info(biasedlocking)("  Revoked bias of currently-locked object");
@@ -320,7 +320,7 @@
 
 static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
   markOop mark = o->mark();
-  if (!mark->has_bias_pattern()) {
+  if (!mark.has_bias_pattern()) {
     return HR_NOT_BIASED;
   }
 
@@ -381,7 +381,7 @@
                           INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                           (bulk_rebias ? "rebias" : "revoke"),
                           p2i((void *) o),
-                          (intptr_t) o->mark(),
+                          (intptr_t) o->mark().value(),
                           o->klass()->external_name());
 
   jlong cur_time = os::javaTimeMillis();
@@ -405,10 +405,10 @@
       // try to update the epoch -- assume another VM operation came in
       // and reset the header to the unbiased state, which will
       // implicitly cause all existing biases to be revoked
-      if (klass->prototype_header()->has_bias_pattern()) {
-        int prev_epoch = klass->prototype_header()->bias_epoch();
-        klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
-        int cur_epoch = klass->prototype_header()->bias_epoch();
+      if (klass->prototype_header().has_bias_pattern()) {
+        int prev_epoch = klass->prototype_header().bias_epoch();
+        klass->set_prototype_header(klass->prototype_header().incr_bias_epoch());
+        int cur_epoch = klass->prototype_header().bias_epoch();
 
         // Now walk all threads' stacks and adjust epochs of any biased
         // and locked objects of this data type we encounter
@@ -418,10 +418,10 @@
             MonitorInfo* mon_info = cached_monitor_info->at(i);
             oop owner = mon_info->owner();
             markOop mark = owner->mark();
-            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+            if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
               // We might have encountered this object already in the case of recursive locking
-              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
-              owner->set_mark(mark->set_bias_epoch(cur_epoch));
+              assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
+              owner->set_mark(mark.set_bias_epoch(cur_epoch));
             }
           }
         }
@@ -429,7 +429,7 @@
 
       // At this point we're done. All we have to do is potentially
       // adjust the header of the given object to revoke its bias.
-      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
+      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header().has_bias_pattern(), true, requesting_thread, NULL);
     } else {
       if (log_is_enabled(Info, biasedlocking)) {
         ResourceMark rm;
@@ -440,7 +440,7 @@
       // cause future instances to not be biased, but existing biased
       // instances will notice that this implicitly caused their biases
       // to be revoked.
-      klass->set_prototype_header(markOopDesc::prototype());
+      klass->set_prototype_header(markOop::prototype());
 
       // Now walk all threads' stacks and forcibly revoke the biases of
       // any locked and biased objects of this data type we encounter.
@@ -450,7 +450,7 @@
           MonitorInfo* mon_info = cached_monitor_info->at(i);
           oop owner = mon_info->owner();
           markOop mark = owner->mark();
-          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
+          if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
             revoke_bias(owner, false, true, requesting_thread, NULL);
           }
         }
@@ -467,17 +467,17 @@
   BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
 
   if (attempt_rebias_of_object &&
-      o->mark()->has_bias_pattern() &&
-      klass->prototype_header()->has_bias_pattern()) {
-    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
-                                           klass->prototype_header()->bias_epoch());
+      o->mark().has_bias_pattern() &&
+      klass->prototype_header().has_bias_pattern()) {
+    markOop new_mark = markOop::encode(requesting_thread, o->mark().age(),
+                                       klass->prototype_header().bias_epoch());
     o->set_mark(new_mark);
     status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
     log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
   }
 
-  assert(!o->mark()->has_bias_pattern() ||
-         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
+  assert(!o->mark().has_bias_pattern() ||
+         (attempt_rebias_of_object && (o->mark().biased_locker() == requesting_thread)),
          "bug in bulk bias revocation");
 
   return status_code;
@@ -523,13 +523,13 @@
     // there is nothing to do and we avoid a safepoint.
     if (_obj != NULL) {
       markOop mark = (*_obj)()->mark();
-      if (mark->has_bias_pattern()) {
+      if (mark.has_bias_pattern()) {
         return true;
       }
     } else {
       for ( int i = 0 ; i < _objs->length(); i++ ) {
         markOop mark = (_objs->at(i))()->mark();
-        if (mark->has_bias_pattern()) {
+        if (mark.has_bias_pattern()) {
           return true;
         }
       }
@@ -629,7 +629,7 @@
   // update the heuristics because doing so may cause unwanted bulk
   // revocations (which are expensive) to occur.
   markOop mark = obj->mark();
-  if (mark->is_biased_anonymously() && !attempt_rebias) {
+  if (mark.is_biased_anonymously() && !attempt_rebias) {
     // We are probably trying to revoke the bias of this object due to
     // an identity hash code computation. Try to revoke the bias
     // without a safepoint. This is possible if we can successfully
@@ -637,15 +637,15 @@
     // the object, meaning that no other thread has raced to acquire
     // the bias of the object.
     markOop biased_value       = mark;
-    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+    markOop unbiased_prototype = markOop::prototype().set_age(mark.age());
     markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
     if (res_mark == biased_value) {
       return BIAS_REVOKED;
     }
-  } else if (mark->has_bias_pattern()) {
+  } else if (mark.has_bias_pattern()) {
     Klass* k = obj->klass();
     markOop prototype_header = k->prototype_header();
-    if (!prototype_header->has_bias_pattern()) {
+    if (!prototype_header.has_bias_pattern()) {
       // This object has a stale bias from before the bulk revocation
       // for this data type occurred. It's pointless to update the
       // heuristics at this point so simply update the header with a
@@ -654,9 +654,9 @@
       // with it.
       markOop biased_value       = mark;
       markOop res_mark = obj->cas_set_mark(prototype_header, mark);
-      assert(!obj->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
+      assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
       return BIAS_REVOKED;
-    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
+    } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
       // The epoch of this biasing has expired indicating that the
       // object is effectively unbiased. Depending on whether we need
       // to rebias or revoke the bias of this object we can do it
@@ -667,14 +667,14 @@
       if (attempt_rebias) {
         assert(THREAD->is_Java_thread(), "");
         markOop biased_value       = mark;
-        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
+        markOop rebiased_prototype = markOop::encode((JavaThread*) THREAD, mark.age(), prototype_header.bias_epoch());
         markOop res_mark = obj->cas_set_mark(rebiased_prototype, mark);
         if (res_mark == biased_value) {
           return BIAS_REVOKED_AND_REBIASED;
         }
       } else {
         markOop biased_value       = mark;
-        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+        markOop unbiased_prototype = markOop::prototype().set_age(mark.age());
         markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
         if (res_mark == biased_value) {
           return BIAS_REVOKED;
@@ -689,8 +689,8 @@
   } else if (heuristics == HR_SINGLE_REVOKE) {
     Klass *k = obj->klass();
     markOop prototype_header = k->prototype_header();
-    if (mark->biased_locker() == THREAD &&
-        prototype_header->bias_epoch() == mark->bias_epoch()) {
+    if (mark.biased_locker() == THREAD &&
+        prototype_header.bias_epoch() == mark.bias_epoch()) {
       // A thread is trying to revoke the bias of an object biased
       // toward it, again likely due to an identity hash code
       // computation. We can again avoid a safepoint in this case
@@ -814,7 +814,7 @@
             oop owner = mon_info->owner();
             if (owner != NULL) {
               markOop mark = owner->mark();
-              if (mark->has_bias_pattern()) {
+              if (mark.has_bias_pattern()) {
                 _preserved_oop_stack->push(Handle(cur, owner));
                 _preserved_mark_stack->push(mark);
               }
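
The conversions above follow one mechanical rule: markOop used to be a pointer typedef (markOopDesc*), so its members were reached with ->; after this patch markOop is a small value class wrapping the raw mark bits, so the same members are reached with '.'. A minimal sketch of the wrapper this patch assumes (the real class lives in oops/markOop.hpp and carries many more accessors; only members that appear in this diff are shown):

    class markOop {
      uintptr_t _value;  // the raw mark word bits
     public:
      explicit markOop(uintptr_t v) : _value(v) {}
      static markOop zero()                { return markOop(uintptr_t(0)); }
      static markOop from_pointer(void* p) { return markOop((uintptr_t) p); }
      uintptr_t value() const              { return _value; }
      void*     to_pointer() const         { return (void*) _value; }
      bool operator==(markOop other) const { return _value == other._value; }
      bool operator!=(markOop other) const { return !operator==(other); }
    };
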
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index c87fd02..e3bdf6f 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -1106,14 +1106,14 @@
       if (!mon_info->owner_is_scalar_replaced()) {
         Handle obj(thread, mon_info->owner());
         markOop mark = obj->mark();
-        if (UseBiasedLocking && mark->has_bias_pattern()) {
+        if (UseBiasedLocking && mark.has_bias_pattern()) {
           // Newly allocated objects may have the mark set to anonymously biased.
           // Also, the deoptimized method may have called methods with synchronization
           // where the thread-local object is bias-locked to the current thread.
-          assert(mark->is_biased_anonymously() ||
-                 mark->biased_locker() == thread, "should be locked to current thread");
+          assert(mark.is_biased_anonymously() ||
+                 mark.biased_locker() == thread, "should be locked to current thread");
           // Reset mark word to unbiased prototype.
-          markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+          markOop unbiased_prototype = markOop::prototype().set_age(mark.age());
           obj->set_mark(unbiased_prototype);
         }
         BasicLock* lock = mon_info->lock();
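
With value semantics, the markOop::prototype().set_age(mark.age()) chain above is pure bit arithmetic: prototype() returns a temporary and set_age() returns a fresh markOop instead of mutating through a pointer. A hedged sketch of what the two calls plausibly reduce to, written only in terms of constants this patch declares elsewhere (the exact definitions live in oops/markOop.hpp):

    static markOop prototype() {  // unlocked, no hash, age 0
      return markOop(markOop::no_hash_in_place | markOop::no_lock_in_place);
    }
    markOop set_age(uintptr_t age) const {  // returns a copy; *this is unchanged
      return markOop((value() & ~markOop::age_mask_in_place) |
                     ((age << markOop::age_shift) & markOop::age_mask_in_place));
    }
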
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index 3d2e4e6..d3d1941 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -2586,7 +2586,7 @@
   product(uintx, StringDeduplicationAgeThreshold, 3,                        \
           "A string must reach this age (or be promoted to an old region) " \
           "to be considered for deduplication")                             \
-          range(1, markOopDesc::max_age)                                    \
+          range(1, markOop::max_age)                                        \
                                                                             \
   diagnostic(bool, StringDeduplicationResizeALot, false,                    \
           "Force table resize every time the table is scanned")             \
diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp
index a9690c2..e9f7f49 100644
--- a/src/hotspot/share/runtime/objectMonitor.cpp
+++ b/src/hotspot/share/runtime/objectMonitor.cpp
@@ -302,7 +302,7 @@
   if (Knob_SpinEarly && TrySpin (Self) > 0) {
     assert(_owner == Self, "invariant");
     assert(_recursions == 0, "invariant");
-    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+    assert(((oop)(object()))->mark() == markOop::encode(this), "invariant");
     Self->_Stalled = 0;
     return;
   }
@@ -387,7 +387,7 @@
   assert(_recursions == 0, "invariant");
   assert(_owner == Self, "invariant");
   assert(_succ != Self, "invariant");
-  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  assert(((oop)(object()))->mark() == markOop::encode(this), "invariant");
 
   // The thread -- now the owner -- is back in vm mode.
   // Report the glorious news via TI,DTrace and jvmstat.
@@ -621,7 +621,7 @@
   assert(_owner == Self, "invariant");
   assert(object() != NULL, "invariant");
   // I'd like to write:
-  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
+  //   guarantee (((oop)(object()))->mark() == markOop::encode(this), "invariant") ;
   // but as we're at a safepoint that's not safe.
 
   UnlinkAfterAcquire(Self, &node);
@@ -694,7 +694,7 @@
   assert(SelfNode != NULL, "invariant");
   assert(SelfNode->_thread == Self, "invariant");
   assert(_waiters > 0, "invariant");
-  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  assert(((oop)(object()))->mark() == markOop::encode(this), "invariant");
   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
   JavaThread * jt = (JavaThread *) Self;
 
@@ -769,7 +769,7 @@
   // In addition, Self.TState is stable.
 
   assert(_owner == Self, "invariant");
-  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  assert(((oop)(object()))->mark() == markOop::encode(this), "invariant");
   UnlinkAfterAcquire(Self, SelfNode);
   if (_succ == Self) _succ = NULL;
   assert(_succ != Self, "invariant");
@@ -1620,7 +1620,7 @@
   // Verify a few postconditions
   assert(_owner == Self, "invariant");
   assert(_succ != Self, "invariant");
-  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
+  assert(((oop)(object()))->mark() == markOop::encode(this), "invariant");
 
   if (SyncFlags & 32) {
     OrderAccess::fence();
diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp
index d73c8a9..d122382 100644
--- a/src/hotspot/share/runtime/objectMonitor.hpp
+++ b/src/hotspot/share/runtime/objectMonitor.hpp
@@ -27,6 +27,7 @@
 
 #include "memory/allocation.hpp"
 #include "memory/padded.hpp"
+#include "oops/markOop.hpp"
 #include "runtime/os.hpp"
 #include "runtime/park.hpp"
 #include "runtime/perfData.hpp"
@@ -218,7 +219,7 @@
   static int succ_offset_in_bytes()        { return offset_of(ObjectMonitor, _succ); }
   static int EntryList_offset_in_bytes()   { return offset_of(ObjectMonitor, _EntryList); }
 
-  // ObjectMonitor references can be ORed with markOopDesc::monitor_value
+  // ObjectMonitor references can be ORed with markOop::monitor_value
   // as part of the ObjectMonitor tagging mechanism. When we combine an
   // ObjectMonitor reference with an offset, we need to remove the tag
   // value in order to generate the proper address.
@@ -230,7 +231,7 @@
   // to the ObjectMonitor reference manipulation code:
   //
   #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
-    ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
+    ((ObjectMonitor::f ## _offset_in_bytes()) - markOop::monitor_value)
 
   markOop   header() const;
   volatile markOop* header_addr();
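
The tagging mechanism described above is plain pointer arithmetic: encoding ORs markOop::monitor_value into the low bits of an ObjectMonitor address, and decoding strips those bits again, which is also why OM_OFFSET_NO_MONITOR_VALUE_TAG subtracts monitor_value from a field offset. A hedged sketch of the encode/decode pair, assuming the markOop constants declared in the vmStructs changes below:

    static markOop encode(ObjectMonitor* monitor) {
      return markOop((uintptr_t) monitor | markOop::monitor_value);  // tag the pointer
    }
    ObjectMonitor* monitor() const {  // inverse of encode(); only valid when has_monitor()
      return (ObjectMonitor*) (value() - markOop::monitor_value);    // strip the tag
    }
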
diff --git a/src/hotspot/share/runtime/objectMonitor.inline.hpp b/src/hotspot/share/runtime/objectMonitor.inline.hpp
index 951d647..abb8cc5 100644
--- a/src/hotspot/share/runtime/objectMonitor.inline.hpp
+++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_RUNTIME_OBJECTMONITOR_INLINE_HPP
 #define SHARE_VM_RUNTIME_OBJECTMONITOR_INLINE_HPP
 
+#include "runtime/atomic.hpp"
+
 inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
   if (THREAD == _owner || THREAD->is_lock_owned((address) _owner)) {
     return 1;
@@ -33,7 +35,7 @@
 }
 
 inline markOop ObjectMonitor::header() const {
-  return _header;
+  return Atomic::load(&_header);
 }
 
 inline volatile markOop* ObjectMonitor::header_addr() {
@@ -42,7 +44,7 @@
 }
 
 inline void ObjectMonitor::set_header(markOop hdr) {
-  _header = hdr;
+  Atomic::store(hdr, &_header);
 }
 
 inline jint ObjectMonitor::count() const {
@@ -58,14 +60,14 @@
 }
 
 inline void ObjectMonitor::clear() {
-  assert(_header, "Fatal logic error in ObjectMonitor header!");
+  assert(Atomic::load(&_header).value() != 0, "Fatal logic error in ObjectMonitor header!");
   assert(_count == 0, "Fatal logic error in ObjectMonitor count!");
   assert(_waiters == 0, "Fatal logic error in ObjectMonitor waiters!");
   assert(_recursions == 0, "Fatal logic error in ObjectMonitor recursions!");
   assert(_object != NULL, "Fatal logic error in ObjectMonitor object!");
   assert(_owner == 0, "Fatal logic error in ObjectMonitor owner!");
 
-  _header = NULL;
+  Atomic::store(markOop::zero(), &_header);
   _object = NULL;
 }
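
_header is read and written by multiple threads at once (the hash-code path in synchronizer.cpp, for example, CASes a new header while other threads load it), and in C++ a plain load or store of such a field is a data race, i.e. undefined behavior that a modern optimizer is allowed to act on. Routing every access through Atomic::load and Atomic::store, as the accessors above now do, makes the accesses well-defined. A hedged usage sketch; inspect_header() is a hypothetical caller, and note that HotSpot's Atomic::store takes the new value first and the destination second:

    void inspect_header(ObjectMonitor* m) {
      markOop h = m->header();  // Atomic::load(&_header): a well-defined snapshot
      if (h.value() != 0) {
        // h is a local copy; it cannot tear or change behind our back
      }
    }
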
 
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 595ff74..cee41fc 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -3080,10 +3080,10 @@
     if (kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
       BasicLock *lock = kptr2->lock();
       // Inflate so the displaced header becomes position-independent
-      if (lock->displaced_header()->is_unlocked())
+      if (lock->displaced_header().is_unlocked())
         ObjectSynchronizer::inflate_helper(kptr2->obj());
       // Now the displaced header is free to move
-      buf[i++] = (intptr_t)lock->displaced_header();
+      buf[i++] = (intptr_t)lock->displaced_header().value();
       buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
     }
   }
diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp
index e7b3272..71fc794 100644
--- a/src/hotspot/share/runtime/synchronizer.cpp
+++ b/src/hotspot/share/runtime/synchronizer.cpp
@@ -160,14 +160,14 @@
   if (obj == NULL) return false;  // slow-path for invalid obj
   const markOop mark = obj->mark();
 
-  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
+  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
     // Degenerate notify
     // stack-locked by caller so by definition the implied waitset is empty.
     return true;
   }
 
-  if (mark->has_monitor()) {
-    ObjectMonitor * const mon = mark->monitor();
+  if (mark.has_monitor()) {
+    ObjectMonitor * const mon = mark.monitor();
     assert(mon->object() == obj, "invariant");
     if (mon->owner() != self) return false;  // slow-path for IMS exception
 
@@ -210,8 +210,8 @@
   if (obj == NULL) return false;       // Need to throw NPE
   const markOop mark = obj->mark();
 
-  if (mark->has_monitor()) {
-    ObjectMonitor * const m = mark->monitor();
+  if (mark.has_monitor()) {
+    ObjectMonitor * const m = mark.monitor();
     assert(m->object() == obj, "invariant");
     Thread * const owner = (Thread *) m->_owner;
 
@@ -235,7 +235,7 @@
     // stack-locking in the object's header, the third check is for
     // recursive stack-locking in the displaced header in the BasicLock,
     // and last are the inflated Java Monitor (ObjectMonitor) checks.
-    lock->set_displaced_header(markOopDesc::unused_mark());
+    lock->set_displaced_header(markOop::unused_mark());
 
     if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
       assert(m->_recursions == 0, "invariant");
@@ -273,7 +273,7 @@
       assert(!attempt_rebias, "can not rebias toward VM thread");
       BiasedLocking::revoke_at_safepoint(obj);
     }
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
   slow_enter(obj, lock, THREAD);
@@ -282,22 +282,22 @@
 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
   markOop mark = object->mark();
   // We cannot check for Biased Locking if we are racing an inflation.
-  assert(mark == markOopDesc::INFLATING() ||
-         !mark->has_bias_pattern(), "should not see bias pattern here");
+  assert(mark == markOop::INFLATING() ||
+         !mark.has_bias_pattern(), "should not see bias pattern here");
 
   markOop dhw = lock->displaced_header();
-  if (dhw == NULL) {
+  if (dhw.value() == 0) {
-    // If the displaced header is NULL, then this exit matches up with
+    // If the displaced header is zero, then this exit matches up with
     // a recursive enter. No real work to do here except for diagnostics.
 #ifndef PRODUCT
-    if (mark != markOopDesc::INFLATING()) {
+    if (mark != markOop::INFLATING()) {
       // Only do diagnostics if we are not racing an inflation. Simply
       // exiting a recursive enter of a Java Monitor that is being
       // inflated is safe; see the has_monitor() comment below.
-      assert(!mark->is_neutral(), "invariant");
-      assert(!mark->has_locker() ||
-             THREAD->is_lock_owned((address)mark->locker()), "invariant");
-      if (mark->has_monitor()) {
+      assert(!mark.is_neutral(), "invariant");
+      assert(!mark.has_locker() ||
+             THREAD->is_lock_owned((address)mark.locker()), "invariant");
+      if (mark.has_monitor()) {
         // The BasicLock's displaced_header is marked as a recursive
         // enter and we have an inflated Java Monitor (ObjectMonitor).
         // This is a special case where the Java Monitor was inflated
@@ -306,7 +306,7 @@
         // Monitor owner's stack and update the BasicLocks because a
         // Java Monitor can be asynchronously inflated by a thread that
         // does not own the Java Monitor.
-        ObjectMonitor * m = mark->monitor();
+        ObjectMonitor * m = mark.monitor();
         assert(((oop)(m->object()))->mark() == mark, "invariant");
         assert(m->is_entered(THREAD), "invariant");
       }
@@ -315,10 +315,10 @@
     return;
   }
 
-  if (mark == (markOop) lock) {
+  if (mark == markOop::from_pointer(lock)) {
     // If the object is stack-locked by the current thread, try to
     // swing the displaced header from the BasicLock back to the mark.
-    assert(dhw->is_neutral(), "invariant");
+    assert(dhw.is_neutral(), "invariant");
     if (object->cas_set_mark(dhw, mark) == mark) {
       TEVENT(fast_exit: release stack-lock);
       return;
@@ -338,22 +338,22 @@
 // failed in the interpreter/compiler code.
 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
   markOop mark = obj->mark();
-  assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+  assert(!mark.has_bias_pattern(), "should not see bias pattern here");
 
-  if (mark->is_neutral()) {
+  if (mark.is_neutral()) {
     // Anticipate successful CAS -- the ST of the displaced mark must
     // be visible <= the ST performed by the CAS.
     lock->set_displaced_header(mark);
-    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
+    if (mark == obj()->cas_set_mark(markOop::from_pointer(lock), mark)) {
       TEVENT(slow_enter: release stacklock);
       return;
     }
     // Fall through to inflate() ...
-  } else if (mark->has_locker() &&
-             THREAD->is_lock_owned((address)mark->locker())) {
-    assert(lock != mark->locker(), "must not re-lock the same lock");
-    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
-    lock->set_displaced_header(NULL);
+  } else if (mark.has_locker() &&
+             THREAD->is_lock_owned((address)mark.locker())) {
+    assert(lock != mark.locker(), "must not re-lock the same lock");
+    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
+    lock->set_displaced_header(markOop::from_pointer(NULL));
     return;
   }
 
@@ -361,7 +361,7 @@
   // so it does not matter what the value is, except that it
   // must be non-zero to avoid looking like a re-entrant lock,
   // and must not look locked either.
-  lock->set_displaced_header(markOopDesc::unused_mark());
+  lock->set_displaced_header(markOop::unused_mark());
   ObjectSynchronizer::inflate(THREAD,
                               obj(),
                               inflate_cause_monitor_enter)->enter(THREAD);
@@ -391,7 +391,7 @@
   TEVENT(complete_exit);
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
@@ -406,7 +406,7 @@
   TEVENT(reenter);
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
@@ -423,7 +423,7 @@
   TEVENT(jni_enter);
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
   THREAD->set_current_pending_monitor_is_from_java(false);
   ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
@@ -438,7 +438,7 @@
     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
     obj = h_obj();
   }
-  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
 
   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                        obj,
@@ -479,7 +479,7 @@
 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
     TEVENT(wait - throw IAX);
@@ -502,7 +502,7 @@
 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
     TEVENT(wait - throw IAX);
@@ -516,11 +516,11 @@
 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
   markOop mark = obj->mark();
-  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
     return;
   }
   ObjectSynchronizer::inflate(THREAD,
@@ -532,11 +532,11 @@
 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
   markOop mark = obj->mark();
-  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
     return;
   }
   ObjectSynchronizer::inflate(THREAD,
@@ -583,14 +583,14 @@
 
 static markOop ReadStableMark(oop obj) {
   markOop mark = obj->mark();
-  if (!mark->is_being_inflated()) {
+  if (!mark.is_being_inflated()) {
     return mark;       // normal fast-path return
   }
 
   int its = 0;
   for (;;) {
     markOop mark = obj->mark();
-    if (!mark->is_being_inflated()) {
+    if (!mark.is_being_inflated()) {
       return mark;    // normal fast-path return
     }
 
@@ -630,7 +630,7 @@
         assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
         assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
         Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
-        while (obj->mark() == markOopDesc::INFLATING()) {
+        while (obj->mark() == markOop::INFLATING()) {
           // Beware: NakedYield() is advisory and has almost no effect on some platforms
           // so we periodically call Self->_ParkEvent->park(1).
           // We use a mixed spin/yield/block mechanism.
@@ -700,9 +700,9 @@
     value = v;
   }
 
-  value &= markOopDesc::hash_mask;
+  value &= markOop::hash_mask;
   if (value == 0) value = 0xBAD;
-  assert(value != markOopDesc::no_hash, "invariant");
+  assert(value != markOop::no_hash, "invariant");
   TEVENT(hashCode: GENERATE);
   return value;
 }
@@ -716,7 +716,7 @@
     // been checked to make sure they can handle a safepoint. The
     // added check of the bias pattern is to avoid useless calls to
     // thread-local storage.
-    if (obj->mark()->has_bias_pattern()) {
+    if (obj->mark().has_bias_pattern()) {
       // Handle for oop obj in case of STW safepoint
       Handle hobj(Self, obj);
       // Relaxing assertion for bug 6320749.
@@ -725,7 +725,7 @@
              "biases should not be seen by VM thread here");
       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
       obj = hobj();
-      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
     }
   }
 
@@ -744,15 +744,15 @@
   markOop mark = ReadStableMark(obj);
 
   // object should remain ineligible for biased locking
-  assert(!mark->has_bias_pattern(), "invariant");
+  assert(!mark.has_bias_pattern(), "invariant");
 
-  if (mark->is_neutral()) {
-    hash = mark->hash();              // this is a normal header
+  if (mark.is_neutral()) {
+    hash = mark.hash();              // this is a normal header
     if (hash) {                       // if it has hash, just return it
       return hash;
     }
     hash = get_next_hash(Self, obj);  // allocate a new hash code
-    temp = mark->copy_set_hash(hash); // merge the hash code into header
+    temp = mark.copy_set_hash(hash); // merge the hash code into header
     // use (machine word version) atomic operation to install the hash
     test = obj->cas_set_mark(temp, mark);
     if (test == mark) {
@@ -761,19 +761,19 @@
    // If the atomic operation fails, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
-  } else if (mark->has_monitor()) {
-    monitor = mark->monitor();
+  } else if (mark.has_monitor()) {
+    monitor = mark.monitor();
     temp = monitor->header();
-    assert(temp->is_neutral(), "invariant");
-    hash = temp->hash();
+    assert(temp.is_neutral(), "invariant");
+    hash = temp.hash();
     if (hash) {
       return hash;
     }
     // Skip to the following code to reduce code size
-  } else if (Self->is_lock_owned((address)mark->locker())) {
-    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
-    assert(temp->is_neutral(), "invariant");
-    hash = temp->hash();              // by current thread, check if the displaced
+  } else if (Self->is_lock_owned((address)mark.locker())) {
+    temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned
+    assert(temp.is_neutral(), "invariant");
+    hash = temp.hash();              // by current thread, check if the displaced
     if (hash) {                       // header contains hash code
       return hash;
     }
@@ -792,19 +792,19 @@
   monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
   // Load displaced header and check it has hash code
   mark = monitor->header();
-  assert(mark->is_neutral(), "invariant");
-  hash = mark->hash();
+  assert(mark.is_neutral(), "invariant");
+  hash = mark.hash();
   if (hash == 0) {
     hash = get_next_hash(Self, obj);
-    temp = mark->copy_set_hash(hash); // merge hash code into header
+    temp = mark.copy_set_hash(hash); // merge hash code into header
-    assert(temp->is_neutral(), "invariant");
+    assert(temp.is_neutral(), "invariant");
     test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
     if (test != mark) {
       // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of the
      // displaced header, please update this code.
-      hash = test->hash();
-      assert(test->is_neutral(), "invariant");
+      hash = test.hash();
+      assert(test.is_neutral(), "invariant");
       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
     }
   }
@@ -823,7 +823,7 @@
                                                    Handle h_obj) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
-    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
   assert(thread == JavaThread::current(), "Can only be called on current thread");
@@ -832,16 +832,16 @@
   markOop mark = ReadStableMark(obj);
 
   // Uncontended case, header points to stack
-  if (mark->has_locker()) {
-    return thread->is_lock_owned((address)mark->locker());
+  if (mark.has_locker()) {
+    return thread->is_lock_owned((address)mark.locker());
   }
   // Contended case, header points to ObjectMonitor (tagged pointer)
-  if (mark->has_monitor()) {
-    ObjectMonitor* monitor = mark->monitor();
+  if (mark.has_monitor()) {
+    ObjectMonitor* monitor = mark.monitor();
     return monitor->is_entered(thread) != 0;
   }
   // Unlocked case, header in place
-  assert(mark->is_neutral(), "sanity check");
+  assert(mark.is_neutral(), "sanity check");
   return false;
 }
 
@@ -859,10 +859,10 @@
 
   // Possible mark states: neutral, biased, stack-locked, inflated
 
-  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
     // CASE: biased
     BiasedLocking::revoke_and_rebias(h_obj, false, self);
-    assert(!h_obj->mark()->has_bias_pattern(),
+    assert(!h_obj->mark().has_bias_pattern(),
            "biases should be revoked by now");
   }
 
@@ -871,23 +871,23 @@
   markOop mark = ReadStableMark(obj);
 
   // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
-  if (mark->has_locker()) {
-    return self->is_lock_owned((address)mark->locker()) ?
+  if (mark.has_locker()) {
+    return self->is_lock_owned((address)mark.locker()) ?
       owner_self : owner_other;
   }
 
   // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
   // The Object:ObjectMonitor relationship is stable as long as we're
   // not at a safepoint.
-  if (mark->has_monitor()) {
-    void * owner = mark->monitor()->_owner;
+  if (mark.has_monitor()) {
+    void * owner = mark.monitor()->_owner;
     if (owner == NULL) return owner_none;
     return (owner == self ||
             self->is_lock_owned((address)owner)) ? owner_self : owner_other;
   }
 
   // CASE: neutral
-  assert(mark->is_neutral(), "sanity check");
+  assert(mark.is_neutral(), "sanity check");
   return owner_none;           // it's unlocked
 }
 
@@ -899,7 +899,7 @@
     } else {
       BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
     }
-    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
   }
 
   oop obj = h_obj();
@@ -908,13 +908,13 @@
   markOop mark = ReadStableMark(obj);
 
   // Uncontended case, header points to stack
-  if (mark->has_locker()) {
-    owner = (address) mark->locker();
+  if (mark.has_locker()) {
+    owner = (address) mark.locker();
   }
 
   // Contended case, header points to ObjectMonitor (tagged pointer)
-  if (mark->has_monitor()) {
-    ObjectMonitor* monitor = mark->monitor();
+  if (mark.has_monitor()) {
+    ObjectMonitor* monitor = mark.monitor();
     assert(monitor != NULL, "monitor should be non-null");
     owner = (address) monitor->owner();
   }
@@ -927,7 +927,7 @@
   // Unlocked case, header in place
   // Cannot have assertion since this object may have been
   // locked by another thread when reaching here.
-  // assert(mark->is_neutral(), "sanity check");
+  // assert(mark.is_neutral(), "sanity check");
 
   return NULL;
 }
@@ -1374,10 +1374,10 @@
 // Fast path code shared by multiple functions
 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
   markOop mark = obj->mark();
-  if (mark->has_monitor()) {
-    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
-    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
-    return mark->monitor();
+  if (mark.has_monitor()) {
+    assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
+    assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
+    return mark.monitor();
   }
   return ObjectSynchronizer::inflate(Thread::current(),
                                      obj,
@@ -1397,7 +1397,7 @@
 
   for (;;) {
     const markOop mark = object->mark();
-    assert(!mark->has_bias_pattern(), "invariant");
+    assert(!mark.has_bias_pattern(), "invariant");
 
     // The mark can be in one of the following states:
     // *  Inflated     - just return
@@ -1407,8 +1407,8 @@
     // *  BIASED       - Illegal.  We should never see this
 
     // CASE: inflated
-    if (mark->has_monitor()) {
-      ObjectMonitor * inf = mark->monitor();
+    if (mark.has_monitor()) {
+      ObjectMonitor * inf = mark.monitor();
-      assert(inf->header()->is_neutral(), "invariant");
+      assert(inf->header().is_neutral(), "invariant");
       assert(inf->object() == object, "invariant");
       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
@@ -1421,7 +1421,7 @@
     // The INFLATING value is transient.
     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
     // We could always eliminate polling by parking the thread on some auxiliary list.
-    if (mark == markOopDesc::INFLATING()) {
+    if (mark == markOop::INFLATING()) {
       TEVENT(Inflate: spin while INFLATING);
       ReadStableMark(object);
       continue;
@@ -1446,7 +1446,7 @@
     // before or after the CAS(INFLATING) operation.
     // See the comments in omAlloc().
 
-    if (mark->has_locker()) {
+    if (mark.has_locker()) {
       ObjectMonitor * m = omAlloc(Self);
       // Optimistically prepare the objectmonitor - anticipate successful CAS
       // We do this before the CAS in order to minimize the length of time
@@ -1456,7 +1456,7 @@
       m->_recursions   = 0;
       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class
 
-      markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
+      markOop cmp = object->cas_set_mark(markOop::INFLATING(), mark);
       if (cmp != mark) {
         omRelease(Self, m, true);
         continue;       // Interference -- just retry
@@ -1484,7 +1484,7 @@
       // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that
       // would otherwise permit hashCode values to change or "flicker" for an object.
-      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
+      // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
       // 0 serves as a "BUSY" inflate-in-progress indicator.
 
 
@@ -1492,25 +1492,25 @@
       // The owner can't die or unwind past the lock while our INFLATING
       // object is in the mark.  Furthermore the owner can't complete
       // an unlock on the object, either.
-      markOop dmw = mark->displaced_mark_helper();
+      markOop dmw = mark.displaced_mark_helper();
-      assert(dmw->is_neutral(), "invariant");
+      assert(dmw.is_neutral(), "invariant");
 
       // Setup monitor fields to proper values -- prepare the monitor
       m->set_header(dmw);
 
-      // Optimization: if the mark->locker stack address is associated
+      // Optimization: if the mark.locker stack address is associated
       // with this thread we could simply set m->_owner = Self.
       // Note that a thread can inflate an object
       // that it has stack-locked -- as might happen in wait() -- directly
       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
-      m->set_owner(mark->locker());
+      m->set_owner(mark.locker());
       m->set_object(object);
       // TODO-FIXME: assert BasicLock->dhw != 0.
 
       // Must preserve store ordering. The monitor state must
       // be stable at the time of publishing the monitor address.
-      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
-      object->release_set_mark(markOopDesc::encode(m));
+      guarantee(object->mark() == markOop::INFLATING(), "invariant");
+      object->release_set_mark(markOop::encode(m));
 
       // Hopefully the performance counters are allocated on distinct cache lines
       // to avoid false sharing on MP systems ...
@@ -1520,7 +1520,7 @@
         if (object->is_instance()) {
           ResourceMark rm;
           log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-                                      p2i(object), p2i(object->mark()),
+                                      p2i(object), object->mark().value(),
                                       object->klass()->external_name());
         }
       }
@@ -1540,7 +1540,7 @@
     // An inflateTry() method that we could call from fast_enter() and slow_enter()
     // would be useful.
 
-    assert(mark->is_neutral(), "invariant");
+    assert(mark.is_neutral(), "invariant");
     ObjectMonitor * m = omAlloc(Self);
     // prepare m for installation - set monitor to initial state
     m->Recycle();
@@ -1551,7 +1551,7 @@
     m->_Responsible  = NULL;
     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
 
-    if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
+    if (object->cas_set_mark(markOop::encode(m), mark) != mark) {
       m->set_object(NULL);
       m->set_owner(NULL);
       m->Recycle();
@@ -1571,7 +1571,7 @@
       if (object->is_instance()) {
         ResourceMark rm;
         log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
-                                    p2i(object), p2i(object->mark()),
+                                    p2i(object), object->mark().value(),
                                     object->klass()->external_name());
       }
     }
@@ -1622,9 +1622,9 @@
                                          ObjectMonitor** freeTailp) {
   bool deflated;
   // Normal case ... The monitor is associated with obj.
-  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
-  guarantee(mid == obj->mark()->monitor(), "invariant");
-  guarantee(mid->header()->is_neutral(), "invariant");
+  guarantee(obj->mark() == markOop::encode(mid), "invariant");
+  guarantee(mid == obj->mark().monitor(), "invariant");
+  guarantee(mid->header().is_neutral(), "invariant");
 
   if (mid->is_busy()) {
     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
@@ -1639,7 +1639,7 @@
         ResourceMark rm;
         log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                     "mark " INTPTR_FORMAT " , type %s",
-                                    p2i(obj), p2i(obj->mark()),
+                                    p2i(obj), obj->mark().value(),
                                     obj->klass()->external_name());
       }
     }
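
Several changes above replace raw casts between markOop and BasicLock* with explicit markOop::from_pointer()/value() bridges. The underlying encoding is unchanged: a stack-locked object's mark word holds the address of the BasicLock in the owner's frame (the low lock bits of such an address are 00, i.e. "locked"), so ownership tests are pointer comparisons. A hedged sketch of the test slow_enter and fast_exit perform; is_stack_locked_by() is a hypothetical helper:

    bool is_stack_locked_by(oop obj, BasicLock* lock) {
      // mark word == address of this BasicLock  <=>  this frame holds the stack lock
      return obj->mark() == markOop::from_pointer(lock);
    }
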
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index 159e877..721e640 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -177,7 +177,7 @@
 // Support for forcing alignment of thread objects for biased locking
 void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
   if (UseBiasedLocking) {
-    const int alignment = markOopDesc::biased_lock_alignment;
+    const int alignment = markOop::biased_lock_alignment;
     size_t aligned_size = size + (alignment - sizeof(intptr_t));
     void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                           : AllocateHeap(aligned_size, flags, CURRENT_PC,
@@ -304,9 +304,9 @@
 #endif // CHECK_UNHANDLED_OOPS
 #ifdef ASSERT
   if (UseBiasedLocking) {
-    assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
+    assert((((uintptr_t) this) & (markOop::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
     assert(this == _real_malloc_address ||
-           this == align_up(_real_malloc_address, (int)markOopDesc::biased_lock_alignment),
+           this == align_up(_real_malloc_address, (int)markOop::biased_lock_alignment),
            "bug in forced alignment of thread objects");
   }
 #endif // ASSERT
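
The forced alignment above exists because a biased mark word stores the owning JavaThread's address in its upper bits; aligning every Thread to markOop::biased_lock_alignment guarantees the low bits of that address are zero and therefore free for the bias pattern, epoch and age fields. A hedged sketch of the over-allocate-then-align idiom Thread::allocate uses:

    size_t  aligned_size = size + (markOop::biased_lock_alignment - sizeof(intptr_t));
    void*   raw          = AllocateHeap(aligned_size, flags, CURRENT_PC);
    Thread* t            = (Thread*) align_up(raw, (int) markOop::biased_lock_alignment);
    // the low log2(biased_lock_alignment) bits of t are now guaranteed to be zero
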
diff --git a/src/hotspot/share/runtime/vframe.cpp b/src/hotspot/share/runtime/vframe.cpp
index cca3642..7dfb2e4 100644
--- a/src/hotspot/share/runtime/vframe.cpp
+++ b/src/hotspot/share/runtime/vframe.cpp
@@ -222,7 +222,7 @@
       if (monitor->owner() != NULL) {
         // the monitor is associated with an object, i.e., it is locked
 
-        markOop mark = NULL;
+        markOop mark = markOop::zero();
         const char *lock_state = "locked"; // assume we have the monitor locked
         if (!found_first_monitor && frame_count == 0) {
           // If this is the first frame and we haven't found an owned
@@ -231,17 +231,17 @@
           // an inflated monitor that is first on the monitor list in
           // the first frame can block us on a monitor enter.
           mark = monitor->owner()->mark();
-          if (mark->has_monitor() &&
+          if (mark.has_monitor() &&
               ( // we have marked ourselves as pending on this monitor
-                mark->monitor() == thread()->current_pending_monitor() ||
+                mark.monitor() == thread()->current_pending_monitor() ||
                 // we are not the owner of this monitor
-                !mark->monitor()->is_entered(thread())
+                !mark.monitor()->is_entered(thread())
               )) {
             lock_state = "waiting to lock";
           } else {
             // We own the monitor which is not as interesting so
             // disable the extra printing below.
-            mark = NULL;
+            mark = markOop::zero();
           }
         } else if (frame_count != 0) {
           // This is not the first frame so we either own this monitor
@@ -250,23 +250,23 @@
           // numbered frame on the stack, we have to check all the
           // monitors on the list for this frame.
           mark = monitor->owner()->mark();
-          if (mark->has_monitor() &&
+          if (mark.has_monitor() &&
               ( // we have marked ourselves as pending on this monitor
-                mark->monitor() == thread()->current_pending_monitor() ||
+                mark.monitor() == thread()->current_pending_monitor() ||
                 // we are not the owner of this monitor
-                !mark->monitor()->is_entered(thread())
+                !mark.monitor()->is_entered(thread())
               )) {
             lock_state = "waiting to re-lock in wait()";
           } else {
             // We own the monitor which is not as interesting so
             // disable the extra printing below.
-            mark = NULL;
+            mark = markOop::zero();
           }
         }
         print_locked_object_class_name(st, Handle(THREAD, monitor->owner()), lock_state);
-        if (ObjectMonitor::Knob_Verbose && mark != NULL) {
+        if (ObjectMonitor::Knob_Verbose && mark.to_pointer() != NULL) {
           st->print("\t- lockbits=");
-          mark->print_on(st);
+          mark.print_on(st);
           st->cr();
         }
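
Because markOop is no longer a pointer type, there is no NULL to act as the "nothing interesting to print" sentinel; the code above substitutes markOop::zero() and tests mark.to_pointer() != NULL (equivalently, mark.value() != 0) where it previously compared the mark itself against NULL. A hedged one-liner showing the equivalence:

    markOop mark = markOop::zero();
    assert(mark.to_pointer() == NULL && mark.value() == 0, "zero() is the null-mark sentinel");
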
 
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index 8553016..9523c13 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -1285,7 +1285,6 @@
     declare_type(arrayOopDesc, oopDesc)                                   \
       declare_type(objArrayOopDesc, arrayOopDesc)                         \
     declare_type(instanceOopDesc, oopDesc)                                \
-    declare_type(markOopDesc, oopDesc)                                    \
                                                                           \
   /**************************************************/                    \
   /* MetadataOopDesc hierarchy (NOTE: some missing) */                    \
@@ -1325,7 +1324,6 @@
   /* Oops */                                                              \
   /********/                                                              \
                                                                           \
-  declare_oop_type(markOop)                                               \
   declare_oop_type(objArrayOop)                                           \
   declare_oop_type(oop)                                                   \
   declare_oop_type(narrowOop)                                             \
@@ -1981,6 +1979,7 @@
             declare_type(BitMapView, BitMap)                              \
                                                                           \
    declare_integer_type(AccessFlags)  /* FIXME: wrong type (not integer) */\
+   declare_integer_type(markOop)                                          \
   declare_toplevel_type(address)      /* FIXME: should this be an integer type? */\
    declare_integer_type(BasicType)   /* FIXME: wrong type (not integer) */\
   JVMTI_ONLY(declare_toplevel_type(BreakpointInfo))                       \
@@ -2642,45 +2641,45 @@
   /* consistency. The mask constants are the only ones requiring */       \
   /* 64 bits (on 64-bit platforms). */                                    \
                                                                           \
-  declare_constant(markOopDesc::age_bits)                                 \
-  declare_constant(markOopDesc::lock_bits)                                \
-  declare_constant(markOopDesc::biased_lock_bits)                         \
-  declare_constant(markOopDesc::max_hash_bits)                            \
-  declare_constant(markOopDesc::hash_bits)                                \
+  declare_constant(markOop::age_bits)                                     \
+  declare_constant(markOop::lock_bits)                                    \
+  declare_constant(markOop::biased_lock_bits)                             \
+  declare_constant(markOop::max_hash_bits)                                \
+  declare_constant(markOop::hash_bits)                                    \
                                                                           \
-  declare_constant(markOopDesc::lock_shift)                               \
-  declare_constant(markOopDesc::biased_lock_shift)                        \
-  declare_constant(markOopDesc::age_shift)                                \
-  declare_constant(markOopDesc::hash_shift)                               \
+  declare_constant(markOop::lock_shift)                                   \
+  declare_constant(markOop::biased_lock_shift)                            \
+  declare_constant(markOop::age_shift)                                    \
+  declare_constant(markOop::hash_shift)                                   \
                                                                           \
-  declare_constant(markOopDesc::lock_mask)                                \
-  declare_constant(markOopDesc::lock_mask_in_place)                       \
-  declare_constant(markOopDesc::biased_lock_mask)                         \
-  declare_constant(markOopDesc::biased_lock_mask_in_place)                \
-  declare_constant(markOopDesc::biased_lock_bit_in_place)                 \
-  declare_constant(markOopDesc::age_mask)                                 \
-  declare_constant(markOopDesc::age_mask_in_place)                        \
-  declare_constant(markOopDesc::epoch_mask)                               \
-  declare_constant(markOopDesc::epoch_mask_in_place)                      \
-  declare_constant(markOopDesc::hash_mask)                                \
-  declare_constant(markOopDesc::hash_mask_in_place)                       \
-  declare_constant(markOopDesc::biased_lock_alignment)                    \
+  declare_constant(markOop::lock_mask)                                    \
+  declare_constant(markOop::lock_mask_in_place)                           \
+  declare_constant(markOop::biased_lock_mask)                             \
+  declare_constant(markOop::biased_lock_mask_in_place)                    \
+  declare_constant(markOop::biased_lock_bit_in_place)                     \
+  declare_constant(markOop::age_mask)                                     \
+  declare_constant(markOop::age_mask_in_place)                            \
+  declare_constant(markOop::epoch_mask)                                   \
+  declare_constant(markOop::epoch_mask_in_place)                          \
+  declare_constant(markOop::hash_mask)                                    \
+  declare_constant(markOop::hash_mask_in_place)                           \
+  declare_constant(markOop::biased_lock_alignment)                        \
                                                                           \
-  declare_constant(markOopDesc::locked_value)                             \
-  declare_constant(markOopDesc::unlocked_value)                           \
-  declare_constant(markOopDesc::monitor_value)                            \
-  declare_constant(markOopDesc::marked_value)                             \
-  declare_constant(markOopDesc::biased_lock_pattern)                      \
+  declare_constant(markOop::locked_value)                                 \
+  declare_constant(markOop::unlocked_value)                               \
+  declare_constant(markOop::monitor_value)                                \
+  declare_constant(markOop::marked_value)                                 \
+  declare_constant(markOop::biased_lock_pattern)                          \
                                                                           \
-  declare_constant(markOopDesc::no_hash)                                  \
-  declare_constant(markOopDesc::no_hash_in_place)                         \
-  declare_constant(markOopDesc::no_lock_in_place)                         \
-  declare_constant(markOopDesc::max_age)                                  \
+  declare_constant(markOop::no_hash)                                      \
+  declare_constant(markOop::no_hash_in_place)                             \
+  declare_constant(markOop::no_lock_in_place)                             \
+  declare_constant(markOop::max_age)                                      \
                                                                           \
   /* Constants in markOop used by CMS. */                                 \
-  declare_constant(markOopDesc::cms_shift)                                \
-  declare_constant(markOopDesc::cms_mask)                                 \
-  declare_constant(markOopDesc::size_shift)                               \
+  declare_constant(markOop::cms_shift)                                    \
+  declare_constant(markOop::cms_mask)                                     \
+  declare_constant(markOop::size_shift)                                   \
                                                                           \
   /* InvocationCounter constants */                                       \
   declare_constant(InvocationCounter::count_increment)                    \
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
index 796ac85..024f9c9 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
@@ -51,39 +51,39 @@
     Type type  = db.lookupType("oopDesc");
     markField  = type.getCIntegerField("_mark");
 
-    ageBits             = db.lookupLongConstant("markOopDesc::age_bits").longValue();
-    lockBits            = db.lookupLongConstant("markOopDesc::lock_bits").longValue();
-    biasedLockBits      = db.lookupLongConstant("markOopDesc::biased_lock_bits").longValue();
-    maxHashBits         = db.lookupLongConstant("markOopDesc::max_hash_bits").longValue();
-    hashBits            = db.lookupLongConstant("markOopDesc::hash_bits").longValue();
-    lockShift           = db.lookupLongConstant("markOopDesc::lock_shift").longValue();
-    biasedLockShift     = db.lookupLongConstant("markOopDesc::biased_lock_shift").longValue();
-    ageShift            = db.lookupLongConstant("markOopDesc::age_shift").longValue();
-    hashShift           = db.lookupLongConstant("markOopDesc::hash_shift").longValue();
-    lockMask            = db.lookupLongConstant("markOopDesc::lock_mask").longValue();
-    lockMaskInPlace     = db.lookupLongConstant("markOopDesc::lock_mask_in_place").longValue();
-    biasedLockMask      = db.lookupLongConstant("markOopDesc::biased_lock_mask").longValue();
-    biasedLockMaskInPlace  = db.lookupLongConstant("markOopDesc::biased_lock_mask_in_place").longValue();
-    biasedLockBitInPlace  = db.lookupLongConstant("markOopDesc::biased_lock_bit_in_place").longValue();
-    ageMask             = db.lookupLongConstant("markOopDesc::age_mask").longValue();
-    ageMaskInPlace      = db.lookupLongConstant("markOopDesc::age_mask_in_place").longValue();
-    hashMask            = db.lookupLongConstant("markOopDesc::hash_mask").longValue();
-    hashMaskInPlace     = db.lookupLongConstant("markOopDesc::hash_mask_in_place").longValue();
-    biasedLockAlignment  = db.lookupLongConstant("markOopDesc::biased_lock_alignment").longValue();
-    lockedValue         = db.lookupLongConstant("markOopDesc::locked_value").longValue();
-    unlockedValue       = db.lookupLongConstant("markOopDesc::unlocked_value").longValue();
-    monitorValue        = db.lookupLongConstant("markOopDesc::monitor_value").longValue();
-    markedValue         = db.lookupLongConstant("markOopDesc::marked_value").longValue();
-    biasedLockPattern = db.lookupLongConstant("markOopDesc::biased_lock_pattern").longValue();
-    noHash              = db.lookupLongConstant("markOopDesc::no_hash").longValue();
-    noHashInPlace       = db.lookupLongConstant("markOopDesc::no_hash_in_place").longValue();
-    noLockInPlace       = db.lookupLongConstant("markOopDesc::no_lock_in_place").longValue();
-    maxAge              = db.lookupLongConstant("markOopDesc::max_age").longValue();
+    ageBits             = db.lookupLongConstant("markOop::age_bits").longValue();
+    lockBits            = db.lookupLongConstant("markOop::lock_bits").longValue();
+    biasedLockBits      = db.lookupLongConstant("markOop::biased_lock_bits").longValue();
+    maxHashBits         = db.lookupLongConstant("markOop::max_hash_bits").longValue();
+    hashBits            = db.lookupLongConstant("markOop::hash_bits").longValue();
+    lockShift           = db.lookupLongConstant("markOop::lock_shift").longValue();
+    biasedLockShift     = db.lookupLongConstant("markOop::biased_lock_shift").longValue();
+    ageShift            = db.lookupLongConstant("markOop::age_shift").longValue();
+    hashShift           = db.lookupLongConstant("markOop::hash_shift").longValue();
+    lockMask            = db.lookupLongConstant("markOop::lock_mask").longValue();
+    lockMaskInPlace     = db.lookupLongConstant("markOop::lock_mask_in_place").longValue();
+    biasedLockMask      = db.lookupLongConstant("markOop::biased_lock_mask").longValue();
+    biasedLockMaskInPlace  = db.lookupLongConstant("markOop::biased_lock_mask_in_place").longValue();
+    biasedLockBitInPlace  = db.lookupLongConstant("markOop::biased_lock_bit_in_place").longValue();
+    ageMask             = db.lookupLongConstant("markOop::age_mask").longValue();
+    ageMaskInPlace      = db.lookupLongConstant("markOop::age_mask_in_place").longValue();
+    hashMask            = db.lookupLongConstant("markOop::hash_mask").longValue();
+    hashMaskInPlace     = db.lookupLongConstant("markOop::hash_mask_in_place").longValue();
+    biasedLockAlignment  = db.lookupLongConstant("markOop::biased_lock_alignment").longValue();
+    lockedValue         = db.lookupLongConstant("markOop::locked_value").longValue();
+    unlockedValue       = db.lookupLongConstant("markOop::unlocked_value").longValue();
+    monitorValue        = db.lookupLongConstant("markOop::monitor_value").longValue();
+    markedValue         = db.lookupLongConstant("markOop::marked_value").longValue();
+    biasedLockPattern = db.lookupLongConstant("markOop::biased_lock_pattern").longValue();
+    noHash              = db.lookupLongConstant("markOop::no_hash").longValue();
+    noHashInPlace       = db.lookupLongConstant("markOop::no_hash_in_place").longValue();
+    noLockInPlace       = db.lookupLongConstant("markOop::no_lock_in_place").longValue();
+    maxAge              = db.lookupLongConstant("markOop::max_age").longValue();
 
     /* Constants in markOop used by CMS. */
-    cmsShift            = db.lookupLongConstant("markOopDesc::cms_shift").longValue();
-    cmsMask             = db.lookupLongConstant("markOopDesc::cms_mask").longValue();
-    sizeShift           = db.lookupLongConstant("markOopDesc::size_shift").longValue();
+    cmsShift            = db.lookupLongConstant("markOop::cms_shift").longValue();
+    cmsMask             = db.lookupLongConstant("markOop::cms_mask").longValue();
+    sizeShift           = db.lookupLongConstant("markOop::size_shift").longValue();
   }
 
   // Field accessors
diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java
index 33caa4d..e3af480 100644
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java
@@ -134,11 +134,11 @@
     // This is only valid on AMD64.
     final int runtimeCallStackSize = getConstant("frame::arg_reg_save_area_bytes", Integer.class, osArch.equals("amd64") ? null : 0);
 
-    private final int markWordNoHashInPlace = getConstant("markOopDesc::no_hash_in_place", Integer.class);
-    private final int markWordNoLockInPlace = getConstant("markOopDesc::no_lock_in_place", Integer.class);
+    private final int markWordNoHashInPlace = getConstant("markOop::no_hash_in_place", Integer.class);
+    private final int markWordNoLockInPlace = getConstant("markOop::no_lock_in_place", Integer.class);
 
     /**
-     * See {@code markOopDesc::prototype()}.
+     * See {@code markOop::prototype()}.
      */
     long arrayPrototypeMarkWord() {
         return markWordNoHashInPlace | markWordNoLockInPlace;
diff --git a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java
index 5bfbddf..7848136 100644
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java
@@ -399,17 +399,17 @@
 
     public final int osThreadInterruptedOffset = getFieldOffset("OSThread::_interrupted", Integer.class, "jint");
 
-    public final long markOopDescHashShift = getConstant("markOopDesc::hash_shift", Long.class);
+    public final long markOopHashShift = getConstant("markOop::hash_shift", Long.class);
 
-    public final int biasedLockMaskInPlace = getConstant("markOopDesc::biased_lock_mask_in_place", Integer.class);
-    public final int ageMaskInPlace = getConstant("markOopDesc::age_mask_in_place", Integer.class);
-    public final int epochMaskInPlace = getConstant("markOopDesc::epoch_mask_in_place", Integer.class);
-    public final long markOopDescHashMask = getConstant("markOopDesc::hash_mask", Long.class);
-    public final long markOopDescHashMaskInPlace = getConstant("markOopDesc::hash_mask_in_place", Long.class);
+    public final int biasedLockMaskInPlace = getConstant("markOop::biased_lock_mask_in_place", Integer.class);
+    public final int ageMaskInPlace = getConstant("markOop::age_mask_in_place", Integer.class);
+    public final int epochMaskInPlace = getConstant("markOop::epoch_mask_in_place", Integer.class);
+    public final long markOopHashMask = getConstant("markOop::hash_mask", Long.class);
+    public final long markOopHashMaskInPlace = getConstant("markOop::hash_mask_in_place", Long.class);
 
-    public final int unlockedMask = getConstant("markOopDesc::unlocked_value", Integer.class);
-    public final int monitorMask = getConstant("markOopDesc::monitor_value", Integer.class, -1);
-    public final int biasedLockPattern = getConstant("markOopDesc::biased_lock_pattern", Integer.class);
+    public final int unlockedMask = getConstant("markOop::unlocked_value", Integer.class);
+    public final int monitorMask = getConstant("markOop::monitor_value", Integer.class, -1);
+    public final int biasedLockPattern = getConstant("markOop::biased_lock_pattern", Integer.class);
 
     // This field has no type in vmStructs.cpp
     public final int objectMonitorOwner = getFieldOffset("ObjectMonitor::_owner", Integer.class, null, -1);
@@ -417,34 +417,34 @@
     public final int objectMonitorCxq = getFieldOffset("ObjectMonitor::_cxq", Integer.class, "ObjectWaiter*", -1);
     public final int objectMonitorEntryList = getFieldOffset("ObjectMonitor::_EntryList", Integer.class, "ObjectWaiter*", -1);
 
-    public final int markWordNoHashInPlace = getConstant("markOopDesc::no_hash_in_place", Integer.class);
-    public final int markWordNoLockInPlace = getConstant("markOopDesc::no_lock_in_place", Integer.class);
+    public final int markWordNoHashInPlace = getConstant("markOop::no_hash_in_place", Integer.class);
+    public final int markWordNoLockInPlace = getConstant("markOop::no_lock_in_place", Integer.class);
 
     /**
-     * See {@code markOopDesc::prototype()}.
+     * See {@code markOop::prototype()}.
      */
     public long arrayPrototypeMarkWord() {
         return markWordNoHashInPlace | markWordNoLockInPlace;
     }
 
     /**
-     * See {@code markOopDesc::copy_set_hash()}.
+     * See {@code markOop::copy_set_hash()}.
      */
     public long tlabIntArrayMarkWord() {
-        long tmp = arrayPrototypeMarkWord() & (~markOopDescHashMaskInPlace);
-        tmp |= ((0x2 & markOopDescHashMask) << markOopDescHashShift);
+        long tmp = arrayPrototypeMarkWord() & (~markOopHashMaskInPlace);
+        tmp |= ((0x2 & markOopHashMask) << markOopHashShift);
         return tmp;
     }
 
     /**
      * Mark word right shift to get identity hash code.
      */
-    public final int identityHashCodeShift = getConstant("markOopDesc::hash_shift", Integer.class);
+    public final int identityHashCodeShift = getConstant("markOop::hash_shift", Integer.class);
 
     /**
      * Identity hash code value when uninitialized.
      */
-    public final int uninitializedIdentityHashCodeValue = getConstant("markOopDesc::no_hash", Integer.class);
+    public final int uninitializedIdentityHashCodeValue = getConstant("markOop::no_hash", Integer.class);
 
     public final int methodAccessFlagsOffset = getFieldOffset("Method::_access_flags", Integer.class, "AccessFlags");
     public final int methodConstMethodOffset = getFieldOffset("Method::_constMethod", Integer.class, "ConstMethod*");
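tlabIntArrayMarkWord() above clears the hash field of the array prototype and installs the fixed identity hash 0x2. The same arithmetic with assumed 64-bit constants (hash_shift = 8, hash_mask = (1L << 31) - 1) yields 0x201:

    // Sketch only: GraalHotSpotVMConfig reads these values from the VM;
    // here they are the assumed 64-bit defaults.
    public class TlabIntArrayMarkWord {
        static final int  HASH_SHIFT = 8;                 // markOop::hash_shift
        static final long HASH_MASK = (1L << 31) - 1;     // markOop::hash_mask
        static final long HASH_MASK_IN_PLACE = HASH_MASK << HASH_SHIFT;

        public static void main(String[] args) {
            long prototype = 0x1;                         // unlocked, no hash
            long tmp = prototype & ~HASH_MASK_IN_PLACE;   // clear hash field
            tmp |= (0x2 & HASH_MASK) << HASH_SHIFT;       // install hash 0x2
            System.out.println(Long.toHexString(tmp));    // prints "201"
        }
    }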
diff --git a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp
index 77c2fe7..9f7d9cd 100644
--- a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp
+++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp
@@ -45,11 +45,11 @@
   markOop mark() { return _oop.mark_raw(); }
   void set_mark(markOop m) { _oop.set_mark_raw(m); }
   void forward_to(oop obj) {
-    markOop m = markOopDesc::encode_pointer_as_mark(obj);
+    markOop m = markOop::encode_pointer_as_mark(obj);
     _oop.set_mark_raw(m);
   }
 
-  static markOop originalMark() { return markOop(markOopDesc::lock_mask_in_place); }
+  static markOop originalMark() { return markOop(markOop::lock_mask_in_place); }
   static markOop changedMark()  { return markOop(0x4711); }
 };
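In the gtest above, encode_pointer_as_mark() stores a forwarding pointer directly in the mark word by setting the low lock bits to marked_value. A rough Java illustration of that encoding, assuming the 64-bit defaults lock_mask_in_place = 3 and marked_value = 3 (a sketch of the idea, not the HotSpot implementation):

    // Objects are at least 8-byte aligned, so the low mark-word bits are
    // free to hold the lock/marked state while the rest holds the pointer.
    public class ForwardingMark {
        static final long LOCK_MASK_IN_PLACE = 3; // markOop::lock_mask_in_place
        static final long MARKED_VALUE = 3;       // markOop::marked_value

        static long encodePointerAsMark(long objAddress) {
            return (objAddress & ~LOCK_MASK_IN_PLACE) | MARKED_VALUE;
        }

        static long decodeForwardee(long mark) {
            return mark & ~LOCK_MASK_IN_PLACE;
        }
    }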
 
diff --git a/test/hotspot/jtreg/serviceability/sa/ClhsdbAttach.java b/test/hotspot/jtreg/serviceability/sa/ClhsdbAttach.java
index 3311ebf..ca13be0 100644
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbAttach.java
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbAttach.java
@@ -55,7 +55,7 @@
                     "detach",
                     "universe",
                     "reattach",
-                    "longConstant markOopDesc::locked_value");
+                    "longConstant markOop::locked_value");
 
             Map<String, List<String>> expStrMap = new HashMap<>();
             expStrMap.put("where", List.of(
@@ -64,8 +64,8 @@
                     "MaxJavaStackTraceDepth = "));
             expStrMap.put("universe", List.of(
                     "Command not valid until attached to a VM"));
-            expStrMap.put("longConstant markOopDesc::locked_value", List.of(
-                    "longConstant markOopDesc::locked_value"));
+            expStrMap.put("longConstant markOop::locked_value", List.of(
+                    "longConstant markOop::locked_value"));
 
             test.run(-1, cmds, expStrMap, null);
         } catch (Exception ex) {
diff --git a/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java b/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java
index 2931012..df87066 100644
--- a/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java
@@ -51,21 +51,21 @@
 
             List<String> cmds = List.of(
                     "longConstant",
-                    "longConstant markOopDesc::locked_value",
-                    "longConstant markOopDesc::lock_bits",
+                    "longConstant markOop::locked_value",
+                    "longConstant markOop::lock_bits",
                     "longConstant jtreg::test 6",
                     "longConstant jtreg::test");
 
             Map<String, List<String>> expStrMap = new HashMap<>();
             expStrMap.put("longConstant", List.of(
-                    "longConstant markOopDesc::locked_value",
-                    "longConstant markOopDesc::lock_bits",
+                    "longConstant markOop::locked_value",
+                    "longConstant markOop::lock_bits",
                     "InvocationCounter::count_increment",
-                    "markOopDesc::epoch_mask_in_place"));
-            expStrMap.put("longConstant markOopDesc::locked_value", List.of(
-                    "longConstant markOopDesc::locked_value"));
-            expStrMap.put("longConstant markOopDesc::lock_bits", List.of(
-                    "longConstant markOopDesc::lock_bits"));
+                    "markOop::epoch_mask_in_place"));
+            expStrMap.put("longConstant markOop::locked_value", List.of(
+                    "longConstant markOop::locked_value"));
+            expStrMap.put("longConstant markOop::lock_bits", List.of(
+                    "longConstant markOop::lock_bits"));
             expStrMap.put("longConstant jtreg::test", List.of(
                     "longConstant jtreg::test 6"));
 
@@ -96,12 +96,12 @@
         // Expected output snippet is of the form (on x86-64):
         // ...
         // longConstant VM_Version::CPU_SHA 17179869184
-        // longConstant markOopDesc::biased_lock_bits 1
-        // longConstant markOopDesc::age_shift 3
-        // longConstant markOopDesc::hash_mask_in_place 549755813632
+        // longConstant markOop::biased_lock_bits 1
+        // longConstant markOop::age_shift 3
+        // longConstant markOop::hash_mask_in_place 549755813632
         // ...
 
-        checkLongValue("markOopDesc::hash_mask_in_place",
+        checkLongValue("markOop::hash_mask_in_place",
                        longConstantOutput,
                        Platform.is64bit() ? 549755813632L : 4294967168L);
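
The two literals checked above follow directly from the mark-word layout: 64-bit VMs use 31 hash bits at shift 8, 32-bit VMs use 25 hash bits at shift 7. A quick arithmetic check of the expected hash_mask_in_place values:

    // Sketch only: verifies the literals passed to checkLongValue() above.
    public class HashMaskCheck {
        public static void main(String[] args) {
            long mask64 = ((1L << 31) - 1) << 8;  // 31 hash bits, shift 8
            long mask32 = ((1L << 25) - 1) << 7;  // 25 hash bits, shift 7
            System.out.println(mask64);           // 549755813632
            System.out.println(mask32);           // 4294967168
        }
    }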