Implement base::AtomicRefCount as a class wrapping std::atomic_int.
This is the more standard approach since C++11, and it allows this class to
be encapsulated, preventing misuse (such as performing arbitrary operations
on the underlying integer). Once this lands, call sites will be rewritten to
call the member functions directly.
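
For illustration, a call-site migration would look roughly like this (the
call site and member variable name are hypothetical; only the free functions
and the Increment/Decrement members come from this change):

    // Before: free functions operating on a raw AtomicRefCount word.
    AtomicRefCountInc(&ref_count_);
    if (!AtomicRefCountDec(&ref_count_))
      delete this;

    // After: member functions on the encapsulated class.
    ref_count_.Increment();
    if (!ref_count_.Decrement())
      delete this;
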
A warning must be suppressed in MSVC: std::atomic forces alignment, and MSVC
warns about the end padding that this alignment adds to a class. This occurs
in base::RefCountedThreadSafe in debug builds, and the warning is not
attributed to base/memory/ref_counted.*, so it is hard to suppress narrowly.
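
A sketch of the kind of suppression this implies, assuming MSVC warning C4324
("structure was padded due to alignment specifier"); the exact warning number
and its placement in this CL are assumptions, and the struct below is only a
stand-in for the affected class:

    #include <atomic>

    #if defined(_MSC_VER)
    #pragma warning(push)
    #pragma warning(disable : 4324)  // Padding added due to alignment specifier.
    #endif

    // Explicit alignas stands in for the alignment std::atomic imposes in the
    // real class; the trailing debug bool leaves end padding, which triggers
    // the warning.
    struct alignas(8) PaddedRefCountExample {
      std::atomic_int ref_count{0};
      bool in_dtor = false;
    };

    #if defined(_MSC_VER)
    #pragma warning(pop)
    #endif
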
Bug: 736037
Tbr: [email protected],[email protected]
Change-Id: Ieb0599b4a67a4e8db06bd4d08c3126baea9207e2
Reviewed-on: https://chromium-review.googlesource.com/550378
Commit-Queue: Jeremy Roman <[email protected]>
Reviewed-by: danakj <[email protected]>
Reviewed-by: Bruce Dawson <[email protected]>
Reviewed-by: Taiju Tsuiki <[email protected]>
Cr-Commit-Position: refs/heads/master@{#483238}
diff --git a/base/atomic_ref_count.h b/base/atomic_ref_count.h
index 93c1f0d..cc56f5d 100644
--- a/base/atomic_ref_count.h
+++ b/base/atomic_ref_count.h
@@ -8,38 +8,77 @@
#ifndef BASE_ATOMIC_REF_COUNT_H_
#define BASE_ATOMIC_REF_COUNT_H_
-#include "base/atomicops.h"
+#include <atomic>
namespace base {
-typedef subtle::AtomicWord AtomicRefCount;
+class AtomicRefCount {
+ public:
+ constexpr AtomicRefCount() : ref_count_(0) {}
+ explicit constexpr AtomicRefCount(int initial_value)
+ : ref_count_(initial_value) {}
+
+ // Increment a reference count.
+ void Increment() { Increment(1); }
+
+ // Increment a reference count by "increment", which must exceed 0.
+ void Increment(int increment) {
+ ref_count_.fetch_add(increment, std::memory_order_relaxed);
+ }
+
+ // Decrement a reference count, and return whether the result is non-zero.
+ // Insert barriers to ensure that state written before the reference count
+ // became zero will be visible to a thread that has just made the count zero.
+ bool Decrement() {
+ // TODO(jbroman): Technically this doesn't need to be an acquire operation
+ // unless the result is 1 (i.e., the ref count did indeed reach zero).
+ // However, there are toolchain issues that make that not work as well at
+ // present (notably TSAN doesn't like it).
+ return ref_count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
+ }
+
+ // Return whether the reference count is one. If the reference count is used
+  // in the conventional way, a reference count of 1 implies that the current
+ // thread owns the reference and no other thread shares it. This call
+ // performs the test for a reference count of one, and performs the memory
+ // barrier needed for the owning thread to act on the object, knowing that it
+ // has exclusive access to the object.
+ bool IsOne() const { return ref_count_.load(std::memory_order_acquire) == 1; }
+
+  // Return whether the reference count is zero. With conventional object
+  // reference counting, the object is destroyed once its reference count
+  // reaches zero, so the count should never be observed as zero. Hence this
+  // is generally used only for a debug check.
+ bool IsZero() const {
+ return ref_count_.load(std::memory_order_acquire) == 0;
+ }
+
+ // Returns the current reference count (with no barriers). This is subtle, and
+ // should be used only for debugging.
+ int SubtleRefCountForDebug() const {
+ return ref_count_.load(std::memory_order_relaxed);
+ }
+
+ private:
+ std::atomic_int ref_count_;
+};
+
+// TODO(jbroman): Inline these functions once the above changes stick.
// Increment a reference count by "increment", which must exceed 0.
-inline void AtomicRefCountIncN(volatile AtomicRefCount *ptr,
- AtomicRefCount increment) {
- subtle::NoBarrier_AtomicIncrement(ptr, increment);
-}
-
-// Decrement a reference count by "decrement", which must exceed 0,
-// and return whether the result is non-zero.
-// Insert barriers to ensure that state written before the reference count
-// became zero will be visible to a thread that has just made the count zero.
-inline bool AtomicRefCountDecN(volatile AtomicRefCount *ptr,
- AtomicRefCount decrement) {
- bool res = (subtle::Barrier_AtomicIncrement(ptr, -decrement) != 0);
- return res;
+inline void AtomicRefCountIncN(volatile AtomicRefCount* ptr, int increment) {
+ const_cast<AtomicRefCount*>(ptr)->Increment(increment);
}
// Increment a reference count by 1.
inline void AtomicRefCountInc(volatile AtomicRefCount *ptr) {
- base::AtomicRefCountIncN(ptr, 1);
+ const_cast<AtomicRefCount*>(ptr)->Increment();
}
// Decrement a reference count by 1 and return whether the result is non-zero.
// Insert barriers to ensure that state written before the reference count
// became zero will be visible to a thread that has just made the count zero.
inline bool AtomicRefCountDec(volatile AtomicRefCount *ptr) {
- return base::AtomicRefCountDecN(ptr, 1);
+ return const_cast<AtomicRefCount*>(ptr)->Decrement();
}
// Return whether the reference count is one. If the reference count is used
@@ -49,16 +88,14 @@
// needed for the owning thread to act on the object, knowing that it has
// exclusive access to the object.
inline bool AtomicRefCountIsOne(volatile AtomicRefCount *ptr) {
- bool res = (subtle::Acquire_Load(ptr) == 1);
- return res;
+ return const_cast<AtomicRefCount*>(ptr)->IsOne();
}
// Return whether the reference count is zero. With conventional object
// referencing counting, the object will be destroyed, so the reference count
// should never be zero. Hence this is generally used for a debug check.
inline bool AtomicRefCountIsZero(volatile AtomicRefCount *ptr) {
- bool res = (subtle::Acquire_Load(ptr) == 0);
- return res;
+ return const_cast<AtomicRefCount*>(ptr)->IsZero();
}
} // namespace base
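
For context (not part of this patch), a minimal sketch of how a thread-safe
ref-counted object would use the new class; the Buffer class here is
hypothetical:

    #include "base/atomic_ref_count.h"

    class Buffer {
     public:
      Buffer() : ref_count_(1) {}

      void AddRef() { ref_count_.Increment(); }

      // Decrement() returns false only on the call that drops the count to
      // zero; its acquire/release ordering ensures this thread sees all writes
      // made before other threads released their references.
      void Release() {
        if (!ref_count_.Decrement())
          delete this;
      }

     private:
      ~Buffer() = default;
      base::AtomicRefCount ref_count_;
    };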