// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only the properties explicitly guaranteed by
// the specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

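// For illustration only (hypothetical variable names, not part of this
// header's contract), a minimal sketch of the intended Store/Load usage:
//
//   base::subtle::Atomic32 g_flag = 0;
//
//   // Wrong: plain assignments and plain reads bypass the atomic routines.
//   //   g_flag = 1;
//   //   if (g_flag) { ... }
//
//   // Right: go through the Store and Load routines.
//   base::subtle::NoBarrier_Store(&g_flag, 1);
//   base::subtle::Atomic32 value = base::subtle::NoBarrier_Load(&g_flag);
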
#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_
#pragma once

#include "base/basictypes.h"
#include "base/port.h"

namespace base {
namespace subtle {

// Bug 1308991. We need this for /Wp64, to mark it safe for AtomicWord casting.
#ifndef OS_WIN
#define __w64
#endif
typedef __w64 int32 Atomic32;
#ifdef ARCH_CPU_64_BITS
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
typedef intptr_t Atomic64;
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

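// Illustrative sketch (hypothetical names, not part of this header): the
// typical compare-and-swap retry loop, here used to atomically keep the
// maximum of "*ptr" and "candidate":
//
//   void UpdateMax(volatile Atomic32* ptr, Atomic32 candidate) {
//     Atomic32 old_value = NoBarrier_Load(ptr);
//     while (candidate > old_value) {
//       Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, candidate);
//       if (prev == old_value)
//         return;  // The swap succeeded; *ptr now holds candidate.
//       old_value = prev;  // Another thread changed *ptr; retry with it.
//     }
//   }
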
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

// Same as NoBarrier_AtomicIncrement, but also acts as a full memory barrier
// ("Barrier" semantics; see below).
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);

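// Illustrative sketch (hypothetical names): bumping an event counter. Passing
// a negative "increment" decrements the value.
//
//   volatile Atomic32 g_event_count = 0;
//
//   Atomic32 count_after_inc = NoBarrier_AtomicIncrement(&g_event_count, 1);
//   Atomic32 count_after_dec = NoBarrier_AtomicIncrement(&g_event_count, -1);
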
// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);

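// Illustrative sketch (hypothetical names, not part of this header): a
// minimal spinlock built from the Acquire/Release primitives above.
//
//   volatile Atomic32 g_lock = 0;  // 0 == unlocked, 1 == locked.
//
//   void Lock() {
//     // Acquire semantics: memory accesses in the critical section cannot
//     // be reordered ahead of taking the lock.
//     while (Acquire_CompareAndSwap(&g_lock, 0, 1) != 0) {
//       // Spin until the lock is observed unlocked and the swap succeeds.
//     }
//   }
//
//   void Unlock() {
//     // Release semantics: memory accesses in the critical section cannot
//     // be reordered after the unlocking store.
//     Release_Store(&g_lock, 0);
//   }
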
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef ARCH_CPU_64_BITS
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // ARCH_CPU_64_BITS

}  // namespace base::subtle
}  // namespace base

// Include our platform-specific implementation.
#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_msvc.h"
#elif defined(OS_MACOSX) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_macosx.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
#include "base/atomicops_internals_arm_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

#endif  // BASE_ATOMICOPS_H_