// Copyright 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If
// you do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//
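// For illustration only, a minimal sketch of going through these routines
// ("g_flag" is a hypothetical variable, not part of this header):
//
//   base::subtle::Atomic32 g_flag = 0;
//
//   // Correct: use the Load/Store routines.
//   base::subtle::NoBarrier_Store(&g_flag, 1);
//   base::subtle::Atomic32 v = base::subtle::NoBarrier_Load(&g_flag);
//
//   // Incorrect: direct assignment/read of the atomic variable.
//   // g_flag = 1;
//   // v = g_flag;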

#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_

#include "base/basictypes.h"
#include "base/port.h"

namespace base {
namespace subtle {

// Bug 1308991. We need this for /Wp64, to mark it safe for AtomicWord casting.
#ifndef OS_WIN
#define __w64
#endif
typedef __w64 int32 Atomic32;
#ifdef CPU_ARCH_64_BITS
typedef int64 Atomic64;
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
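
// As an illustration (a sketch only, with base::subtle:: qualifiers omitted
// for brevity; UpdateMax is hypothetical, not part of this header), a typical
// compare-and-swap retry loop that records a running maximum:
//
//   void UpdateMax(volatile Atomic32* max, Atomic32 value) {
//     Atomic32 old = NoBarrier_Load(max);
//     while (value > old) {
//       Atomic32 prev = NoBarrier_CompareAndSwap(max, old, value);
//       if (prev == old)
//         break;      // Our swap won; the maximum is now "value".
//       old = prev;   // Another thread updated *max first; retry.
//     }
//   }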

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
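
// For example (a hypothetical one-shot "claim" flag; sketch only):
//
//   Atomic32 g_claimed = 0;
//   bool TryClaim() {
//     // Exactly one caller observes the old value 0 and wins the claim.
//     return NoBarrier_AtomicExchange(&g_claimed, 1) == 0;
//   }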

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

// Same as NoBarrier_AtomicIncrement, but with "Barrier" memory-ordering
// semantics (see the discussion of "Acquire", "Release" and "Barrier" below).
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);
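
// For example, a simple event counter (sketch only; "g_num_events" is a
// hypothetical variable, not part of this header):
//
//   Atomic32 g_num_events = 0;
//   void RecordEvent() { NoBarrier_AtomicIncrement(&g_num_events, 1); }
//   void ForgetEvent() { NoBarrier_AtomicIncrement(&g_num_events, -1); }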

// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
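
// To illustrate the "Acquire"/"Release" pairing (a sketch only; "g_data" and
// "g_ready" are hypothetical variables, not part of this header):
//
//   Atomic32 g_data = 0;
//   Atomic32 g_ready = 0;
//
//   void Publisher() {
//     NoBarrier_Store(&g_data, 42);
//     // Release: the store to g_data cannot be reordered after this store.
//     Release_Store(&g_ready, 1);
//   }
//
//   void Consumer() {
//     // Acquire: later loads cannot be reordered ahead of this load.
//     if (Acquire_Load(&g_ready) == 1) {
//       Atomic32 data = NoBarrier_Load(&g_data);  // Guaranteed to see 42.
//     }
//   }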

// 64-bit atomic operations (only available on 64-bit processors).
#ifdef CPU_ARCH_64_BITS
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // CPU_ARCH_64_BITS

}  // namespace base::subtle
}  // namespace base

// Include our platform-specific implementation.
#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_msvc.h"
#elif defined(OS_APPLE) && defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_macosx.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

#endif  // BASE_ATOMICOPS_H_