// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define BASE_ATOMICOPS_INTERNALS_TSAN_H_

#include "base/base_export.h"

// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
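// A minimal usage sketch (illustrative only, not part of this header): a
// backend that needs the AMD workaround could consult the exported struct at
// runtime, e.g.
//   if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug)
//     __asm__ __volatile__("lfence" : : : "memory");  // extra fence after CAS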

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace base {
namespace subtle {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

typedef char __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64;  // NOLINT

typedef enum {
  __tsan_memory_order_relaxed = 1 << 0,
  __tsan_memory_order_consume = 1 << 1,
  __tsan_memory_order_acquire = 1 << 2,
  __tsan_memory_order_release = 1 << 3,
  __tsan_memory_order_acq_rel = 1 << 4,
  __tsan_memory_order_seq_cst = 1 << 5,
} __tsan_memory_order;
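// Note: these constants mirror the C++11 std::memory_order enumerators of the
// same names (relaxed, consume, acquire, release, acq_rel, seq_cst).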

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
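// The inline wrappers below implement the Atomic32/Atomic64 operations
// declared in base/atomicops.h in terms of the __tsan_* interface above, so
// that ThreadSanitizer observes properly annotated atomic accesses.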

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed);
  return cmp;
}
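// Illustrative usage (not part of this header): as in the other atomicops
// backends, the CompareAndSwap wrappers return the value observed at *ptr, so
// a caller can detect success by comparing against old_value. Assuming a
// hypothetical shared counter:
//   Atomic32 observed;
//   do {
//     observed = NoBarrier_Load(&counter);
//   } while (NoBarrier_CompareAndSwap(&counter, observed, observed + 1) !=
//            observed);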

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}
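// Both AtomicIncrement variants above return the updated (post-increment)
// value: the TSan interface only provides fetch_add, which returns the
// previous value, so 'increment' is added back to recover the new value.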

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
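// Acquire_Store above is emulated as a relaxed store followed by a
// sequentially consistent fence; Release_Load below pairs a seq_cst fence
// with a relaxed load in the same way.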

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace base::subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // BASE_ATOMICOPS_INTERNALS_TSAN_H_