blob: 099f2d61c978df37475f1023b66bdffef46774f8 [file] [log] [blame]
//===------------------------ memory.cpp ----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

Howard Hinnantc0937e82012-07-07 20:56:0410#define _LIBCPP_BUILDING_MEMORY
Howard Hinnant3e519522010-05-11 19:42:1611#include "memory"
Jonathan Roelofsb3fcc672014-09-05 19:45:0512#ifndef _LIBCPP_HAS_NO_THREADS
Howard Hinnantd77851e2012-07-30 01:40:5713#include "mutex"
Howard Hinnant088e37c2012-07-30 17:13:2114#include "thread"
Jonathan Roelofsb3fcc672014-09-05 19:45:0515#endif
Eric Fiseliere8fd1642015-08-18 21:08:5416#include "include/atomic_support.h"
Howard Hinnant3e519522010-05-11 19:42:1617
18_LIBCPP_BEGIN_NAMESPACE_STD
19
// Out-of-line definition of the std::allocator_arg tag object declared in <memory>.
const allocator_arg_t allocator_arg = allocator_arg_t();
21
// Trivial destructor, deliberately defined out of line in the library
// (rather than in the header) — presumably to anchor the class's vtable
// in one translation unit.
bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}
Howard Hinnant3e519522010-05-11 19:42:1623
// Diagnostic message for the exception thrown when constructing a
// shared_ptr from an expired weak_ptr.
const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}
29
// Empty out-of-line destructor for the shared_ptr reference-count base class.
__shared_count::~__shared_count()
{
}
33
// Empty out-of-line destructor for the shared+weak reference-count class.
__shared_weak_count::~__shared_weak_count()
{
}
37
// Register one additional shared owner.
void
__shared_count::__add_shared() _NOEXCEPT
{
    // Atomic increment helper from include/atomic_support.h.
    __libcpp_atomic_refcount_increment(__shared_owners_);
}
43
// Drop one shared owner.  Returns true when this call released the last
// shared reference (and therefore destroyed the owned object).
bool
__shared_count::__release_shared() _NOEXCEPT
{
    // __shared_owners_ stores the owner count minus one (see lock(), which
    // treats -1 as "expired"), so the decrement reaching -1 means the last
    // owner is gone.
    if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}
54
// Register one additional shared owner; the shared count lives in the base class.
void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}
60
// Register one additional weak owner.
void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_weak_owners_);
}
66
// Drop one shared owner.  When the base class reports that the last shared
// reference is gone, also drop the weak reference that the set of shared
// owners collectively holds on this control block.
void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}
73
// Drop one weak owner.  Destroys the control block itself (via
// __on_zero_shared_weak) when the last weak reference goes away.
// Like __shared_owners_, __shared_weak_owners_ stores the count minus one,
// so 0 here means "exactly one weak owner remains — the caller".
void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic stores like XADD and STREX
    // in a common case. Those instructions are slow and do nasty
    // things to caches.
    //
    // IS THIS SAFE? Yes. During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us. If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once. The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}
108
109__shared_weak_count*
Howard Hinnant3739fe72011-05-28 14:41:13110__shared_weak_count::lock() _NOEXCEPT
Howard Hinnant3e519522010-05-11 19:42:16111{
Eric Fiselier1faf2892015-07-07 00:27:16112 long object_owners = __libcpp_atomic_load(&__shared_owners_);
Howard Hinnant3e519522010-05-11 19:42:16113 while (object_owners != -1)
114 {
Eric Fiselier1faf2892015-07-07 00:27:16115 if (__libcpp_atomic_compare_exchange(&__shared_owners_,
116 &object_owners,
117 object_owners+1))
Howard Hinnant3e519522010-05-11 19:42:16118 return this;
Howard Hinnant3e519522010-05-11 19:42:16119 }
120 return 0;
121}
122
#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

// Fallback for get_deleter<D>(): this base control block stores no deleter,
// so type-based lookup always fails; derived control blocks that do hold a
// deleter presumably override this.
const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return 0;
}

#endif // !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)
Eric Fiselierdf93bad2016-06-18 02:12:53133#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
Howard Hinnante4b2a742012-08-19 15:13:16134
// Backing storage for the __get_sp_mut pool: 16 raw OS mutexes with constant
// (static) initialization, so no runtime construction order issues arise.
// NOTE: __sp_mut_count must remain a power of two — __get_sp_mut selects a
// slot with `hash & (__sp_mut_count - 1)`.
_LIBCPP_SAFE_STATIC static const std::size_t __sp_mut_count = 16;
_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut_back[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};
143
// __sp_mut holds a type-erased pointer (__lx) to one of the mut_back mutexes.
// constexpr so the static `muts` table in __get_sp_mut can be constant-initialized.
_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
   : __lx(p)
{
}
148
149void
150__sp_mut::lock() _NOEXCEPT
151{
Eric Fiselierea117bf2016-09-28 22:08:13152 auto m = static_cast<__libcpp_mutex_t*>(__lx);
Howard Hinnant088e37c2012-07-30 17:13:21153 unsigned count = 0;
Eric Fiselierea117bf2016-09-28 22:08:13154 while (__libcpp_mutex_trylock(m) != 0)
Howard Hinnant088e37c2012-07-30 17:13:21155 {
156 if (++count > 16)
157 {
Eric Fiselierea117bf2016-09-28 22:08:13158 __libcpp_mutex_lock(m);
Howard Hinnant088e37c2012-07-30 17:13:21159 break;
160 }
161 this_thread::yield();
162 }
Howard Hinnantd77851e2012-07-30 01:40:57163}
164
// Release the underlying mutex previously acquired by lock().
void
__sp_mut::unlock() _NOEXCEPT
{
    __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
}
170
// Map an object address to one of 16 mutexes (used to serialize the
// non-lock-free atomic shared_ptr operations).  A given pointer always maps
// to the same mutex; the mask relies on __sp_mut_count being a power of two.
__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}
183
Eric Fiselierdf93bad2016-06-18 02:12:53184#endif // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
Howard Hinnantd77851e2012-07-30 01:40:57185
// C++11 garbage-collection hinting API ([util.dynamic.safety]).  This
// implementation has no garbage collector, so the hint is a no-op.
void
declare_reachable(void*)
{
}
190
// GC hinting API: no collector in this implementation, so a no-op.
void
declare_no_pointers(char*, size_t)
{
}
195
// GC hinting API: no collector in this implementation, so a no-op.
void
undeclare_no_pointers(char*, size_t)
{
}
200
#if !defined(_LIBCPP_ABI_POINTER_SAFETY_ENUM_TYPE)
// No garbage collector, so pointer safety is always "relaxed".  Omitted when
// the ABI configures pointer_safety as an enum type — presumably the
// definition is provided elsewhere in that configuration (TODO: confirm).
pointer_safety get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}
#endif
Howard Hinnant3e519522010-05-11 19:42:16207
// Implementation hook for undeclare_reachable: with no collector the
// pointer is returned unchanged.
void*
__undeclare_reachable(void* p)
{
    return p;
}
213
// std::align: carve an `alignment`-aligned sub-block of `size` bytes out of
// the buffer [ptr, ptr + space).  On success, returns the aligned address,
// sets `ptr` to it, and shrinks `space` by the bytes skipped for alignment;
// on failure, returns nullptr and leaves ptr/space untouched.
// Precondition (per the standard): `alignment` is a power of two.
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* result = nullptr;
    if (size <= space)
    {
        char* const base = static_cast<char*>(ptr);
        // Round the address up to the next multiple of `alignment`.
        char* const aligned = reinterpret_cast<char*>(
            reinterpret_cast<size_t>(base + (alignment - 1)) & -alignment);
        const size_t skipped = static_cast<size_t>(aligned - base);
        // Accept only if the adjusted block still holds `size` bytes.
        if (skipped <= space - size)
        {
            ptr = result = aligned;
            space -= skipped;
        }
    }
    return result;
}
232
233_LIBCPP_END_NAMESPACE_STD