//===------------------------ fallback_malloc.cpp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include "config.h"
#include <__threading_support>

#include <cstdlib> // for malloc, calloc, free
#include <cstring> // for memset

#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <iostream> // std::cout is referenced only by the optional tracing below
#endif

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
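//
// A worked example of the chunk arithmetic (illustrative, not part of the
// original comments): sizeof(heap_node) is four bytes, so a request for
// ten bytes becomes alloc_size(10) == 4 four-byte units -- one unit of
// header plus three units (12 bytes) of payload, of which ten are used.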

namespace {

// When POSIX threads are not available, make the mutex operations no-ops
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void * heap_mutex = 0;
#endif

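// RAII guard: holds heap_mutex for the lifetime of the object, so every
// early return below still releases the lock.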
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
    mutexor ( std::__libcpp_mutex_t *m ) : mtx_(m) {
        std::__libcpp_mutex_lock ( mtx_ );
    }
    ~mutexor () { std::__libcpp_mutex_unlock ( mtx_ ); }
#else
    mutexor ( void * ) {}
    ~mutexor () {}
#endif
private:
    mutexor ( const mutexor &rhs );
    mutexor & operator = ( const mutexor &rhs );
#ifndef _LIBCXXABI_HAS_NO_THREADS
    std::__libcpp_mutex_t *mtx_;
#endif
};

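// The emergency pool: a single statically allocated buffer, carved into
// four-byte heap_node units by the allocator below.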
static const size_t HEAP_SIZE = 512;
char heap [ HEAP_SIZE ] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

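// Each chunk begins with a heap_node header. Offsets and lengths are
// measured in sizeof(heap_node) units, so the 16-bit fields could span a
// pool of up to 64K units (256 KiB) -- far more than HEAP_SIZE requires.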
struct heap_node {
    heap_offset next_node;  // offset into heap
    heap_size   len;        // size in units of "sizeof(heap_node)"
};

static const heap_node *list_end = (heap_node *) ( &heap [ HEAP_SIZE ] );   // one past the end of the heap
static heap_node *freelist = NULL;

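// Convert between heap_node pointers and their two-byte offset encoding.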
heap_node *node_from_offset ( const heap_offset offset )
    { return (heap_node *) ( heap + ( offset * sizeof (heap_node))); }

heap_offset offset_from_node ( const heap_node *ptr )
    { return static_cast<heap_offset>(
          static_cast<size_t>(reinterpret_cast<const char *>(ptr) - heap)
          / sizeof (heap_node)); }

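// Lazily initialize the pool on first use: the entire heap starts out as
// a single free chunk.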
void init_heap () {
    freelist = (heap_node *) heap;
    freelist->next_node = offset_from_node ( list_end );
    freelist->len = HEAP_SIZE / sizeof (heap_node);
}

// Size of the chunk to allocate, in heap_node units: the payload rounded
// up to a whole number of units, plus one unit for the header.
size_t alloc_size (size_t len)
    { return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; }

bool is_fallback_ptr ( void *ptr )
    { return ptr >= heap && ptr < ( heap + HEAP_SIZE ); }

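// First-fit allocation: walk the free list and carve the request out of
// the first chunk that is large enough.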
void *fallback_malloc(size_t len) {
    heap_node *p, *prev;
    const size_t nelems = alloc_size ( len );
    mutexor mtx ( &heap_mutex );

    if ( NULL == freelist )
        init_heap ();

//  Walk the free list, looking for a "big enough" chunk
    for (p = freelist, prev = 0;
            p && p != list_end; prev = p, p = node_from_offset ( p->next_node)) {

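        // Take the allocation from the tail of the chunk: the head stays
        // on the free list at its old offset, so prev needs no update.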
        if (p->len > nelems) {  // chunk is larger, shorten, and return the tail
            heap_node *q;

            p->len = static_cast<heap_size>(p->len - nelems);
            q = p + p->len;
            q->next_node = 0;
            q->len = static_cast<heap_size>(nelems);
            return (void *) (q + 1);
        }

        if (p->len == nelems) { // exact size match
            if (prev == 0)
                freelist = node_from_offset(p->next_node);
            else
                prev->next_node = p->next_node;
            p->next_node = 0;
            return (void *) (p + 1);
        }
    }
    return NULL;    // couldn't find a spot big enough
}

// Return the start of the next block
heap_node *after ( struct heap_node *p ) { return p + p->len; }

void fallback_free (void *ptr) {
    struct heap_node *cp = ((struct heap_node *) ptr) - 1;  // retrieve the chunk
    struct heap_node *p, *prev;

    mutexor mtx ( &heap_mutex );

#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "Freeing item at " << offset_from_node ( cp ) << " of size " << cp->len << std::endl;
#endif

    for (p = freelist, prev = 0;
            p && p != list_end; prev = p, p = node_from_offset (p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
        std::cout << "  p, cp, after (p), after(cp) "
            << offset_from_node ( p ) << ' '
            << offset_from_node ( cp ) << ' '
            << offset_from_node ( after ( p )) << ' '
            << offset_from_node ( after ( cp )) << std::endl;
#endif
        if ( after ( p ) == cp ) {
#ifdef DEBUG_FALLBACK_MALLOC
            std::cout << "  Appending onto chunk at " << offset_from_node ( p ) << std::endl;
#endif
            p->len = static_cast<heap_size>(p->len + cp->len);  // make the free heap_node larger
            return;
        }
        else if ( after ( cp ) == p ) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
            std::cout << "  Appending free chunk at " << offset_from_node ( p ) << std::endl;
#endif
            cp->len = static_cast<heap_size>(cp->len + p->len);
            if ( prev == 0 ) {
                freelist = cp;
                cp->next_node = p->next_node;
            }
            else
                prev->next_node = offset_from_node(cp);
            return;
        }
    }
//  Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "  Making new free list entry " << offset_from_node ( cp ) << std::endl;
#endif
    cp->next_node = offset_from_node ( freelist );
    freelist = cp;
}

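// Debugging aid: dump the free list and return the total free space,
// measured in heap_node units.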
#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list () {
    struct heap_node *p, *prev;
    heap_size total_free = 0;
    if ( NULL == freelist )
        init_heap ();

    for (p = freelist, prev = 0;
            p && p != list_end; prev = p, p = node_from_offset (p->next_node)) {
        std::cout << ( prev == 0 ? "" : "  ") << "Offset: " << offset_from_node ( p )
            << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
        total_free += p->len;
    }
    std::cout << "Total Free space: " << total_free << std::endl;
    return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

struct __attribute__((aligned)) __aligned_type {};

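// Allocate size bytes suitably aligned for any ordinary object, trying
// the platform's aligned allocator first and the emergency pool second.
// Note that the fallback path only guarantees the (much smaller)
// alignment of the heap_node units within the pool.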
void * __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
    if (void *dest = _aligned_malloc(size, alignof(__aligned_type)))
        return dest;
#elif defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION)
    if (void* dest = std::malloc(size))
        return dest;
#else
    if (size == 0)
        size = 1;
    void* dest;
    if (::posix_memalign(&dest, alignof(__aligned_type), size) == 0)
        return dest;
#endif
    return fallback_malloc(size);
}

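// Note: the count * size multiplication in the fallback path below is not
// checked for overflow, presumably because callers request modest sizes.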
void * __calloc_with_fallback(size_t count, size_t size) {
    void *ptr = std::calloc(count, size);
    if (NULL != ptr)
        return ptr;
    // if calloc fails, fall back to emergency stash
    ptr = fallback_malloc(size * count);
    if (NULL != ptr)
        std::memset(ptr, 0, size * count);
    return ptr;
}

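// Release memory from __aligned_malloc_with_fallback, routing pool
// pointers back to fallback_free and everything else to the matching
// system deallocator.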
void __aligned_free_with_fallback(void* ptr) {
    if (is_fallback_ptr(ptr))
        fallback_free(ptr);
    else {
#if defined(_WIN32)
        ::_aligned_free(ptr);
#else
        std::free(ptr);
#endif
    }
}

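// Likewise for unaligned allocations, such as those made through
// __calloc_with_fallback.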
void __free_with_fallback(void *ptr) {
    if (is_fallback_ptr(ptr))
        fallback_free(ptr);
    else
        std::free(ptr);
}

} // namespace __cxxabiv1