//===------------------------ fallback_malloc.ipp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//
// This file implements the "Exception Handling APIs"
// http://mentorembedded.github.io/cxx-abi/abi-eh.html
//
//===----------------------------------------------------------------------===//

#include "config.h"

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
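//
// For example: a 10-byte request rounds up to three 4-byte units, plus one
// unit for the chunk header, so it consumes alloc_size(10) == 4 units, or
// 16 of the pool's 512 bytes.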

namespace {

// When POSIX threads are not available, make the mutex operations no-ops
#ifndef _LIBCXXABI_HAS_NO_THREADS
static pthread_mutex_t heap_mutex = PTHREAD_MUTEX_INITIALIZER;
#else
static void * heap_mutex = 0;
#endif

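// RAII guard that holds heap_mutex for the scope of each heap operation
// (compiled to a no-op when the library is built without threads).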
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
    mutexor ( pthread_mutex_t *m ) : mtx_(m) { pthread_mutex_lock ( mtx_ ); }
    ~mutexor () { pthread_mutex_unlock ( mtx_ ); }
#else
    mutexor ( void * ) {}
    ~mutexor () {}
#endif
private:
    mutexor ( const mutexor &rhs );
    mutexor & operator = ( const mutexor &rhs );
#ifndef _LIBCXXABI_HAS_NO_THREADS
    pthread_mutex_t *mtx_;
#endif
    };


#define HEAP_SIZE 512
char heap [ HEAP_SIZE ];

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
    heap_offset next_node;  // offset into heap
    heap_size len;          // size in units of "sizeof(heap_node)"
};

static const heap_node *list_end = (heap_node *) ( &heap [ HEAP_SIZE ] );   // one past the end of the heap
static heap_node *freelist = NULL;

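// Convert between heap_node pointers and the 2-byte offsets stored in chunk
// headers. Offsets count heap_node-sized (4-byte) units from the start of
// the pool, so a heap_offset can address far more than HEAP_SIZE bytes.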
heap_node *node_from_offset ( const heap_offset offset )
    { return (heap_node *) ( heap + ( offset * sizeof (heap_node))); }

heap_offset offset_from_node ( const heap_node *ptr )
    { return static_cast<heap_offset>(static_cast<size_t>(reinterpret_cast<const char *>(ptr) - heap) / sizeof (heap_node)); }

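// Called lazily on the first allocation: the entire pool starts out as a
// single free chunk whose next_node points one past the end of the heap.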
void init_heap () {
    freelist = (heap_node *) heap;
    freelist->next_node = offset_from_node ( list_end );
    freelist->len = HEAP_SIZE / sizeof (heap_node);
    }

// How big a chunk we allocate: the number of heap_node-sized units needed
// for a request of len bytes, plus one unit for the chunk header.
size_t alloc_size (size_t len)
    { return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; }
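// True iff ptr was carved out of the fixed pool above. A caller can use this
// to route a pointer to the matching deallocator; for instance (a sketch, not
// part of this file):
//   if ( is_fallback_ptr ( p ) ) fallback_free ( p ); else ::free ( p );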
bool is_fallback_ptr ( void *ptr )
    { return ptr >= heap && ptr < ( heap + HEAP_SIZE ); }

void *fallback_malloc(size_t len) {
    heap_node *p, *prev;
    const size_t nelems = alloc_size ( len );
    mutexor mtx ( &heap_mutex );

    if ( NULL == freelist )
        init_heap ();

// Walk the free list, looking for a "big enough" chunk
    for (p = freelist, prev = 0;
            p && p != list_end;     prev = p, p = node_from_offset ( p->next_node)) {
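        // When a larger chunk is split, the *tail* is handed out, so neither
        // prev's link nor p's place in the free list has to change; only
        // p->len is updated.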
        if (p->len > nelems) {  // chunk is larger, shorten, and return the tail
            heap_node *q;

            p->len = static_cast<heap_size>(p->len - nelems);
            q = p + p->len;
            q->next_node = 0;
            q->len = static_cast<heap_size>(nelems);
            return (void *) (q + 1);
        }

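        // An exact fit is unlinked from the free list entirely; its header
        // stays in place so fallback_free can recover the chunk's size.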
        if (p->len == nelems) {  // exact size match
            if (prev == 0)
                freelist = node_from_offset(p->next_node);
            else
                prev->next_node = p->next_node;
            p->next_node = 0;
            return (void *) (p + 1);
        }
    }
    return NULL;    // couldn't find a spot big enough
}

// Return the start of the next block
heap_node *after ( struct heap_node *p ) { return p + p->len; }
Howard Hinnant73ab1862012-01-24 21:41:27122
void fallback_free (void *ptr) {
    struct heap_node *cp = ((struct heap_node *) ptr) - 1;      // retrieve the chunk
    struct heap_node *p, *prev;

    mutexor mtx ( &heap_mutex );

#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "Freeing item at " << offset_from_node ( cp ) << " of size " << cp->len << std::endl;
#endif

    for (p = freelist, prev = 0;
            p && p != list_end;     prev = p, p = node_from_offset (p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
        std::cout << "  p, cp, after (p), after(cp) "
            << offset_from_node ( p ) << ' '
            << offset_from_node ( cp ) << ' '
            << offset_from_node ( after ( p )) << ' '
            << offset_from_node ( after ( cp )) << std::endl;
#endif
        if ( after ( p ) == cp ) {
#ifdef DEBUG_FALLBACK_MALLOC
            std::cout << "  Appending onto chunk at " << offset_from_node ( p ) << std::endl;
#endif
            p->len = static_cast<heap_size>(p->len + cp->len);  // make the free heap_node larger
            return;
        }
        else if ( after ( cp ) == p ) {  // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
            std::cout << "  Appending free chunk at " << offset_from_node ( p ) << std::endl;
#endif
            cp->len = static_cast<heap_size>(cp->len + p->len);
            cp->next_node = p->next_node;   // cp takes p's place in the list;
                                            // without this, cp's stale offset
                                            // (0) would corrupt the free list
            if ( prev == 0 )
                freelist = cp;
            else
                prev->next_node = offset_from_node(cp);
            return;
        }
    }
// Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << "  Making new free list entry " << offset_from_node ( cp ) << std::endl;
#endif
    cp->next_node = offset_from_node ( freelist );
    freelist = cp;
}
#ifdef INSTRUMENT_FALLBACK_MALLOC
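// Debugging aid: dumps every free chunk to std::cout and returns the total
// free space, in units of sizeof(heap_node).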
size_t print_free_list () {
    struct heap_node *p, *prev;
    heap_size total_free = 0;
    if ( NULL == freelist )
        init_heap ();

    for (p = freelist, prev = 0;
            p && p != list_end;     prev = p, p = node_from_offset (p->next_node)) {
        std::cout << ( prev == 0 ? "" : "  ") << "Offset: " << offset_from_node ( p )
            << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
        total_free += p->len;
    }
    std::cout << "Total Free space: " << total_free << std::endl;
    return total_free;
    }
#endif
}  // end unnamed namespace