Index: head/contrib/libcxxrt/atomic.h
===================================================================
--- head/contrib/libcxxrt/atomic.h (revision 276416)
+++ head/contrib/libcxxrt/atomic.h (revision 276417)
@@ -1,30 +1,30 @@
 #ifndef __has_builtin
 #define __has_builtin(x) 0
 #endif
 #ifndef __has_feature
 #define __has_feature(x) 0
 #endif
 
 /**
  * Swap macro that enforces a happens-before relationship with a corresponding
  * ATOMIC_LOAD.
  */
 #if __has_builtin(__c11_atomic_exchange)
 #define ATOMIC_SWAP(addr, val)\
-	__c11_atomic_exchange((_Atomic(__typeof__(val))*)addr, val, __ATOMIC_ACQ_REL)
+	__c11_atomic_exchange(reinterpret_cast<_Atomic(__typeof__(val))*>(addr), val, __ATOMIC_ACQ_REL)
 #elif __has_builtin(__sync_swap)
 #define ATOMIC_SWAP(addr, val)\
 	__sync_swap(addr, val)
 #else
 #define ATOMIC_SWAP(addr, val)\
 	__sync_lock_test_and_set(addr, val)
 #endif
 
 #if __has_builtin(__c11_atomic_load)
 #define ATOMIC_LOAD(addr)\
-	__c11_atomic_load((_Atomic(__typeof__(*addr))*)addr, __ATOMIC_ACQUIRE)
+	__c11_atomic_load(reinterpret_cast<_Atomic(__typeof__(*addr))*>(addr), __ATOMIC_ACQUIRE)
 #else
 #define ATOMIC_LOAD(addr)\
 	(__sync_synchronize(), *addr)
 #endif
Index: head/contrib/libcxxrt/cxxabi.h
===================================================================
--- head/contrib/libcxxrt/cxxabi.h (revision 276416)
+++ head/contrib/libcxxrt/cxxabi.h (revision 276417)
@@ -1,244 +1,244 @@
 /*
  * Copyright 2012 David Chisnall. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to
  * deal in the Software without restriction, including without limitation the
  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  * sell copies of the Software, and to permit persons to whom the Software is
  * furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be
  * included in all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef __CXXABI_H_
 #define __CXXABI_H_
 #include 
 #include 
 #include "unwind.h"
 namespace std
 {
 	class type_info;
 }
 /*
  * The cxxabi.h header provides a set of public definitions for types and
  * functions defined by the Itanium C++ ABI specification. For reference, see
  * the ABI specification here:
  *
  * http://sourcery.mentor.com/public/cxx-abi/abi.html
  *
  * All deviations from this specification, unless otherwise noted, are
  * accidental.
  */
 #ifdef __cplusplus
 namespace __cxxabiv1
 {
 extern "C"
 {
 #endif
 /**
  * Function type to call when an unexpected exception is encountered.
  */
 typedef void (*unexpected_handler)();
 /**
  * Function type to call when an unrecoverable condition is encountered.
  */
 typedef void (*terminate_handler)();
 
 /**
  * Structure used as a header on thrown exceptions. This is the same layout as
  * defined by the Itanium ABI spec, so should be interoperable with any other
  * implementation of this spec, such as GNU libsupc++.
  *
  * This structure is allocated when an exception is thrown.
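The ATOMIC_SWAP/ATOMIC_LOAD pair in atomic.h above expands to an acquire-release exchange and an acquire load respectively; that pairing is the happens-before edge the comment promises. A minimal sketch of the same contract, written with std::atomic rather than the header's compiler builtins (variable names are illustrative only):

#include <atomic>
#include <cassert>

static int payload;                          // ordinary, non-atomic data
static std::atomic<int*> published{nullptr}; // stand-in for the guarded pointer

void producer()
{
	payload = 42;                            // write the data first
	// ATOMIC_SWAP(addr, val) behaves like an exchange with acq_rel ordering
	published.exchange(&payload, std::memory_order_acq_rel);
}

void consumer()
{
	// ATOMIC_LOAD(addr) behaves like a load with acquire ordering
	if (int *p = published.load(std::memory_order_acquire))
		assert(*p == 42);                    // the store to payload is guaranteed visible
}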
Unwinding happens * in two phases, the first looks for a handler and the second installs the * context. This structure stores a cache of the handler location between * phase 1 and phase 2. Unfortunately, cleanup information is not cached, so * must be looked up in both phases. This happens for two reasons. The first * is that we don't know how many frames containing cleanups there will be, and * we should avoid dynamic allocation during unwinding (the exception may be * reporting that we've run out of memory). The second is that finding * cleanups is much cheaper than finding handlers, because we don't have to * look at the type table at all. * * Note: Several fields of this structure have not-very-informative names. * These are taken from the ABI spec and have not been changed to make it * easier for people referring to to the spec while reading this code. */ struct __cxa_exception { #if __LP64__ /** * Reference count. Used to support the C++11 exception_ptr class. This * is prepended to the structure in 64-bit mode and squeezed in to the * padding left before the 64-bit aligned _Unwind_Exception at the end in * 32-bit mode. * * Note that it is safe to extend this structure at the beginning, rather * than the end, because the public API for creating it returns the address * of the end (where the exception object can be stored). */ uintptr_t referenceCount; #endif /** Type info for the thrown object. */ std::type_info *exceptionType; /** Destructor for the object, if one exists. */ void (*exceptionDestructor) (void *); /** Handler called when an exception specification is violated. */ unexpected_handler unexpectedHandler; /** Hander called to terminate. */ terminate_handler terminateHandler; /** * Next exception in the list. If an exception is thrown inside a catch * block and caught in a nested catch, this points to the exception that * will be handled after the inner catch block completes. */ __cxa_exception *nextException; /** * The number of handlers that currently have references to this * exception. The top (non-sign) bit of this is used as a flag to indicate * that the exception is being rethrown, so should not be deleted when its * handler count reaches 0 (which it doesn't with the top bit set). */ int handlerCount; -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) /** * The ARM EH ABI requires the unwind library to keep track of exceptions * during cleanups. These support nesting, so we need to keep a list of * them. */ _Unwind_Exception *nextCleanup; /** * The number of cleanups that are currently being run on this exception. */ int cleanupCount; #endif /** * The selector value to be returned when installing the catch handler. * Used at the call site to determine which catch() block should execute. * This is found in phase 1 of unwinding then installed in phase 2. */ int handlerSwitchValue; /** * The action record for the catch. This is cached during phase 1 * unwinding. */ const char *actionRecord; /** * Pointer to the language-specific data area (LSDA) for the handler * frame. This is unused in this implementation, but set for ABI * compatibility in case we want to mix code in very weird ways. */ const char *languageSpecificData; /** The cached landing pad for the catch handler.*/ void *catchTemp; /** * The pointer that will be returned as the pointer to the object. When * throwing a class and catching a virtual superclass (for example), we * need to adjust the thrown pointer to make it all work correctly. 
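The reference-count comments above hinge on one layout property: __cxa_allocate_exception() returns the address just past the __cxa_exception header, so the header can grow at its front without breaking callers, and the runtime finds the header again by stepping back over it. A rough sketch of that arithmetic with a stand-in header type (this is not the library's allocator):

#include <cstddef>
#include <cstdlib>

struct header { long refcount; /* ...rest of the per-exception bookkeeping... */ };

// Allocation hands back the address *after* the header, where the thrown object lives.
void *allocate_with_header(std::size_t thrown_size)
{
	header *h = static_cast<header*>(std::calloc(1, sizeof(header) + thrown_size));
	return h + 1;                 // caller constructs the exception object here
}

// The runtime recovers the header by subtracting one header from the object pointer.
header *header_from_object(void *thrown_object)
{
	return static_cast<header*>(thrown_object) - 1;
}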
*/ void *adjustedPtr; #if !__LP64__ /** * Reference count. Used to support the C++11 exception_ptr class. This * is prepended to the structure in 64-bit mode and squeezed in to the * padding left before the 64-bit aligned _Unwind_Exception at the end in * 32-bit mode. * * Note that it is safe to extend this structure at the beginning, rather * than the end, because the public API for creating it returns the address * of the end (where the exception object can be stored) */ uintptr_t referenceCount; #endif /** The language-agnostic part of the exception header. */ _Unwind_Exception unwindHeader; }; /** * ABI-specified globals structure. Returned by the __cxa_get_globals() * function and its fast variant. This is a per-thread structure - every * thread will have one lazily allocated. * * This structure is defined by the ABI, so may be used outside of this * library. */ struct __cxa_eh_globals { /** * A linked list of exceptions that are currently caught. There may be * several of these in nested catch() blocks. */ __cxa_exception *caughtExceptions; /** * The number of uncaught exceptions. */ unsigned int uncaughtExceptions; }; /** * ABI function returning the __cxa_eh_globals structure. */ __cxa_eh_globals *__cxa_get_globals(void); /** * Version of __cxa_get_globals() assuming that __cxa_get_globals() has already * been called at least once by this thread. */ __cxa_eh_globals *__cxa_get_globals_fast(void); std::type_info * __cxa_current_exception_type(); /** * Throws an exception returned by __cxa_current_primary_exception(). This * exception may have been caught in another thread. */ void __cxa_rethrow_primary_exception(void* thrown_exception); /** * Returns the current exception in a form that can be stored in an * exception_ptr object and then rethrown by a call to * __cxa_rethrow_primary_exception(). */ void *__cxa_current_primary_exception(void); /** * Increments the reference count of an exception. Called when an * exception_ptr is copied. */ void __cxa_increment_exception_refcount(void* thrown_exception); /** * Decrements the reference count of an exception. Called when an * exception_ptr is deleted. */ void __cxa_decrement_exception_refcount(void* thrown_exception); /** * Demangles a C++ symbol or type name. The buffer, if non-NULL, must be * allocated with malloc() and must be *n bytes or more long. This function * may call realloc() on the value pointed to by buf, and will return the * length of the string via *n. * * The value pointed to by status is set to one of the following: * * 0: success * -1: memory allocation failure * -2: invalid mangled name * -3: invalid arguments */ char* __cxa_demangle(const char* mangled_name, char* buf, size_t* n, int* status); #ifdef __cplusplus } // extern "C" } // namespace namespace abi = __cxxabiv1; #endif /* __cplusplus */ #endif /* __CXXABI_H_ */ Index: head/contrib/libcxxrt/dwarf_eh.h =================================================================== --- head/contrib/libcxxrt/dwarf_eh.h (revision 276416) +++ head/contrib/libcxxrt/dwarf_eh.h (revision 276417) @@ -1,479 +1,479 @@ /* * Copyright 2010-2011 PathScale, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * dwarf_eh.h - Defines some helper functions for parsing DWARF exception * handling tables. * * This file contains various helper functions that are independent of the * language-specific code. It can be used in any personality function for the * Itanium ABI. */ #include // TODO: Factor out Itanium / ARM differences. We probably want an itanium.h // and arm.h that can be included by this file depending on the target ABI. // _GNU_SOURCE must be defined for unwind.h to expose some of the functions // that we want. If it isn't, then we define it and undefine it to make sure // that it doesn't impact the rest of the program. #ifndef _GNU_SOURCE # define _GNU_SOURCE 1 # include "unwind.h" # undef _GNU_SOURCE #else # include "unwind.h" #endif #include /// Type used for pointers into DWARF data typedef unsigned char *dw_eh_ptr_t; // Flag indicating a signed quantity #define DW_EH_PE_signed 0x08 /// DWARF data encoding types. enum dwarf_data_encoding { /// Absolute pointer value DW_EH_PE_absptr = 0x00, /// Unsigned, little-endian, base 128-encoded (variable length). DW_EH_PE_uleb128 = 0x01, /// Unsigned 16-bit integer. DW_EH_PE_udata2 = 0x02, /// Unsigned 32-bit integer. DW_EH_PE_udata4 = 0x03, /// Unsigned 64-bit integer. DW_EH_PE_udata8 = 0x04, /// Signed, little-endian, base 128-encoded (variable length) DW_EH_PE_sleb128 = DW_EH_PE_uleb128 | DW_EH_PE_signed, /// Signed 16-bit integer. DW_EH_PE_sdata2 = DW_EH_PE_udata2 | DW_EH_PE_signed, /// Signed 32-bit integer. DW_EH_PE_sdata4 = DW_EH_PE_udata4 | DW_EH_PE_signed, /// Signed 32-bit integer. DW_EH_PE_sdata8 = DW_EH_PE_udata8 | DW_EH_PE_signed }; /** * Returns the encoding for a DWARF EH table entry. The encoding is stored in * the low four of an octet. The high four bits store the addressing mode. */ static inline enum dwarf_data_encoding get_encoding(unsigned char x) { - return (enum dwarf_data_encoding)(x & 0xf); + return static_cast(x & 0xf); } /** * DWARF addressing mode constants. When reading a pointer value from a DWARF * exception table, you must know how it is stored and what the addressing mode * is. The low four bits tell you the encoding, allowing you to decode a * number. The high four bits tell you the addressing mode, allowing you to * turn that number into an address in memory. 
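get_encoding() above keeps only the low four bits of a DW_EH_PE byte; the addressing mode described next lives in bits 4 to 6 (get_base() masks 0x70) and DW_EH_PE_indirect in the top bit. A small sketch of pulling one encoding byte apart (the example byte 0x9b is just a commonly seen combination, not something this header defines):

#include <cstdio>

// Split a DW_EH_PE byte: the low nibble selects how the number is stored,
// bits 4-6 select what the number is relative to, bit 7 marks indirection.
void describe(unsigned char b)
{
	unsigned encoding = b & 0x0f;       // e.g. DW_EH_PE_udata4 == 0x03
	unsigned base     = b & 0x70;       // e.g. DW_EH_PE_pcrel  == 0x10
	bool     indirect = (b & 0x80) != 0;
	std::printf("encoding %#x, base %#x, indirect %d\n", encoding, base, indirect);
}

// Example: describe(0x9b) prints "encoding 0xb, base 0x10, indirect 1",
// i.e. an indirect, pc-relative, sdata4 value.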
*/ enum dwarf_data_relative { /// Value is omitted DW_EH_PE_omit = 0xff, /// Value relative to program counter DW_EH_PE_pcrel = 0x10, /// Value relative to the text segment DW_EH_PE_textrel = 0x20, /// Value relative to the data segment DW_EH_PE_datarel = 0x30, /// Value relative to the start of the function DW_EH_PE_funcrel = 0x40, /// Aligned pointer (Not supported yet - are they actually used?) DW_EH_PE_aligned = 0x50, /// Pointer points to address of real value DW_EH_PE_indirect = 0x80 }; /** * Returns the addressing mode component of this encoding. */ static inline enum dwarf_data_relative get_base(unsigned char x) { - return (enum dwarf_data_relative)(x & 0x70); + return static_cast(x & 0x70); } /** * Returns whether an encoding represents an indirect address. */ static int is_indirect(unsigned char x) { return ((x & DW_EH_PE_indirect) == DW_EH_PE_indirect); } /** * Returns the size of a fixed-size encoding. This function will abort if * called with a value that is not a fixed-size encoding. */ static inline int dwarf_size_of_fixed_size_field(unsigned char type) { switch (get_encoding(type)) { default: abort(); case DW_EH_PE_sdata2: case DW_EH_PE_udata2: return 2; case DW_EH_PE_sdata4: case DW_EH_PE_udata4: return 4; case DW_EH_PE_sdata8: case DW_EH_PE_udata8: return 8; case DW_EH_PE_absptr: return sizeof(void*); } } /** * Read an unsigned, little-endian, base-128, DWARF value. Updates *data to * point to the end of the value. Stores the number of bits read in the value * pointed to by b, allowing you to determine the value of the highest bit, and * therefore the sign of a signed value. * * This function is not intended to be called directly. Use read_sleb128() or * read_uleb128() for reading signed and unsigned versions, respectively. */ static uint64_t read_leb128(dw_eh_ptr_t *data, int *b) { uint64_t uleb = 0; unsigned int bit = 0; unsigned char digit = 0; // We have to read at least one octet, and keep reading until we get to one // with the high bit unset do { // This check is a bit too strict - we should also check the highest // bit of the digit. assert(bit < sizeof(uint64_t) * 8); // Get the base 128 digit digit = (**data) & 0x7f; // Add it to the current value uleb += digit << bit; // Increase the shift value bit += 7; // Proceed to the next octet (*data)++; // Terminate when we reach a value that does not have the high bit set // (i.e. which was not modified when we mask it with 0x7f) } while ((*(*data - 1)) != digit); *b = bit; return uleb; } /** * Reads an unsigned little-endian base-128 value starting at the address * pointed to by *data. Updates *data to point to the next byte after the end * of the variable-length value. */ static int64_t read_uleb128(dw_eh_ptr_t *data) { int b; return read_leb128(data, &b); } /** * Reads a signed little-endian base-128 value starting at the address pointed * to by *data. Updates *data to point to the next byte after the end of the * variable-length value. */ static int64_t read_sleb128(dw_eh_ptr_t *data) { int bits; // Read as if it's signed uint64_t uleb = read_leb128(data, &bits); // If the most significant bit read is 1, then we need to sign extend it if ((uleb >> (bits-1)) == 1) { // Sign extend by setting all bits in front of it to 1 - uleb |= ((int64_t)-1) << bits; + uleb |= static_cast(-1) << bits; } - return (int64_t)uleb; + return static_cast(uleb); } /** * Reads a value using the specified encoding from the address pointed to by * *data. Updates the value of *data to point to the next byte after the end * of the data. 
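read_leb128() above accumulates seven payload bits per octet until it reads an octet with the high bit clear, and read_sleb128() then sign-extends from the number of bits consumed. For reference, a self-contained decode of the textbook example bytes, assuming the same little-endian base-128 scheme (the function name is illustrative):

#include <cstdint>
#include <cassert>

// Decode an unsigned LEB128 value from buf; *len receives the number of bytes used.
static uint64_t uleb128(const unsigned char *buf, unsigned *len)
{
	uint64_t value = 0;
	unsigned shift = 0, i = 0;
	unsigned char byte;
	do
	{
		byte = buf[i++];
		value |= static_cast<uint64_t>(byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);          // continue while the continuation bit is set
	*len = i;
	return value;
}

int main()
{
	const unsigned char enc[] = { 0xe5, 0x8e, 0x26 };   // encodes 624485 in 3 bytes
	unsigned n;
	assert(uleb128(enc, &n) == 624485 && n == 3);
	return 0;
}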
*/ static uint64_t read_value(char encoding, dw_eh_ptr_t *data) { enum dwarf_data_encoding type = get_encoding(encoding); uint64_t v; switch (type) { // Read fixed-length types #define READ(dwarf, type) \ case dwarf:\ - v = (uint64_t)(*(type*)(*data));\ + v = static_cast(*reinterpret_cast(*data));\ *data += sizeof(type);\ break; READ(DW_EH_PE_udata2, uint16_t) READ(DW_EH_PE_udata4, uint32_t) READ(DW_EH_PE_udata8, uint64_t) READ(DW_EH_PE_sdata2, int16_t) READ(DW_EH_PE_sdata4, int32_t) READ(DW_EH_PE_sdata8, int64_t) READ(DW_EH_PE_absptr, intptr_t) #undef READ // Read variable-length types case DW_EH_PE_sleb128: v = read_sleb128(data); break; case DW_EH_PE_uleb128: v = read_uleb128(data); break; default: abort(); } return v; } /** * Resolves an indirect value. This expects an unwind context, an encoding, a * decoded value, and the start of the region as arguments. The returned value * is a pointer to the address identified by the encoded value. * * If the encoding does not specify an indirect value, then this returns v. */ static uint64_t resolve_indirect_value(_Unwind_Context *c, unsigned char encoding, int64_t v, dw_eh_ptr_t start) { switch (get_base(encoding)) { case DW_EH_PE_pcrel: - v += (uint64_t)start; + v += reinterpret_cast(start); break; case DW_EH_PE_textrel: - v += (uint64_t)_Unwind_GetTextRelBase(c); + v += static_cast(static_cast(_Unwind_GetTextRelBase(c))); break; case DW_EH_PE_datarel: - v += (uint64_t)_Unwind_GetDataRelBase(c); + v += static_cast(static_cast(_Unwind_GetDataRelBase(c))); break; case DW_EH_PE_funcrel: - v += (uint64_t)_Unwind_GetRegionStart(c); + v += static_cast(static_cast(_Unwind_GetRegionStart(c))); default: break; } // If this is an indirect value, then it is really the address of the real // value // TODO: Check whether this should really always be a pointer - it seems to // be a GCC extensions, so not properly documented... if (is_indirect(encoding)) { - v = (uint64_t)(uintptr_t)*(void**)v; + v = static_cast(reinterpret_cast(*reinterpret_cast(v))); } return v; } /** * Reads an encoding and a value, updating *data to point to the next byte. */ static inline void read_value_with_encoding(_Unwind_Context *context, dw_eh_ptr_t *data, uint64_t *out) { dw_eh_ptr_t start = *data; unsigned char encoding = *((*data)++); // If this value is omitted, skip it and don't touch the output value if (encoding == DW_EH_PE_omit) { return; } *out = read_value(encoding, data); *out = resolve_indirect_value(context, encoding, *out, start); } /** * Structure storing a decoded language-specific data area. Use parse_lsda() * to generate an instance of this structure from the address returned by the * generic unwind library. * * You should not need to inspect the fields of this structure directly if you * are just using this header. The structure stores the locations of the * various tables used for unwinding exceptions and is used by the functions * for reading values from these tables. */ struct dwarf_eh_lsda { /// The start of the region. This is a cache of the value returned by /// _Unwind_GetRegionStart(). dw_eh_ptr_t region_start; /// The start of the landing pads table. dw_eh_ptr_t landing_pads; /// The start of the type table. dw_eh_ptr_t type_table; /// The encoding used for entries in the type tables. unsigned char type_table_encoding; /// The location of the call-site table. dw_eh_ptr_t call_site_table; /// The location of the action table. dw_eh_ptr_t action_table; /// The encoding used for entries in the call-site table. 
unsigned char callsite_encoding; }; /** * Parse the header on the language-specific data area and return a structure * containing the addresses and encodings of the various tables. */ static inline struct dwarf_eh_lsda parse_lsda(_Unwind_Context *context, unsigned char *data) { struct dwarf_eh_lsda lsda; - lsda.region_start = (dw_eh_ptr_t)(uintptr_t)_Unwind_GetRegionStart(context); + lsda.region_start = reinterpret_cast(_Unwind_GetRegionStart(context)); // If the landing pads are relative to anything other than the start of // this region, find out where. This is @LPStart in the spec, although the // encoding that GCC uses does not quite match the spec. - uint64_t v = (uint64_t)(uintptr_t)lsda.region_start; + uint64_t v = static_cast(reinterpret_cast(lsda.region_start)); read_value_with_encoding(context, &data, &v); - lsda.landing_pads = (dw_eh_ptr_t)(uintptr_t)v; + lsda.landing_pads = reinterpret_cast(static_cast(v)); // If there is a type table, find out where it is. This is @TTBase in the // spec. Note: we find whether there is a type table pointer by checking // whether the leading byte is DW_EH_PE_omit (0xff), which is not what the // spec says, but does seem to be how G++ indicates this. lsda.type_table = 0; lsda.type_table_encoding = *data++; if (lsda.type_table_encoding != DW_EH_PE_omit) { v = read_uleb128(&data); dw_eh_ptr_t type_table = data; type_table += v; lsda.type_table = type_table; //lsda.type_table = (uintptr_t*)(data + v); } -#if __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) lsda.type_table_encoding = (DW_EH_PE_pcrel | DW_EH_PE_indirect); #endif - lsda.callsite_encoding = (enum dwarf_data_encoding)(*(data++)); + lsda.callsite_encoding = static_cast(*(data++)); // Action table is immediately after the call site table lsda.action_table = data; - uintptr_t callsite_size = (uintptr_t)read_uleb128(&data); + uintptr_t callsite_size = static_cast(read_uleb128(&data)); lsda.action_table = data + callsite_size; // Call site table is immediately after the header - lsda.call_site_table = (dw_eh_ptr_t)data; + lsda.call_site_table = static_cast(data); return lsda; } /** * Structure representing an action to be performed while unwinding. This * contains the address that should be unwound to and the action record that * provoked this action. */ struct dwarf_eh_action { /** * The address that this action directs should be the new program counter * value after unwinding. */ dw_eh_ptr_t landing_pad; /// The address of the action record. dw_eh_ptr_t action_record; }; /** * Look up the landing pad that corresponds to the current invoke. * Returns true if record exists. The context is provided by the generic * unwind library and the lsda should be the result of a call to parse_lsda(). * * The action record is returned via the result parameter. */ static bool dwarf_eh_find_callsite(struct _Unwind_Context *context, struct dwarf_eh_lsda *lsda, struct dwarf_eh_action *result) { result->action_record = 0; result->landing_pad = 0; // The current instruction pointer offset within the region uint64_t ip = _Unwind_GetIP(context) - _Unwind_GetRegionStart(context); - unsigned char *callsite_table = (unsigned char*)lsda->call_site_table; + unsigned char *callsite_table = static_cast(lsda->call_site_table); while (callsite_table <= lsda->action_table) { // Once again, the layout deviates from the spec. 
uint64_t call_site_start, call_site_size, landing_pad, action; call_site_start = read_value(lsda->callsite_encoding, &callsite_table); call_site_size = read_value(lsda->callsite_encoding, &callsite_table); // Call site entries are sorted, so if we find a call site that's after // the current instruction pointer then there is no action associated // with this call and we should unwind straight through this frame // without doing anything. if (call_site_start > ip) { break; } // Read the address of the landing pad and the action from the call // site table. landing_pad = read_value(lsda->callsite_encoding, &callsite_table); action = read_uleb128(&callsite_table); // We should not include the call_site_start (beginning of the region) // address in the ip range. For each call site: // // address1: call proc // address2: next instruction // // The call stack contains address2 and not address1, address1 can be // at the end of another EH region. if (call_site_start < ip && ip <= call_site_start + call_site_size) { if (action) { // Action records are 1-biased so both no-record and zeroth // record can be stored. result->action_record = lsda->action_table + action - 1; } // No landing pad means keep unwinding. if (landing_pad) { // Landing pad is the offset from the value in the header result->landing_pad = lsda->landing_pads + landing_pad; } return true; } } return false; } /// Defines an exception class from 8 bytes (endian independent) #define EXCEPTION_CLASS(a,b,c,d,e,f,g,h) \ - (((uint64_t)a << 56) +\ - ((uint64_t)b << 48) +\ - ((uint64_t)c << 40) +\ - ((uint64_t)d << 32) +\ - ((uint64_t)e << 24) +\ - ((uint64_t)f << 16) +\ - ((uint64_t)g << 8) +\ - ((uint64_t)h)) + ((static_cast(a) << 56) +\ + (static_cast(b) << 48) +\ + (static_cast(c) << 40) +\ + (static_cast(d) << 32) +\ + (static_cast(e) << 24) +\ + (static_cast(f) << 16) +\ + (static_cast(g) << 8) +\ + (static_cast(h))) #define GENERIC_EXCEPTION_CLASS(e,f,g,h) \ - ((uint32_t)e << 24) +\ - ((uint32_t)f << 16) +\ - ((uint32_t)g << 8) +\ - ((uint32_t)h) + (static_cast(e) << 24) +\ + (static_cast(f) << 16) +\ + (static_cast(g) << 8) +\ + (static_cast(h)) Index: head/contrib/libcxxrt/dynamic_cast.cc =================================================================== --- head/contrib/libcxxrt/dynamic_cast.cc (revision 276416) +++ head/contrib/libcxxrt/dynamic_cast.cc (revision 276417) @@ -1,210 +1,210 @@ /* * Copyright 2010-2011 PathScale, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "typeinfo.h" #include using namespace ABI_NAMESPACE; /** * Vtable header. */ struct vtable_header { /** Offset of the leaf object. */ ptrdiff_t leaf_offset; /** Type of the object. */ const __class_type_info *type; }; /** * Simple macro that does pointer arithmetic in bytes but returns a value of * the same type as the original. */ -#define ADD_TO_PTR(x, off) (__typeof__(x))(((char*)x) + off) +#define ADD_TO_PTR(x, off) reinterpret_cast<__typeof__(x)>(reinterpret_cast(x) + off) bool std::type_info::__do_catch(std::type_info const *ex_type, void **exception_object, unsigned int outer) const { const type_info *type = this; if (type == ex_type) { return true; } if (const __class_type_info *cti = dynamic_cast(type)) { return ex_type->__do_upcast(cti, exception_object); } return false; } bool __pbase_type_info::__do_catch(std::type_info const *ex_type, void **exception_object, unsigned int outer) const { if (ex_type == this) { return true; } if (!ex_type->__is_pointer_p()) { // Can't catch a non-pointer type in a pointer catch return false; } if (!(outer & 1)) { // If the low bit is cleared on this means that we've gone // through a pointer that is not const qualified. return false; } // Clear the low bit on outer if we're not const qualified. if (!(__flags & __const_mask)) { outer &= ~1; } const __pbase_type_info *ptr_type = static_cast(ex_type); if (ptr_type->__flags & ~__flags) { // Handler pointer is less qualified return false; } // Special case for void* handler. if(*__pointee == typeid(void)) { return true; } return __pointee->__do_catch(ptr_type->__pointee, exception_object, outer); } void *__class_type_info::cast_to(void *obj, const struct __class_type_info *other) const { if (this == other) { return obj; } return 0; } void *__si_class_type_info::cast_to(void *obj, const struct __class_type_info *other) const { if (this == other) { return obj; } return __base_type->cast_to(obj, other); } bool __si_class_type_info::__do_upcast(const __class_type_info *target, void **thrown_object) const { if (this == target) { return true; } return __base_type->__do_upcast(target, thrown_object); } void *__vmi_class_type_info::cast_to(void *obj, const struct __class_type_info *other) const { if (__do_upcast(other, &obj)) { return obj; } return 0; } bool __vmi_class_type_info::__do_upcast(const __class_type_info *target, void **thrown_object) const { if (this == target) { return true; } for (unsigned int i=0 ; i<__base_count ; i++) { const __base_class_type_info *info = &__base_info[i]; ptrdiff_t offset = info->offset(); // If this is a virtual superclass, the offset is stored in the // object's vtable at the offset requested; 2.9.5.6.c: // // 'For a non-virtual base, this is the offset in the object of the // base subobject. For a virtual base, this is the offset in the // virtual table of the virtual base offset for the virtual base // referenced (negative).' 
void *obj = *thrown_object; if (info->isVirtual()) { // Object's vtable - ptrdiff_t *off = *(ptrdiff_t**)obj; + ptrdiff_t *off = *static_cast(obj); // Offset location in vtable off = ADD_TO_PTR(off, offset); offset = *off; } void *cast = ADD_TO_PTR(obj, offset); if (info->__base_type == target || (info->__base_type->__do_upcast(target, &cast))) { *thrown_object = cast; return true; } } return 0; } /** * ABI function used to implement the dynamic_cast<> operator. Some cases of * this operator are implemented entirely in the compiler (e.g. to void*). * This function implements the dynamic casts of the form dynamic_cast(v). * This will be translated to a call to this function with the value v as the * first argument. The type id of the static type of v is the second argument * and the type id of the destination type (T) is the third argument. * * The third argument is a hint about the compiler's guess at the correct * pointer offset. If this value is negative, then -1 indicates no hint, -2 * that src is not a public base of dst, and -3 that src is a multiple public * base type but never a virtual base type */ extern "C" void* __dynamic_cast(const void *sub, const __class_type_info *src, const __class_type_info *dst, ptrdiff_t src2dst_offset) { - char *vtable_location = *(char**)sub; + const char *vtable_location = *static_cast(sub); const vtable_header *header = - (const vtable_header*)(vtable_location - sizeof(vtable_header)); - void *leaf = ADD_TO_PTR((void*)sub, header->leaf_offset); + reinterpret_cast(vtable_location - sizeof(vtable_header)); + void *leaf = ADD_TO_PTR(const_cast(sub), header->leaf_offset); return header->type->cast_to(leaf, dst); } Index: head/contrib/libcxxrt/exception.cc =================================================================== --- head/contrib/libcxxrt/exception.cc (revision 276416) +++ head/contrib/libcxxrt/exception.cc (revision 276417) @@ -1,1520 +1,1535 @@ /* * Copyright 2010-2011 PathScale, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
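The cast machinery in dynamic_cast.cc above reduces to byte arithmetic: ADD_TO_PTR() moves a pointer by a signed byte offset while keeping its static type, and for a virtual base that offset is first fetched from the object's vtable. A compact sketch of the non-virtual case (add_bytes and the Base/Derived types are made up for illustration):

#include <cstddef>

// Move a pointer by a signed number of bytes, preserving the pointee type.
template<typename T>
static T *add_bytes(T *p, std::ptrdiff_t off)
{
	return reinterpret_cast<T*>(reinterpret_cast<char*>(p) + off);
}

struct Base    { int b; };
struct Derived { int pad; Base base; };   // Base lives at a non-zero offset

Base *upcast(Derived *d)
{
	// For a non-virtual base the offset is a compile-time constant;
	// for a virtual base it would be read out of the vtable first.
	return add_bytes(reinterpret_cast<Base*>(d), offsetof(Derived, base));
}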
*/ #include #include #include #include #include #include #include "typeinfo.h" #include "dwarf_eh.h" #include "atomic.h" #include "cxxabi.h" #pragma weak pthread_key_create #pragma weak pthread_setspecific #pragma weak pthread_getspecific #pragma weak pthread_once #ifdef LIBCXXRT_WEAK_LOCKS #pragma weak pthread_mutex_lock #define pthread_mutex_lock(mtx) do {\ if (pthread_mutex_lock) pthread_mutex_lock(mtx);\ } while(0) #pragma weak pthread_mutex_unlock #define pthread_mutex_unlock(mtx) do {\ if (pthread_mutex_unlock) pthread_mutex_unlock(mtx);\ } while(0) #pragma weak pthread_cond_signal #define pthread_cond_signal(cv) do {\ if (pthread_cond_signal) pthread_cond_signal(cv);\ } while(0) #pragma weak pthread_cond_wait #define pthread_cond_wait(cv, mtx) do {\ if (pthread_cond_wait) pthread_cond_wait(cv, mtx);\ } while(0) #endif using namespace ABI_NAMESPACE; /** * Saves the result of the landing pad that we have found. For ARM, this is * stored in the generic unwind structure, while on other platforms it is * stored in the C++ exception. */ static void saveLandingPad(struct _Unwind_Context *context, struct _Unwind_Exception *ucb, struct __cxa_exception *ex, int selector, dw_eh_ptr_t landingPad) { -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) // On ARM, we store the saved exception in the generic part of the structure ucb->barrier_cache.sp = _Unwind_GetGR(context, 13); - ucb->barrier_cache.bitpattern[1] = (uint32_t)selector; - ucb->barrier_cache.bitpattern[3] = (uint32_t)landingPad; + ucb->barrier_cache.bitpattern[1] = static_cast(selector); + ucb->barrier_cache.bitpattern[3] = reinterpret_cast(landingPad); #endif // Cache the results for the phase 2 unwind, if we found a handler // and this is not a foreign exception. if (ex) { ex->handlerSwitchValue = selector; ex->catchTemp = landingPad; } } /** * Loads the saved landing pad. Returns 1 on success, 0 on failure. */ static int loadLandingPad(struct _Unwind_Context *context, struct _Unwind_Exception *ucb, struct __cxa_exception *ex, unsigned long *selector, dw_eh_ptr_t *landingPad) { -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) *selector = ucb->barrier_cache.bitpattern[1]; - *landingPad = (dw_eh_ptr_t)ucb->barrier_cache.bitpattern[3]; + *landingPad = reinterpret_cast(ucb->barrier_cache.bitpattern[3]); return 1; #else if (ex) { *selector = ex->handlerSwitchValue; - *landingPad = (dw_eh_ptr_t)ex->catchTemp; + *landingPad = reinterpret_cast(ex->catchTemp); return 0; } return 0; #endif } static inline _Unwind_Reason_Code continueUnwinding(struct _Unwind_Exception *ex, struct _Unwind_Context *context) { -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) if (__gnu_unwind_frame(ex, context) != _URC_OK) { return _URC_FAILURE; } #endif return _URC_CONTINUE_UNWIND; } extern "C" void __cxa_free_exception(void *thrown_exception); extern "C" void __cxa_free_dependent_exception(void *thrown_exception); extern "C" void* __dynamic_cast(const void *sub, const __class_type_info *src, const __class_type_info *dst, ptrdiff_t src2dst_offset); /** * The type of a handler that has been found. */ typedef enum { /** No handler. */ handler_none, /** * A cleanup - the exception will propagate through this frame, but code * must be run when this happens. */ handler_cleanup, /** * A catch statement. The exception will not propagate past this frame * (without an explicit rethrow). */ handler_catch } handler_type; /** * Per-thread info required by the runtime. 
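The #pragma weak declarations near the top of exception.cc let the runtime link without libpthread: an unresolved weak symbol evaluates to a null function pointer, so each wrapper checks the symbol before calling it. A trimmed illustration of that pattern (EXAMPLE_LOCK is a hypothetical stand-in for the LIBCXXRT_WEAK_LOCKS wrappers):

#include <pthread.h>

#pragma weak pthread_mutex_lock

// Only take the lock if a real pthread implementation provided the symbol.
#define EXAMPLE_LOCK(mtx) do {          \
		if (pthread_mutex_lock)         \
			pthread_mutex_lock(mtx);    \
	} while (0)

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

void critical_section()
{
	EXAMPLE_LOCK(&m);
	/* ... matching unlock elided ... */
}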
We store a single structure * pointer in thread-local storage, because this tends to be a scarce resource * and it's impolite to steal all of it and not leave any for the rest of the * program. * * Instances of this structure are allocated lazily - at most one per thread - * and are destroyed on thread termination. */ struct __cxa_thread_info { /** The termination handler for this thread. */ terminate_handler terminateHandler; /** The unexpected exception handler for this thread. */ unexpected_handler unexpectedHandler; /** * The number of emergency buffers held by this thread. This is 0 in * normal operation - the emergency buffers are only used when malloc() * fails to return memory for allocating an exception. Threads are not * permitted to hold more than 4 emergency buffers (as per recommendation * in ABI spec [3.3.1]). */ int emergencyBuffersHeld; /** * The exception currently running in a cleanup. */ _Unwind_Exception *currentCleanup; /** * Our state with respect to foreign exceptions. Usually none, set to * caught if we have just caught an exception and rethrown if we are * rethrowing it. */ enum { none, caught, rethrown } foreign_exception_state; /** * The public part of this structure, accessible from outside of this * module. */ __cxa_eh_globals globals; }; /** * Dependent exception. This */ struct __cxa_dependent_exception { #if __LP64__ void *primaryException; #endif std::type_info *exceptionType; void (*exceptionDestructor) (void *); unexpected_handler unexpectedHandler; terminate_handler terminateHandler; __cxa_exception *nextException; int handlerCount; -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) _Unwind_Exception *nextCleanup; int cleanupCount; #endif int handlerSwitchValue; const char *actionRecord; const char *languageSpecificData; void *catchTemp; void *adjustedPtr; #if !__LP64__ void *primaryException; #endif _Unwind_Exception unwindHeader; }; namespace std { void unexpected(); class exception { public: virtual ~exception() throw(); virtual const char* what() const throw(); }; } /** * Class of exceptions to distinguish between this and other exception types. * * The first four characters are the vendor ID. Currently, we use GNUC, * because we aim for ABI-compatibility with the GNU implementation, and * various checks may test for equality of the class, which is incorrect. */ static const uint64_t exception_class = EXCEPTION_CLASS('G', 'N', 'U', 'C', 'C', '+', '+', '\0'); /** * Class used for dependent exceptions. */ static const uint64_t dependent_exception_class = EXCEPTION_CLASS('G', 'N', 'U', 'C', 'C', '+', '+', '\x01'); /** * The low four bytes of the exception class, indicating that we conform to the * Itanium C++ ABI. This is currently unused, but should be used in the future * if we change our exception class, to allow this library and libsupc++ to be * linked to the same executable and both to interoperate. 
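exception_class above packs the eight characters 'GNUCC++\0' into a single 64-bit value with the vendor bytes at the top, which is how personality routines recognise their own exceptions. A quick check of what that packing evaluates to (make_class mirrors the EXCEPTION_CLASS macro; the expected constant assumes ASCII):

#include <cstdint>
#include <cassert>

// Pack eight characters into a 64-bit exception class, most significant byte first.
static constexpr uint64_t make_class(char a, char b, char c, char d,
                                     char e, char f, char g, char h)
{
	return (static_cast<uint64_t>(a) << 56) | (static_cast<uint64_t>(b) << 48) |
	       (static_cast<uint64_t>(c) << 40) | (static_cast<uint64_t>(d) << 32) |
	       (static_cast<uint64_t>(e) << 24) | (static_cast<uint64_t>(f) << 16) |
	       (static_cast<uint64_t>(g) << 8)  |  static_cast<uint64_t>(h);
}

int main()
{
	// "GNUCC++\0": the same value the EXCEPTION_CLASS macro builds above.
	assert(make_class('G', 'N', 'U', 'C', 'C', '+', '+', '\0') == 0x474e5543432b2b00ULL);
	return 0;
}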
*/ static const uint32_t abi_exception_class = GENERIC_EXCEPTION_CLASS('C', '+', '+', '\0'); static bool isCXXException(uint64_t cls) { return (cls == exception_class) || (cls == dependent_exception_class); } static bool isDependentException(uint64_t cls) { return cls == dependent_exception_class; } static __cxa_exception *exceptionFromPointer(void *ex) { - return (__cxa_exception*)((char*)ex - + return reinterpret_cast<__cxa_exception*>(static_cast(ex) - offsetof(struct __cxa_exception, unwindHeader)); } static __cxa_exception *realExceptionFromException(__cxa_exception *ex) { if (!isDependentException(ex->unwindHeader.exception_class)) { return ex; } - return ((__cxa_exception*)(((__cxa_dependent_exception*)ex)->primaryException))-1; + return reinterpret_cast<__cxa_exception*>((reinterpret_cast<__cxa_dependent_exception*>(ex))->primaryException)-1; } namespace std { // Forward declaration of standard library terminate() function used to // abort execution. void terminate(void); } using namespace ABI_NAMESPACE; /** The global termination handler. */ static terminate_handler terminateHandler = abort; /** The global unexpected exception handler. */ static unexpected_handler unexpectedHandler = std::terminate; /** Key used for thread-local data. */ static pthread_key_t eh_key; /** * Cleanup function, allowing foreign exception handlers to correctly destroy * this exception if they catch it. */ static void exception_cleanup(_Unwind_Reason_Code reason, struct _Unwind_Exception *ex) { - __cxa_free_exception((void*)ex); + __cxa_free_exception(static_cast(ex)); } static void dependent_exception_cleanup(_Unwind_Reason_Code reason, struct _Unwind_Exception *ex) { - __cxa_free_dependent_exception((void*)ex); + __cxa_free_dependent_exception(static_cast(ex)); } /** * Recursively walk a list of exceptions and delete them all in post-order. */ static void free_exception_list(__cxa_exception *ex) { if (0 != ex->nextException) { free_exception_list(ex->nextException); } // __cxa_free_exception() expects to be passed the thrown object, which // immediately follows the exception, not the exception itself __cxa_free_exception(ex+1); } /** * Cleanup function called when a thread exists to make certain that all of the * per-thread data is deleted. */ static void thread_cleanup(void* thread_info) { - __cxa_thread_info *info = (__cxa_thread_info*)thread_info; + __cxa_thread_info *info = static_cast<__cxa_thread_info*>(thread_info); if (info->globals.caughtExceptions) { // If this is a foreign exception, ask it to clean itself up. if (info->foreign_exception_state != __cxa_thread_info::none) { - _Unwind_Exception *e = (_Unwind_Exception*)info->globals.caughtExceptions; + _Unwind_Exception *e = reinterpret_cast<_Unwind_Exception*>(info->globals.caughtExceptions); e->exception_cleanup(_URC_FOREIGN_EXCEPTION_CAUGHT, e); } else { free_exception_list(info->globals.caughtExceptions); } } free(thread_info); } /** * Once control used to protect the key creation. */ static pthread_once_t once_control = PTHREAD_ONCE_INIT; /** * We may not be linked against a full pthread implementation. If we're not, * then we need to fake the thread-local storage by storing 'thread-local' * things in a global. */ static bool fakeTLS; /** * Thread-local storage for a single-threaded program. */ static __cxa_thread_info singleThreadInfo; /** * Initialise eh_key. 
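init_key(), defined just below, decides whether thread-local storage really works by storing a sentinel with pthread_setspecific() and immediately reading it back; if the round trip fails, or the pthread symbols are missing, everything falls back to a single global __cxa_thread_info. A stripped-down sketch of that probe (probe_tls and tls_is_fake are illustrative names):

#include <pthread.h>

static pthread_key_t key;
static bool tls_is_fake;

static void probe_tls(void)
{
	pthread_key_create(&key, 0 /* no destructor in this sketch */);
	// Store a sentinel and read it back: if the value does not survive the
	// round trip we are running on a stub pthread and must fake TLS.
	pthread_setspecific(key, reinterpret_cast<void*>(0x42));
	tls_is_fake = (pthread_getspecific(key) != reinterpret_cast<void*>(0x42));
	pthread_setspecific(key, 0);
}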
*/ static void init_key(void) { if ((0 == pthread_key_create) || (0 == pthread_setspecific) || (0 == pthread_getspecific)) { fakeTLS = true; return; } pthread_key_create(&eh_key, thread_cleanup); - pthread_setspecific(eh_key, (void*)0x42); - fakeTLS = (pthread_getspecific(eh_key) != (void*)0x42); + pthread_setspecific(eh_key, reinterpret_cast(0x42)); + fakeTLS = (pthread_getspecific(eh_key) != reinterpret_cast(0x42)); pthread_setspecific(eh_key, 0); } /** * Returns the thread info structure, creating it if it is not already created. */ static __cxa_thread_info *thread_info() { if ((0 == pthread_once) || pthread_once(&once_control, init_key)) { fakeTLS = true; } if (fakeTLS) { return &singleThreadInfo; } - __cxa_thread_info *info = (__cxa_thread_info*)pthread_getspecific(eh_key); + __cxa_thread_info *info = static_cast<__cxa_thread_info*>(pthread_getspecific(eh_key)); if (0 == info) { - info = (__cxa_thread_info*)calloc(1, sizeof(__cxa_thread_info)); + info = static_cast<__cxa_thread_info*>(calloc(1, sizeof(__cxa_thread_info))); pthread_setspecific(eh_key, info); } return info; } /** * Fast version of thread_info(). May fail if thread_info() is not called on * this thread at least once already. */ static __cxa_thread_info *thread_info_fast() { if (fakeTLS) { return &singleThreadInfo; } - return (__cxa_thread_info*)pthread_getspecific(eh_key); + return static_cast<__cxa_thread_info*>(pthread_getspecific(eh_key)); } /** * ABI function returning the __cxa_eh_globals structure. */ extern "C" __cxa_eh_globals *ABI_NAMESPACE::__cxa_get_globals(void) { return &(thread_info()->globals); } /** * Version of __cxa_get_globals() assuming that __cxa_get_globals() has already * been called at least once by this thread. */ extern "C" __cxa_eh_globals *ABI_NAMESPACE::__cxa_get_globals_fast(void) { return &(thread_info_fast()->globals); } /** * An emergency allocation reserved for when malloc fails. This is treated as * 16 buffers of 1KB each. */ static char emergency_buffer[16384]; /** * Flag indicating whether each buffer is allocated. */ static bool buffer_allocated[16]; /** * Lock used to protect emergency allocation. */ static pthread_mutex_t emergency_malloc_lock = PTHREAD_MUTEX_INITIALIZER; /** * Condition variable used to wait when two threads are both trying to use the * emergency malloc() buffer at once. */ static pthread_cond_t emergency_malloc_wait = PTHREAD_COND_INITIALIZER; /** * Allocates size bytes from the emergency allocation mechanism, if possible. * This function will fail if size is over 1KB or if this thread already has 4 * emergency buffers. If all emergency buffers are allocated, it will sleep * until one becomes available. */ static char *emergency_malloc(size_t size) { if (size > 1024) { return 0; } __cxa_thread_info *info = thread_info(); // Only 4 emergency buffers allowed per thread! if (info->emergencyBuffersHeld > 3) { return 0; } pthread_mutex_lock(&emergency_malloc_lock); int buffer = -1; while (buffer < 0) { // While we were sleeping on the lock, another thread might have free'd // enough memory for us to use, so try the allocation again - no point // using the emergency buffer if there is some real memory that we can // use... void *m = calloc(1, size); if (0 != m) { pthread_mutex_unlock(&emergency_malloc_lock); - return (char*)m; + return static_cast(m); } for (int i=0 ; i<16 ; i++) { if (!buffer_allocated[i]) { buffer = i; buffer_allocated[i] = true; break; } } // If there still isn't a buffer available, then sleep on the condition // variable. 
This will be signalled when another thread releases one // of the emergency buffers. if (buffer < 0) { pthread_cond_wait(&emergency_malloc_wait, &emergency_malloc_lock); } } pthread_mutex_unlock(&emergency_malloc_lock); info->emergencyBuffersHeld++; return emergency_buffer + (1024 * buffer); } /** * Frees a buffer returned by emergency_malloc(). * * Note: Neither this nor emergency_malloc() is particularly efficient. This * should not matter, because neither will be called in normal operation - they * are only used when the program runs out of memory, which should not happen * often. */ static void emergency_malloc_free(char *ptr) { int buffer = -1; // Find the buffer corresponding to this pointer. for (int i=0 ; i<16 ; i++) { - if (ptr == (void*)(emergency_buffer + (1024 * i))) + if (ptr == static_cast(emergency_buffer + (1024 * i))) { buffer = i; break; } } assert(buffer > 0 && "Trying to free something that is not an emergency buffer!"); // emergency_malloc() is expected to return 0-initialized data. We don't // zero the buffer when allocating it, because the static buffers will // begin life containing 0 values. - memset((void*)ptr, 0, 1024); + memset(ptr, 0, 1024); // Signal the condition variable to wake up any threads that are blocking // waiting for some space in the emergency buffer pthread_mutex_lock(&emergency_malloc_lock); // In theory, we don't need to do this with the lock held. In practice, // our array of bools will probably be updated using 32-bit or 64-bit // memory operations, so this update may clobber adjacent values. buffer_allocated[buffer] = false; pthread_cond_signal(&emergency_malloc_wait); pthread_mutex_unlock(&emergency_malloc_lock); } static char *alloc_or_die(size_t size) { - char *buffer = (char*)calloc(1, size); + char *buffer = static_cast(calloc(1, size)); // If calloc() doesn't want to give us any memory, try using an emergency // buffer. if (0 == buffer) { buffer = emergency_malloc(size); // This is only reached if the allocation is greater than 1KB, and // anyone throwing objects that big really should know better. if (0 == buffer) { fprintf(stderr, "Out of memory attempting to allocate exception\n"); std::terminate(); } } return buffer; } static void free_exception(char *e) { // If this allocation is within the address range of the emergency buffer, // don't call free() because it was not allocated with malloc() if ((e > emergency_buffer) && (e < (emergency_buffer + sizeof(emergency_buffer)))) { emergency_malloc_free(e); } else { free(e); } } /** * Allocates an exception structure. Returns a pointer to the space that can * be used to store an object of thrown_size bytes. This function will use an * emergency buffer if malloc() fails, and may block if there are no such * buffers available. */ extern "C" void *__cxa_allocate_exception(size_t thrown_size) { size_t size = thrown_size + sizeof(__cxa_exception); char *buffer = alloc_or_die(size); return buffer+sizeof(__cxa_exception); } extern "C" void *__cxa_allocate_dependent_exception(void) { size_t size = sizeof(__cxa_dependent_exception); char *buffer = alloc_or_die(size); return buffer+sizeof(__cxa_dependent_exception); } /** * __cxa_free_exception() is called when an exception was thrown in between * calling __cxa_allocate_exception() and actually throwing the exception. * This happens when the object's copy constructor throws an exception. * * In this implementation, it is also called by __cxa_end_catch() and during * thread cleanup. 
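The emergency allocator above carves one static 16 KB array into sixteen 1 KB slots, so converting between a slot index and a pointer is plain arithmetic; the mutex and condition variable only guard the slot bitmap and the wait for a free slot. A sketch of just the index math (names are illustrative; the per-thread limit and the blocking wait are omitted):

#include <cstddef>

static char buffer_pool[16 * 1024];     // sixteen 1 KB emergency slots

static char *slot_address(int slot)     // slot in [0, 16)
{
	return buffer_pool + 1024 * slot;
}

static int slot_index(const char *p)    // -1 if p is not from the pool
{
	if (p < buffer_pool || p >= buffer_pool + sizeof(buffer_pool))
		return -1;
	return static_cast<int>((p - buffer_pool) / 1024);
}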
*/ extern "C" void __cxa_free_exception(void *thrown_exception) { - __cxa_exception *ex = ((__cxa_exception*)thrown_exception) - 1; + __cxa_exception *ex = reinterpret_cast<__cxa_exception*>(thrown_exception) - 1; // Free the object that was thrown, calling its destructor if (0 != ex->exceptionDestructor) { try { ex->exceptionDestructor(thrown_exception); } catch(...) { // FIXME: Check that this is really what the spec says to do. std::terminate(); } } - free_exception((char*)ex); + free_exception(reinterpret_cast(ex)); } static void releaseException(__cxa_exception *exception) { if (isDependentException(exception->unwindHeader.exception_class)) { __cxa_free_dependent_exception(exception+1); return; } if (__sync_sub_and_fetch(&exception->referenceCount, 1) == 0) { // __cxa_free_exception() expects to be passed the thrown object, // which immediately follows the exception, not the exception // itself __cxa_free_exception(exception+1); } } void __cxa_free_dependent_exception(void *thrown_exception) { - __cxa_dependent_exception *ex = ((__cxa_dependent_exception*)thrown_exception) - 1; + __cxa_dependent_exception *ex = reinterpret_cast<__cxa_dependent_exception*>(thrown_exception) - 1; assert(isDependentException(ex->unwindHeader.exception_class)); if (ex->primaryException) { - releaseException(realExceptionFromException((__cxa_exception*)ex)); + releaseException(realExceptionFromException(reinterpret_cast<__cxa_exception*>(ex))); } - free_exception((char*)ex); + free_exception(reinterpret_cast(ex)); } /** * Callback function used with _Unwind_Backtrace(). * * Prints a stack trace. Used only for debugging help. * * Note: As of FreeBSD 8.1, dladd() still doesn't work properly, so this only * correctly prints function names from public, relocatable, symbols. */ static _Unwind_Reason_Code trace(struct _Unwind_Context *context, void *c) { Dl_info myinfo; int mylookup = - dladdr((void*)(uintptr_t)__cxa_current_exception_type, &myinfo); - void *ip = (void*)_Unwind_GetIP(context); + dladdr(reinterpret_cast(__cxa_current_exception_type), &myinfo); + void *ip = reinterpret_cast(_Unwind_GetIP(context)); Dl_info info; if (dladdr(ip, &info) != 0) { if (mylookup == 0 || strcmp(info.dli_fname, myinfo.dli_fname) != 0) { printf("%p:%s() in %s\n", ip, info.dli_sname, info.dli_fname); } } return _URC_CONTINUE_UNWIND; } /** * Report a failure that occurred when attempting to throw an exception. * * If the failure happened by falling off the end of the stack without finding * a handler, prints a back trace before aborting. 
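releaseException() above relies on the GCC __sync builtins for the exception_ptr reference count: only the thread whose decrement takes the count to zero frees the exception, and a dependent exception forwards the release to the primary it wraps. A minimal sketch of the decrement-and-maybe-free step (refcounted and destroy are hypothetical stand-ins):

struct refcounted { unsigned long refs; /* ... */ };

extern void destroy(refcounted *obj);

void release(refcounted *obj)
{
	// Atomically decrement; only the caller that takes the count to zero
	// is allowed to destroy the object.
	if (__sync_sub_and_fetch(&obj->refs, 1) == 0)
		destroy(obj);
}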
*/ +#if __GNUC__ > 3 && __GNUC_MINOR__ > 2 +extern "C" void *__cxa_begin_catch(void *e) throw(); +#else +extern "C" void *__cxa_begin_catch(void *e); +#endif static void report_failure(_Unwind_Reason_Code err, __cxa_exception *thrown_exception) { switch (err) { default: break; case _URC_FATAL_PHASE1_ERROR: fprintf(stderr, "Fatal error during phase 1 unwinding\n"); break; -#ifndef __arm__ +#if !defined(__arm__) || defined(__ARM_DWARF_EH__) case _URC_FATAL_PHASE2_ERROR: fprintf(stderr, "Fatal error during phase 2 unwinding\n"); break; #endif case _URC_END_OF_STACK: + __cxa_begin_catch (&(thrown_exception->unwindHeader)); + std::terminate(); fprintf(stderr, "Terminating due to uncaught exception %p", - (void*)thrown_exception); + static_cast(thrown_exception)); thrown_exception = realExceptionFromException(thrown_exception); static const __class_type_info *e_ti = static_cast(&typeid(std::exception)); const __class_type_info *throw_ti = dynamic_cast(thrown_exception->exceptionType); if (throw_ti) { std::exception *e = - (std::exception*)e_ti->cast_to((void*)(thrown_exception+1), - throw_ti); + static_cast(e_ti->cast_to(static_cast(thrown_exception+1), + throw_ti)); if (e) { fprintf(stderr, " '%s'", e->what()); } } size_t bufferSize = 128; - char *demangled = (char*)malloc(bufferSize); + char *demangled = static_cast(malloc(bufferSize)); const char *mangled = thrown_exception->exceptionType->name(); int status; demangled = __cxa_demangle(mangled, demangled, &bufferSize, &status); fprintf(stderr, " of type %s\n", - status == 0 ? (const char*)demangled : mangled); + status == 0 ? demangled : mangled); if (status == 0) { free(demangled); } // Print a back trace if no handler is found. // TODO: Make this optional #ifndef __arm__ _Unwind_Backtrace(trace, 0); #endif + + // Just abort. No need to call std::terminate for the second time + abort(); break; } std::terminate(); } static void throw_exception(__cxa_exception *ex) { __cxa_thread_info *info = thread_info(); ex->unexpectedHandler = info->unexpectedHandler; if (0 == ex->unexpectedHandler) { ex->unexpectedHandler = unexpectedHandler; } ex->terminateHandler = info->terminateHandler; if (0 == ex->terminateHandler) { ex->terminateHandler = terminateHandler; } info->globals.uncaughtExceptions++; _Unwind_Reason_Code err = _Unwind_RaiseException(&ex->unwindHeader); // The _Unwind_RaiseException() function should not return, it should // unwind the stack past this function. If it does return, then something // has gone wrong. report_failure(err, ex); } /** * ABI function for throwing an exception. Takes the object to be thrown (the * pointer returned by __cxa_allocate_exception()), the type info for the * pointee, and the destructor (if there is one) as arguments. 
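__cxa_throw(), defined just below, is the last of the three calls a compiler emits for a throw expression: allocate the exception object, construct it in place, then hand it to the runtime with its type_info and destructor. Roughly, throw std::runtime_error("boom") lowers to something like the following hand-written approximation (not actual compiler output):

#include <stdexcept>
#include <new>
#include <typeinfo>
#include <cstddef>

// Declarations matching the ABI entry points defined in this file.
extern "C" void *__cxa_allocate_exception(std::size_t thrown_size);
extern "C" void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
                            void (*dest)(void *));

// Destructor thunk: the compiler normally supplies the object's destructor here.
static void destroy_runtime_error(void *obj)
{
	static_cast<std::runtime_error*>(obj)->~runtime_error();
}

void throw_by_hand()
{
	// 1. Space for the thrown object, placed just past the __cxa_exception header.
	void *mem = __cxa_allocate_exception(sizeof(std::runtime_error));
	// 2. Construct the exception object in place.
	new (mem) std::runtime_error("boom");
	// 3. Hand it to the runtime, which begins stack unwinding.
	__cxa_throw(mem, const_cast<std::type_info*>(&typeid(std::runtime_error)),
	            destroy_runtime_error);
}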
*/ extern "C" void __cxa_throw(void *thrown_exception, std::type_info *tinfo, void(*dest)(void*)) { - __cxa_exception *ex = ((__cxa_exception*)thrown_exception) - 1; + __cxa_exception *ex = reinterpret_cast<__cxa_exception*>(thrown_exception) - 1; ex->referenceCount = 1; ex->exceptionType = tinfo; ex->exceptionDestructor = dest; ex->unwindHeader.exception_class = exception_class; ex->unwindHeader.exception_cleanup = exception_cleanup; throw_exception(ex); } extern "C" void __cxa_rethrow_primary_exception(void* thrown_exception) { if (NULL == thrown_exception) { return; } __cxa_exception *original = exceptionFromPointer(thrown_exception); - __cxa_dependent_exception *ex = ((__cxa_dependent_exception*)__cxa_allocate_dependent_exception())-1; + __cxa_dependent_exception *ex = reinterpret_cast<__cxa_dependent_exception*>(__cxa_allocate_dependent_exception())-1; ex->primaryException = thrown_exception; __cxa_increment_exception_refcount(thrown_exception); ex->exceptionType = original->exceptionType; ex->unwindHeader.exception_class = dependent_exception_class; ex->unwindHeader.exception_cleanup = dependent_exception_cleanup; - throw_exception((__cxa_exception*)ex); + throw_exception(reinterpret_cast<__cxa_exception*>(ex)); } extern "C" void *__cxa_current_primary_exception(void) { __cxa_eh_globals* globals = __cxa_get_globals(); __cxa_exception *ex = globals->caughtExceptions; if (0 == ex) { return NULL; } ex = realExceptionFromException(ex); __sync_fetch_and_add(&ex->referenceCount, 1); return ex + 1; } extern "C" void __cxa_increment_exception_refcount(void* thrown_exception) { if (NULL == thrown_exception) { return; } - __cxa_exception *ex = ((__cxa_exception*)thrown_exception) - 1; + __cxa_exception *ex = static_cast<__cxa_exception*>(thrown_exception) - 1; if (isDependentException(ex->unwindHeader.exception_class)) { return; } __sync_fetch_and_add(&ex->referenceCount, 1); } extern "C" void __cxa_decrement_exception_refcount(void* thrown_exception) { if (NULL == thrown_exception) { return; } - __cxa_exception *ex = ((__cxa_exception*)thrown_exception) - 1; + __cxa_exception *ex = static_cast<__cxa_exception*>(thrown_exception) - 1; releaseException(ex); } /** * ABI function. Rethrows the current exception. Does not remove the * exception from the stack or decrement its handler count - the compiler is * expected to set the landing pad for this function to the end of the catch * block, and then call _Unwind_Resume() to continue unwinding once * __cxa_end_catch() has been called and any cleanup code has been run. */ extern "C" void __cxa_rethrow() { __cxa_thread_info *ti = thread_info(); __cxa_eh_globals *globals = &ti->globals; // Note: We don't remove this from the caught list here, because // __cxa_end_catch will be called when we unwind out of the try block. We // could probably make this faster by providing an alternative rethrow // function and ensuring that all cleanup code is run before calling it, so // we can skip the top stack frame when unwinding. 
__cxa_exception *ex = globals->caughtExceptions; if (0 == ex) { fprintf(stderr, "Attempting to rethrow an exception that doesn't exist!\n"); std::terminate(); } if (ti->foreign_exception_state != __cxa_thread_info::none) { ti->foreign_exception_state = __cxa_thread_info::rethrown; - _Unwind_Exception *e = (_Unwind_Exception*)ex; + _Unwind_Exception *e = reinterpret_cast<_Unwind_Exception*>(ex); _Unwind_Reason_Code err = _Unwind_Resume_or_Rethrow(e); report_failure(err, ex); return; } assert(ex->handlerCount > 0 && "Rethrowing uncaught exception!"); // ex->handlerCount will be decremented in __cxa_end_catch in enclosing // catch block // Make handler count negative. This will tell __cxa_end_catch that // exception was rethrown and exception object should not be destroyed // when handler count become zero ex->handlerCount = -ex->handlerCount; // Continue unwinding the stack with this exception. This should unwind to // the place in the caller where __cxa_end_catch() is called. The caller // will then run cleanup code and bounce the exception back with // _Unwind_Resume(). _Unwind_Reason_Code err = _Unwind_Resume_or_Rethrow(&ex->unwindHeader); report_failure(err, ex); } /** * Returns the type_info object corresponding to the filter. */ static std::type_info *get_type_info_entry(_Unwind_Context *context, dwarf_eh_lsda *lsda, int filter) { // Get the address of the record in the table. dw_eh_ptr_t record = lsda->type_table - dwarf_size_of_fixed_size_field(lsda->type_table_encoding)*filter; //record -= 4; dw_eh_ptr_t start = record; // Read the value, but it's probably an indirect reference... int64_t offset = read_value(lsda->type_table_encoding, &record); // (If the entry is 0, don't try to dereference it. That would be bad.) if (offset == 0) { return 0; } // ...so we need to resolve it - return (std::type_info*)resolve_indirect_value(context, - lsda->type_table_encoding, offset, start); + return reinterpret_cast<std::type_info*>(resolve_indirect_value(context, + lsda->type_table_encoding, offset, start)); } /** * Checks the type signature found in a handler against the type of the thrown * object. If ex is 0 then it is assumed to be a foreign exception and only * matches cleanups. */ static bool check_type_signature(__cxa_exception *ex, const std::type_info *type, void *&adjustedPtr) { - void *exception_ptr = (void*)(ex+1); + void *exception_ptr = static_cast<void*>(ex+1); const std::type_info *ex_type = ex ? ex->exceptionType : 0; bool is_ptr = ex ? ex_type->__is_pointer_p() : false; if (is_ptr) { - exception_ptr = *(void**)exception_ptr; + exception_ptr = *static_cast<void**>(exception_ptr); } // Always match a catchall, even with a foreign exception // // Note: A 0 here is a catchall, not a cleanup, so we return true to // indicate that we found a catch. if (0 == type) { if (ex) { adjustedPtr = exception_ptr; } return true; } if (0 == ex) { return false; } // If the types are the same, no casting is needed. if (*type == *ex_type) { adjustedPtr = exception_ptr; return true; } if (type->__do_catch(ex_type, &exception_ptr, 1)) { adjustedPtr = exception_ptr; return true; } return false; } /** * Checks whether the exception matches the type specifiers in this action * record. If the exception only matches cleanups, then this returns false. * If it matches a catch (including a catchall) then it returns true. * * The selector argument is used to return the selector that is passed in the * second exception register when installing the context.
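check_type_signature() above encodes the language's handler-matching rules: a null handler type is a catch-all, identical types match directly, and everything else goes through __do_catch(), which also performs base-class and pointer conversions and adjusts the pointer the catch block will receive. A small standard-C++ illustration, not part of this change, of the cases it has to distinguish:

    #include <cstdio>

    struct Base { virtual ~Base() {} };
    struct Derived : Base {};

    int main()
    {
        try { throw Derived(); }
        catch (const Base &) {      // base-class match via __do_catch()
            std::puts("Derived caught as Base&");
        }

        try { throw new Derived(); }
        catch (Base *p) {           // pointer catch: the __is_pointer_p() path,
            std::puts("Derived* caught as Base*");  // pointer value may be adjusted
            delete p;
        }

        try { throw 42; }
        catch (...) {               // catch-all: the handler's type_info is null
            std::puts("catch-all matches anything, even foreign exceptions");
        }
        return 0;
    }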
*/ static handler_type check_action_record(_Unwind_Context *context, dwarf_eh_lsda *lsda, dw_eh_ptr_t action_record, __cxa_exception *ex, unsigned long *selector, void *&adjustedPtr) { if (!action_record) { return handler_cleanup; } handler_type found = handler_none; while (action_record) { int filter = read_sleb128(&action_record); dw_eh_ptr_t action_record_offset_base = action_record; int displacement = read_sleb128(&action_record); action_record = displacement ? action_record_offset_base + displacement : 0; // We only check handler types for C++ exceptions - foreign exceptions // are only allowed for cleanups and catchalls. if (filter > 0) { std::type_info *handler_type = get_type_info_entry(context, lsda, filter); if (check_type_signature(ex, handler_type, adjustedPtr)) { *selector = filter; return handler_catch; } } else if (filter < 0 && 0 != ex) { bool matched = false; *selector = filter; -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) filter++; std::type_info *handler_type = get_type_info_entry(context, lsda, filter--); while (handler_type) { if (check_type_signature(ex, handler_type, adjustedPtr)) { matched = true; break; } handler_type = get_type_info_entry(context, lsda, filter--); } #else - unsigned char *type_index = ((unsigned char*)lsda->type_table - filter - 1); + unsigned char *type_index = reinterpret_cast(lsda->type_table) - filter - 1; while (*type_index) { std::type_info *handler_type = get_type_info_entry(context, lsda, *(type_index++)); // If the exception spec matches a permitted throw type for // this function, don't report a handler - we are allowed to // propagate this exception out. if (check_type_signature(ex, handler_type, adjustedPtr)) { matched = true; break; } } #endif if (matched) { continue; } // If we don't find an allowed exception spec, we need to install // the context for this action. The landing pad will then call the // unexpected exception function. Treat this as a catch return handler_catch; } else if (filter == 0) { *selector = filter; found = handler_cleanup; } } return found; } static void pushCleanupException(_Unwind_Exception *exceptionObject, __cxa_exception *ex) { -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) __cxa_thread_info *info = thread_info_fast(); if (ex) { ex->cleanupCount++; if (ex->cleanupCount > 1) { assert(exceptionObject == info->currentCleanup); return; } ex->nextCleanup = info->currentCleanup; } info->currentCleanup = exceptionObject; #endif } /** * The exception personality function. This is referenced in the unwinding * DWARF metadata and is called by the unwind library for each C++ stack frame * containing catch or cleanup code. */ extern "C" BEGIN_PERSONALITY_FUNCTION(__gxx_personality_v0) // This personality function is for version 1 of the ABI. If you use it // with a future version of the ABI, it won't know what to do, so it // reports a fatal error and give up before it breaks anything. if (1 != version) { return _URC_FATAL_PHASE1_ERROR; } __cxa_exception *ex = 0; __cxa_exception *realEx = 0; // If this exception is throw by something else then we can't make any // assumptions about its layout beyond the fields declared in // _Unwind_Exception. 
bool foreignException = !isCXXException(exceptionClass); // If this isn't a foreign exception, then we have a C++ exception structure if (!foreignException) { ex = exceptionFromPointer(exceptionObject); realEx = realExceptionFromException(ex); } +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) unsigned char *lsda_addr = - (unsigned char*)_Unwind_GetLanguageSpecificData(context); + static_cast<unsigned char*>(_Unwind_GetLanguageSpecificData(context)); +#else + unsigned char *lsda_addr = + reinterpret_cast<unsigned char*>(static_cast<uintptr_t>(_Unwind_GetLanguageSpecificData(context))); +#endif // No LSDA implies no landing pads - try the next frame if (0 == lsda_addr) { return continueUnwinding(exceptionObject, context); } // These two variables define how the exception will be handled. dwarf_eh_action action = {0}; unsigned long selector = 0; // During the search phase, we do a complete lookup. If we return // _URC_HANDLER_FOUND, then the phase 2 unwind will call this function with // a _UA_HANDLER_FRAME action, telling us to install the handler frame. If // we return _URC_CONTINUE_UNWIND, we may be called again later with a // _UA_CLEANUP_PHASE action for this frame. // // The point of the two-stage unwind allows us to entirely avoid any stack // unwinding if there is no handler. If there are just cleanups found, // then we can just panic call an abort function. // // Matching a handler is much more expensive than matching a cleanup, // because we don't need to bother doing type comparisons (or looking at // the type table at all) for a cleanup. This means that there is no need // to cache the result of finding a cleanup, because it's (quite) quick to // look it up again from the action table. if (actions & _UA_SEARCH_PHASE) { struct dwarf_eh_lsda lsda = parse_lsda(context, lsda_addr); if (!dwarf_eh_find_callsite(context, &lsda, &action)) { // EH range not found. This happens if exception is thrown and not // caught inside a cleanup (destructor). We should call // terminate() in this case. The catchTemp (landing pad) field of // exception object will contain null when personality function is // called with _UA_HANDLER_FRAME action for phase 2 unwinding. return _URC_HANDLER_FOUND; } handler_type found_handler = check_action_record(context, &lsda, action.action_record, realEx, &selector, ex->adjustedPtr); // If there's no action record, we've only found a cleanup, so keep // searching for something real if (found_handler == handler_catch) { // Cache the results for the phase 2 unwind, if we found a handler // and this is not a foreign exception. if (ex) { saveLandingPad(context, exceptionObject, ex, selector, action.landing_pad); - ex->languageSpecificData = (const char*)lsda_addr; - ex->actionRecord = (const char*)action.action_record; + ex->languageSpecificData = reinterpret_cast<const char*>(lsda_addr); + ex->actionRecord = reinterpret_cast<const char*>(action.action_record); // ex->adjustedPtr is set when finding the action record. } return _URC_HANDLER_FOUND; } return continueUnwinding(exceptionObject, context); } // If this is a foreign exception, we didn't have anywhere to cache the // lookup stuff, so we need to do it again. If this is either a forced // unwind, a foreign exception, or a cleanup, then we just install the // context for a cleanup.
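The search/cleanup split described in the comment above is observable from plain C++: frames containing only destructors are skipped in phase 1 and revisited in phase 2, while a frame with a matching catch stops the phase 1 walk. A hedged illustration, not part of this change:

    #include <cstdio>

    struct Cleanup {
        ~Cleanup() { std::puts("phase 2: cleanup-only frame runs its destructor"); }
    };

    static void middle()
    {
        Cleanup c;   // this frame has only a cleanup, so phase 1 passes it by
        throw 1;
    }

    int main()
    {
        try {
            middle();
        } catch (int) {   // this frame has a handler, so phase 1 stops here
            std::puts("phase 2: handler frame runs the catch block");
        }
        return 0;
    }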
if (!(actions & _UA_HANDLER_FRAME)) { // cleanup struct dwarf_eh_lsda lsda = parse_lsda(context, lsda_addr); dwarf_eh_find_callsite(context, &lsda, &action); if (0 == action.landing_pad) { return continueUnwinding(exceptionObject, context); } handler_type found_handler = check_action_record(context, &lsda, action.action_record, realEx, &selector, ex->adjustedPtr); // Ignore handlers this time. if (found_handler != handler_cleanup) { return continueUnwinding(exceptionObject, context); } pushCleanupException(exceptionObject, ex); } else if (foreignException) { struct dwarf_eh_lsda lsda = parse_lsda(context, lsda_addr); dwarf_eh_find_callsite(context, &lsda, &action); check_action_record(context, &lsda, action.action_record, realEx, &selector, ex->adjustedPtr); } else if (ex->catchTemp == 0) { // Uncaught exception in cleanup, calling terminate std::terminate(); } else { // Restore the saved info if we saved some last time. loadLandingPad(context, exceptionObject, ex, &selector, &action.landing_pad); ex->catchTemp = 0; ex->handlerSwitchValue = 0; } - _Unwind_SetIP(context, (unsigned long)action.landing_pad); + _Unwind_SetIP(context, reinterpret_cast(action.landing_pad)); _Unwind_SetGR(context, __builtin_eh_return_data_regno(0), - (unsigned long)exceptionObject); + reinterpret_cast(exceptionObject)); _Unwind_SetGR(context, __builtin_eh_return_data_regno(1), selector); return _URC_INSTALL_CONTEXT; } /** * ABI function called when entering a catch statement. The argument is the * pointer passed out of the personality function. This is always the start of * the _Unwind_Exception object. The return value for this function is the * pointer to the caught exception, which is either the adjusted pointer (for * C++ exceptions) of the unadjusted pointer (for foreign exceptions). */ #if __GNUC__ > 3 && __GNUC_MINOR__ > 2 extern "C" void *__cxa_begin_catch(void *e) throw() #else extern "C" void *__cxa_begin_catch(void *e) #endif { // We can't call the fast version here, because if the first exception that // we see is a foreign exception then we won't have called it yet. __cxa_thread_info *ti = thread_info(); __cxa_eh_globals *globals = &ti->globals; globals->uncaughtExceptions--; - _Unwind_Exception *exceptionObject = (_Unwind_Exception*)e; + _Unwind_Exception *exceptionObject = static_cast<_Unwind_Exception*>(e); if (isCXXException(exceptionObject->exception_class)) { __cxa_exception *ex = exceptionFromPointer(exceptionObject); if (ex->handlerCount == 0) { // Add this to the front of the list of exceptions being handled // and increment its handler count so that it won't be deleted // prematurely. ex->nextException = globals->caughtExceptions; globals->caughtExceptions = ex; } if (ex->handlerCount < 0) { // Rethrown exception is catched before end of catch block. // Clear the rethrow flag (make value positive) - we are allowed // to delete this exception at the end of the catch block, as long // as it isn't thrown again later. // Code pattern: // // try { // throw x; // } // catch() { // try { // throw; // } // catch() { // __cxa_begin_catch() <- we are here // } // } ex->handlerCount = -ex->handlerCount + 1; } else { ex->handlerCount++; } ti->foreign_exception_state = __cxa_thread_info::none; return ex->adjustedPtr; } else { // If this is a foreign exception, then we need to be able to // store it. We can't chain foreign exceptions, so we give up // if there are already some outstanding ones. 
if (globals->caughtExceptions != 0) { std::terminate(); } - globals->caughtExceptions = (__cxa_exception*)exceptionObject; + globals->caughtExceptions = reinterpret_cast<__cxa_exception*>(exceptionObject); ti->foreign_exception_state = __cxa_thread_info::caught; } // exceptionObject is the pointer to the _Unwind_Exception within the // __cxa_exception. The throw object is after this - return ((char*)exceptionObject + sizeof(_Unwind_Exception)); + return (reinterpret_cast(exceptionObject) + sizeof(_Unwind_Exception)); } /** * ABI function called when exiting a catch block. This will free the current * exception if it is no longer referenced in other catch blocks. */ extern "C" void __cxa_end_catch() { // We can call the fast version here because the slow version is called in // __cxa_throw(), which must have been called before we end a catch block __cxa_thread_info *ti = thread_info_fast(); __cxa_eh_globals *globals = &ti->globals; __cxa_exception *ex = globals->caughtExceptions; assert(0 != ex && "Ending catch when no exception is on the stack!"); if (ti->foreign_exception_state != __cxa_thread_info::none) { globals->caughtExceptions = 0; if (ti->foreign_exception_state != __cxa_thread_info::rethrown) { - _Unwind_Exception *e = (_Unwind_Exception*)ti->globals.caughtExceptions; + _Unwind_Exception *e = reinterpret_cast<_Unwind_Exception*>(ti->globals.caughtExceptions); e->exception_cleanup(_URC_FOREIGN_EXCEPTION_CAUGHT, e); } ti->foreign_exception_state = __cxa_thread_info::none; return; } bool deleteException = true; if (ex->handlerCount < 0) { // exception was rethrown. Exception should not be deleted even if // handlerCount become zero. // Code pattern: // try { // throw x; // } // catch() { // { // throw; // } // cleanup { // __cxa_end_catch(); <- we are here // } // } // ex->handlerCount++; deleteException = false; } else { ex->handlerCount--; } if (ex->handlerCount == 0) { globals->caughtExceptions = ex->nextException; if (deleteException) { releaseException(ex); } } } /** * ABI function. Returns the type of the current exception. */ extern "C" std::type_info *__cxa_current_exception_type() { __cxa_eh_globals *globals = __cxa_get_globals(); __cxa_exception *ex = globals->caughtExceptions; return ex ? ex->exceptionType : 0; } /** * ABI function, called when an exception specification is violated. * * This function does not return. */ extern "C" void __cxa_call_unexpected(void*exception) { - _Unwind_Exception *exceptionObject = (_Unwind_Exception*)exception; + _Unwind_Exception *exceptionObject = static_cast<_Unwind_Exception*>(exception); if (exceptionObject->exception_class == exception_class) { __cxa_exception *ex = exceptionFromPointer(exceptionObject); if (ex->unexpectedHandler) { ex->unexpectedHandler(); // Should not be reached. abort(); } } std::unexpected(); // Should not be reached. abort(); } /** * ABI function, returns the adjusted pointer to the exception object. */ extern "C" void *__cxa_get_exception_ptr(void *exceptionObject) { return exceptionFromPointer(exceptionObject)->adjustedPtr; } /** * As an extension, we provide the ability for the unexpected and terminate * handlers to be thread-local. We default to the standards-compliant * behaviour where they are global. */ static bool thread_local_handlers = false; namespace pathscale { /** * Sets whether unexpected and terminate handlers should be thread-local. */ void set_use_thread_local_handlers(bool flag) throw() { thread_local_handlers = flag; } /** * Sets a thread-local unexpected handler. 
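A hedged sketch of how the PathScale thread-local handler extension in this section can be exercised. The declarations below are copied to match the definitions in this file, since the header that exposes them is not part of this diff; a real consumer would include that header instead.

    #include <cstdio>
    #include <cstdlib>

    typedef void (*terminate_handler)();

    namespace pathscale
    {
        void set_use_thread_local_handlers(bool flag) throw();
        terminate_handler set_terminate(terminate_handler f) throw();
    }

    static void per_thread_terminate()
    {
        std::puts("terminate handler installed for this thread only");
        std::abort();   // a terminate handler must not return
    }

    int main()
    {
        pathscale::set_use_thread_local_handlers(true); // route std::set_terminate
                                                        // to the per-thread slot
        pathscale::set_terminate(per_thread_terminate); // affects this thread only
        throw 1;   // uncaught: std::terminate() runs the per-thread handler
    }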
*/ unexpected_handler set_unexpected(unexpected_handler f) throw() { static __cxa_thread_info *info = thread_info(); unexpected_handler old = info->unexpectedHandler; info->unexpectedHandler = f; return old; } /** * Sets a thread-local terminate handler. */ terminate_handler set_terminate(terminate_handler f) throw() { static __cxa_thread_info *info = thread_info(); terminate_handler old = info->terminateHandler; info->terminateHandler = f; return old; } } namespace std { /** * Sets the function that will be called when an exception specification is * violated. */ unexpected_handler set_unexpected(unexpected_handler f) throw() { if (thread_local_handlers) { return pathscale::set_unexpected(f); } return ATOMIC_SWAP(&unexpectedHandler, f); } /** * Sets the function that is called to terminate the program. */ terminate_handler set_terminate(terminate_handler f) throw() { if (thread_local_handlers) { return pathscale::set_terminate(f); } return ATOMIC_SWAP(&terminateHandler, f); } /** * Terminates the program, calling a custom terminate implementation if * required. */ void terminate() { static __cxa_thread_info *info = thread_info(); if (0 != info && 0 != info->terminateHandler) { info->terminateHandler(); // Should not be reached - a terminate handler is not expected to // return. abort(); } terminateHandler(); } /** * Called when an unexpected exception is encountered (i.e. an exception * violates an exception specification). This calls abort() unless a * custom handler has been set.. */ void unexpected() { static __cxa_thread_info *info = thread_info(); if (0 != info && 0 != info->unexpectedHandler) { info->unexpectedHandler(); // Should not be reached - a terminate handler is not expected to // return. abort(); } unexpectedHandler(); } /** * Returns whether there are any exceptions currently being thrown that * have not been caught. This can occur inside a nested catch statement. */ bool uncaught_exception() throw() { __cxa_thread_info *info = thread_info(); return info->globals.uncaughtExceptions != 0; } /** * Returns the current unexpected handler. */ unexpected_handler get_unexpected() throw() { __cxa_thread_info *info = thread_info(); if (info->unexpectedHandler) { return info->unexpectedHandler; } return ATOMIC_LOAD(&unexpectedHandler); } /** * Returns the current terminate handler. 
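std::uncaught_exception(), defined above from the per-thread uncaughtExceptions counter, is typically consulted by destructors that need to know whether they are running as part of stack unwinding. A minimal standard-C++ example, not part of this change:

    #include <cstdio>
    #include <exception>

    struct LogOnUnwind {
        ~LogOnUnwind() {
            if (std::uncaught_exception())
                std::puts("destructor running while an exception is in flight");
        }
    };

    int main()
    {
        try {
            LogOnUnwind guard;
            throw 1;   // guard's destructor runs during unwinding
        } catch (int) {
        }
        return 0;
    }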
*/ terminate_handler get_terminate() throw() { __cxa_thread_info *info = thread_info(); if (info->terminateHandler) { return info->terminateHandler; } return ATOMIC_LOAD(&terminateHandler); } } -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) extern "C" _Unwind_Exception *__cxa_get_cleanup(void) { __cxa_thread_info *info = thread_info_fast(); _Unwind_Exception *exceptionObject = info->currentCleanup; if (isCXXException(exceptionObject->exception_class)) { __cxa_exception *ex = exceptionFromPointer(exceptionObject); ex->cleanupCount--; if (ex->cleanupCount == 0) { info->currentCleanup = ex->nextCleanup; ex->nextCleanup = 0; } } else { info->currentCleanup = 0; } return exceptionObject; } asm ( ".pushsection .text.__cxa_end_cleanup \n" ".global __cxa_end_cleanup \n" ".type __cxa_end_cleanup, \"function\" \n" "__cxa_end_cleanup: \n" " push {r1, r2, r3, r4} \n" " bl __cxa_get_cleanup \n" " push {r1, r2, r3, r4} \n" " b _Unwind_Resume \n" " bl abort \n" ".popsection \n" ); #endif Index: head/contrib/libcxxrt/guard.cc =================================================================== --- head/contrib/libcxxrt/guard.cc (revision 276416) +++ head/contrib/libcxxrt/guard.cc (revision 276417) @@ -1,152 +1,193 @@ /* * Copyright 2010-2012 PathScale, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * guard.cc: Functions for thread-safe static initialisation. * * Static values in C++ can be initialised lazily their first use. This file * contains functions that are used to ensure that two threads attempting to * initialize the same static do not call the constructor twice. This is * important because constructors can have side effects, so calling the * constructor twice may be very bad. * * Statics that require initialisation are protected by a 64-bit value. Any * platform that can do 32-bit atomic test and set operations can use this * value as a low-overhead lock. Because statics (in most sane code) are * accessed far more times than they are initialised, this lock implementation * is heavily optimised towards the case where the static has already been * initialised. 
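The guard protocol described above is driven by three ABI calls that the compiler inserts around every block-scope static. A hedged sketch, not part of this change, of that lowering: Widget is an invented name, uint64_t stands in for the platform guard_t, real compilers inline the fast path, and on big-endian targets a different byte holds the initialised flag.

    #include <new>
    #include <stdint.h>

    extern "C" int  __cxa_guard_acquire(volatile uint64_t *guard);
    extern "C" void __cxa_guard_release(volatile uint64_t *guard);
    extern "C" void __cxa_guard_abort(volatile uint64_t *guard);

    struct Widget { Widget(); };
    Widget::Widget() {}

    // Conceptual lowering of: Widget &get_widget() { static Widget w; return w; }
    Widget &get_widget()
    {
        static uint64_t guard;   // zero-initialised guard word
        alignas(Widget) static unsigned char storage[sizeof(Widget)];

        if ((guard & 1) == 0)    // fast path: INITIALISED bit not yet set?
        {
            if (__cxa_guard_acquire(&guard))
            {
                try {
                    new (storage) Widget();        // run the constructor once
                } catch (...) {
                    __cxa_guard_abort(&guard);     // unlock, stay uninitialised
                    throw;
                }
                __cxa_guard_release(&guard);       // mark initialised, unlock
            }
        }
        return *reinterpret_cast<Widget *>(storage);
    }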
*/ #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <pthread.h> #include <assert.h> #include "atomic.h" // Older GCC doesn't define __LITTLE_ENDIAN__ #ifndef __LITTLE_ENDIAN__ // If __BYTE_ORDER__ is defined, use that instead # ifdef __BYTE_ORDER__ # if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define __LITTLE_ENDIAN__ # endif // x86 and ARM are the most common little-endian CPUs, so let's have a // special case for them (ARM is already special cased). Assume everything // else is big endian. # elif defined(__x86_64) || defined(__i386) # define __LITTLE_ENDIAN__ # endif #endif /* * The least significant bit of the guard variable indicates that the object * has been initialised, the most significant bit is used for a spinlock. */ #ifdef __arm__ // ARM ABI - 32-bit guards. typedef uint32_t guard_t; -static const uint32_t LOCKED = ((guard_t)1) << 31; +typedef uint32_t guard_lock_t; +static const uint32_t LOCKED = static_cast<guard_t>(1) << 31; static const uint32_t INITIALISED = 1; -#else +#define LOCK_PART(guard) (guard) +#define INIT_PART(guard) (guard) +#elif defined(_LP64) typedef uint64_t guard_t; +typedef uint64_t guard_lock_t; # if defined(__LITTLE_ENDIAN__) -static const guard_t LOCKED = ((guard_t)1) << 63; +static const guard_t LOCKED = static_cast<guard_t>(1) << 63; static const guard_t INITIALISED = 1; # else static const guard_t LOCKED = 1; -static const guard_t INITIALISED = ((guard_t)1) << 56; +static const guard_t INITIALISED = static_cast<guard_t>(1) << 56; # endif +#define LOCK_PART(guard) (guard) +#define INIT_PART(guard) (guard) +#else +typedef uint32_t guard_lock_t; +# if defined(__LITTLE_ENDIAN__) +typedef struct { + uint32_t init_half; + uint32_t lock_half; +} guard_t; +static const uint32_t LOCKED = static_cast<uint32_t>(1) << 31; +static const uint32_t INITIALISED = 1; +# else +typedef struct { + uint32_t init_half; + uint32_t lock_half; +} guard_t; +_Static_assert(sizeof(guard_t) == sizeof(uint64_t), ""); +static const uint32_t LOCKED = 1; +static const uint32_t INITIALISED = static_cast<uint32_t>(1) << 24; +# endif +#define LOCK_PART(guard) (&(guard)->lock_half) +#define INIT_PART(guard) (&(guard)->init_half) #endif +static const guard_lock_t INITIAL = 0; /** * Acquires a lock on a guard, returning 0 if the object has already been * initialised, and 1 if it has not. If the object is already constructed then * this function just needs to read a byte from memory and return. */ extern "C" int __cxa_guard_acquire(volatile guard_t *guard_object) { + guard_lock_t old; // Not an atomic read, doesn't establish a happens-before relationship, but // if one is already established and we end up seeing an initialised state // then it's a fast path, otherwise we'll do something more expensive than // this test anyway... - if ((INITIALISED == *guard_object)) { return 0; } + if (INITIALISED == *INIT_PART(guard_object)) + return 0; // Spin trying to do the initialisation - while (1) + for (;;) { // Loop trying to move the value of the guard from 0 (not // locked, not initialised) to the locked-uninitialised // position. - switch (__sync_val_compare_and_swap(guard_object, 0, LOCKED)) - { - // If the old value was 0, we succeeded, so continue - // initialising - case 0: + old = __sync_val_compare_and_swap(LOCK_PART(guard_object), + INITIAL, LOCKED); + if (old == INITIAL) { + // Lock obtained. If lock and init bit are + // in separate words, check for init race. + if (INIT_PART(guard_object) == LOCK_PART(guard_object)) return 1; - // If this was already initialised, return and let the caller skip - // initialising it again.
- case INITIALISED: - return 0; - // If it is locked by another thread, relinquish the CPU and try - // again later. - case LOCKED: - case LOCKED | INITIALISED: - sched_yield(); - break; - // If it is some other value, then something has gone badly wrong. - // Give up. - default: - fprintf(stderr, "Invalid state detected attempting to lock static initialiser.\n"); - abort(); + if (INITIALISED != *INIT_PART(guard_object)) + return 1; + + // No need for a memory barrier here, + // see first comment. + *LOCK_PART(guard_object) = INITIAL; + return 0; } + // If lock and init bit are in the same word, check again + // if we are done. + if (INIT_PART(guard_object) == LOCK_PART(guard_object) && + old == INITIALISED) + return 0; + + assert(old == LOCKED); + // Another thread holds the lock. + // If lock and init bit are in different words, check + // if we are done before yielding and looping. + if (INIT_PART(guard_object) != LOCK_PART(guard_object) && + INITIALISED == *INIT_PART(guard_object)) + return 0; + sched_yield(); } - //__builtin_unreachable(); - return 0; } /** * Releases the lock without marking the object as initialised. This function * is called if initialising a static causes an exception to be thrown. */ extern "C" void __cxa_guard_abort(volatile guard_t *guard_object) { __attribute__((unused)) - bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, 0); + bool reset = __sync_bool_compare_and_swap(LOCK_PART(guard_object), + LOCKED, INITIAL); assert(reset); } /** * Releases the guard and marks the object as initialised. This function is * called after successful initialisation of a static. */ extern "C" void __cxa_guard_release(volatile guard_t *guard_object) { + guard_lock_t old; + if (INIT_PART(guard_object) == LOCK_PART(guard_object)) + old = LOCKED; + else + old = INITIAL; __attribute__((unused)) - bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, INITIALISED); + bool reset = __sync_bool_compare_and_swap(INIT_PART(guard_object), + old, INITIALISED); assert(reset); + if (INIT_PART(guard_object) != LOCK_PART(guard_object)) + *LOCK_PART(guard_object) = INITIAL; } - - Index: head/contrib/libcxxrt/stdexcept.cc =================================================================== --- head/contrib/libcxxrt/stdexcept.cc (revision 276416) +++ head/contrib/libcxxrt/stdexcept.cc (revision 276417) @@ -1,104 +1,104 @@ /* * Copyright 2010-2011 PathScale, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * stdexcept.cc - provides stub implementations of the exceptions required by the runtime. */ #include "stdexcept.h" namespace std { exception::exception() throw() {} exception::~exception() {} exception::exception(const exception&) throw() {} exception& exception::operator=(const exception&) throw() { return *this; } const char* exception::what() const throw() { return "std::exception"; } bad_alloc::bad_alloc() throw() {} bad_alloc::~bad_alloc() {} bad_alloc::bad_alloc(const bad_alloc&) throw() {} bad_alloc& bad_alloc::operator=(const bad_alloc&) throw() { return *this; } const char* bad_alloc::what() const throw() { return "cxxrt::bad_alloc"; } bad_cast::bad_cast() throw() {} bad_cast::~bad_cast() {} bad_cast::bad_cast(const bad_cast&) throw() {} bad_cast& bad_cast::operator=(const bad_cast&) throw() { return *this; } const char* bad_cast::what() const throw() { return "std::bad_cast"; } bad_typeid::bad_typeid() throw() {} bad_typeid::~bad_typeid() {} bad_typeid::bad_typeid(const bad_typeid &__rhs) throw() {} bad_typeid& bad_typeid::operator=(const bad_typeid &__rhs) throw() { return *this; } const char* bad_typeid::what() const throw() { return "std::bad_typeid"; } __attribute__((weak)) bad_array_new_length::bad_array_new_length() throw() {} __attribute__((weak)) bad_array_new_length::~bad_array_new_length() {} __attribute__((weak)) bad_array_new_length::bad_array_new_length(const bad_array_new_length&) throw() {} __attribute__((weak)) bad_array_new_length& bad_array_new_length::operator=(const bad_array_new_length&) throw() { return *this; } __attribute__((weak)) -const char *bad_array_new_length::what() const throw() +const char* bad_array_new_length::what() const throw() { return "std::bad_array_new_length"; } } // namespace std Index: head/contrib/libcxxrt/stdexcept.h =================================================================== --- head/contrib/libcxxrt/stdexcept.h (revision 276416) +++ head/contrib/libcxxrt/stdexcept.h (revision 276417) @@ -1,96 +1,96 @@ /* * Copyright 2010-2011 PathScale, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * stdexcept.h - provides a stub version of , which defines enough * of the exceptions for the runtime to use. */ namespace std { class exception { public: exception() throw(); exception(const exception&) throw(); exception& operator=(const exception&) throw(); virtual ~exception(); virtual const char* what() const throw(); }; /** * Bad allocation exception. Thrown by ::operator new() if it fails. */ class bad_alloc: public exception { public: bad_alloc() throw(); bad_alloc(const bad_alloc&) throw(); bad_alloc& operator=(const bad_alloc&) throw(); ~bad_alloc(); virtual const char* what() const throw(); }; /** * Bad cast exception. Thrown by the __cxa_bad_cast() helper function. */ class bad_cast: public exception { public: bad_cast() throw(); bad_cast(const bad_cast&) throw(); bad_cast& operator=(const bad_cast&) throw(); virtual ~bad_cast(); virtual const char* what() const throw(); }; /** * Bad typeidexception. Thrown by the __cxa_bad_typeid() helper function. */ class bad_typeid: public exception { public: bad_typeid() throw(); bad_typeid(const bad_typeid &__rhs) throw(); virtual ~bad_typeid(); bad_typeid& operator=(const bad_typeid &__rhs) throw(); virtual const char* what() const throw(); }; - class bad_array_new_length: public exception + class bad_array_new_length: public bad_alloc { public: bad_array_new_length() throw(); bad_array_new_length(const bad_array_new_length&) throw(); bad_array_new_length& operator=(const bad_array_new_length&) throw(); virtual ~bad_array_new_length(); virtual const char *what() const throw(); }; } // namespace std Index: head/contrib/libcxxrt/typeinfo.cc =================================================================== --- head/contrib/libcxxrt/typeinfo.cc (revision 276416) +++ head/contrib/libcxxrt/typeinfo.cc (revision 276417) @@ -1,140 +1,140 @@ /* * Copyright 2010-2012 PathScale, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "typeinfo.h" #include <string.h> #include <stdlib.h> #include <stdio.h> using std::type_info; type_info::~type_info() {} bool type_info::operator==(const type_info &other) const { -#ifdef LIBCXXRT_MERGED_TYPEINFO +#ifdef LIBCXXRT_MERGED_TYPEINFO return __type_name == other.__type_name; #else return __type_name == other.__type_name || strcmp(__type_name, other.__type_name) == 0; #endif } bool type_info::operator!=(const type_info &other) const { return !operator==(other); } bool type_info::before(const type_info &other) const { #ifdef LIBCXXRT_MERGED_TYPEINFO return __type_name < other.__type_name; #else return strcmp(__type_name, other.__type_name) < 0; #endif } const char* type_info::name() const { return __type_name; } type_info::type_info (const type_info& rhs) { __type_name = rhs.__type_name; } type_info& type_info::operator= (const type_info& rhs) { return *new type_info(rhs); } ABI_NAMESPACE::__fundamental_type_info::~__fundamental_type_info() {} ABI_NAMESPACE::__array_type_info::~__array_type_info() {} ABI_NAMESPACE::__function_type_info::~__function_type_info() {} ABI_NAMESPACE::__enum_type_info::~__enum_type_info() {} ABI_NAMESPACE::__class_type_info::~__class_type_info() {} ABI_NAMESPACE::__si_class_type_info::~__si_class_type_info() {} ABI_NAMESPACE::__vmi_class_type_info::~__vmi_class_type_info() {} ABI_NAMESPACE::__pbase_type_info::~__pbase_type_info() {} ABI_NAMESPACE::__pointer_type_info::~__pointer_type_info() {} ABI_NAMESPACE::__pointer_to_member_type_info::~__pointer_to_member_type_info() {} // From libelftc extern "C" char *__cxa_demangle_gnu3(const char *); extern "C" char* __cxa_demangle(const char* mangled_name, char* buf, size_t* n, int* status) { // TODO: We should probably just be linking against libelf-tc, rather than // copying their code. This requires them to do an actual release, // however, and for our changes to be pushed upstream. We also need to // call a different demangling function here depending on the ABI (e.g. // ARM). char *demangled = __cxa_demangle_gnu3(mangled_name); if (NULL != demangled) { size_t len = strlen(demangled); if (buf == NULL) { if (n) { *n = len; } return demangled; } if (*n < len+1) { - buf = (char*)realloc(buf, len+1); + buf = static_cast<char*>(realloc(buf, len+1)); } if (0 != buf) { memcpy(buf, demangled, len); buf[len] = 0; if (n) { *n = len; } if (status) { *status = 0; } } else { if (status) { *status = -1; } } free(demangled); } else { if (status) { *status = -2; } return NULL; } return buf; } Index: head/contrib/libcxxrt/unwind-arm.h =================================================================== --- head/contrib/libcxxrt/unwind-arm.h (revision 276416) +++ head/contrib/libcxxrt/unwind-arm.h (revision 276417) @@ -1,223 +1,223 @@ /* * Copyright 2012 David Chisnall. All rights reserved.
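Typical use of __cxa_demangle() as implemented in typeinfo.cc above, for reference and not part of this change. The output buffer is malloc()-allocated (and may be realloc()'d), so the caller frees it; abi is assumed to be the conventional cxxabi.h alias for __cxxabiv1.

    #include <cstdio>
    #include <cstdlib>
    #include <cxxabi.h>
    #include <typeinfo>
    #include <vector>

    int main()
    {
        const char *mangled = typeid(std::vector<int>).name();
        int status = 0;
        // Passing a null buffer asks the demangler to allocate one for us.
        char *demangled = abi::__cxa_demangle(mangled, NULL, NULL, &status);
        std::printf("%s -> %s\n", mangled,
                    status == 0 ? demangled : "(demangling failed)");
        std::free(demangled);
        return 0;
    }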
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** * ARM-specific unwind definitions. These are taken from the ARM EHABI * specification. */ typedef enum { _URC_OK = 0, /* operation completed successfully */ _URC_FOREIGN_EXCEPTION_CAUGHT = 1, _URC_END_OF_STACK = 5, _URC_HANDLER_FOUND = 6, _URC_INSTALL_CONTEXT = 7, _URC_CONTINUE_UNWIND = 8, _URC_FAILURE = 9, /* unspecified failure of some kind */ _URC_FATAL_PHASE1_ERROR = _URC_FAILURE } _Unwind_Reason_Code; typedef uint32_t _Unwind_State; #ifdef __clang__ static const _Unwind_State _US_VIRTUAL_UNWIND_FRAME = 0; static const _Unwind_State _US_UNWIND_FRAME_STARTING = 1; static const _Unwind_State _US_UNWIND_FRAME_RESUME = 2; #else // GCC fails at knowing what a constant expression is # define _US_VIRTUAL_UNWIND_FRAME 0 # define _US_UNWIND_FRAME_STARTING 1 # define _US_UNWIND_FRAME_RESUME 2 #endif typedef struct _Unwind_Context _Unwind_Context; typedef uint32_t _Unwind_EHT_Header; struct _Unwind_Exception { uint64_t exception_class; void (*exception_cleanup)(_Unwind_Reason_Code, struct _Unwind_Exception *); /* Unwinder cache, private fields for the unwinder's use */ struct { uint32_t reserved1; uint32_t reserved2; uint32_t reserved3; uint32_t reserved4; uint32_t reserved5; /* init reserved1 to 0, then don't touch */ } unwinder_cache; /* Propagation barrier cache (valid after phase 1): */ struct { uint32_t sp; uint32_t bitpattern[5]; } barrier_cache; /* Cleanup cache (preserved over cleanup): */ struct { uint32_t bitpattern[4]; } cleanup_cache; /* Pr cache (for pr's benefit): */ struct { /** function start address */ uint32_t fnstart; /** pointer to EHT entry header word */ _Unwind_EHT_Header *ehtp; /** additional data */ uint32_t additional; uint32_t reserved1; } pr_cache; /** Force alignment of next item to 8-byte boundary */ long long int :0; }; /* Unwinding functions */ _Unwind_Reason_Code _Unwind_RaiseException(struct _Unwind_Exception *ucbp); void _Unwind_Resume(struct _Unwind_Exception *ucbp); void _Unwind_Complete(struct _Unwind_Exception *ucbp); void _Unwind_DeleteException(struct _Unwind_Exception *ucbp); void *_Unwind_GetLanguageSpecificData(struct _Unwind_Context*); typedef enum { _UVRSR_OK = 0, _UVRSR_NOT_IMPLEMENTED = 1, _UVRSR_FAILED = 2 } _Unwind_VRS_Result; typedef enum { _UVRSC_CORE = 0, _UVRSC_VFP = 1, _UVRSC_WMMXD = 3, _UVRSC_WMMXC = 4 } _Unwind_VRS_RegClass; typedef enum { _UVRSD_UINT32 = 0, _UVRSD_VFPX = 1, _UVRSD_UINT64 = 3, _UVRSD_FLOAT = 4, _UVRSD_DOUBLE = 5 } _Unwind_VRS_DataRepresentation; _Unwind_VRS_Result 
_Unwind_VRS_Get(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep); _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep); /* Return the base-address for data references. */ extern unsigned long _Unwind_GetDataRelBase(struct _Unwind_Context *); /* Return the base-address for text references. */ extern unsigned long _Unwind_GetTextRelBase(struct _Unwind_Context *); extern unsigned long _Unwind_GetRegionStart(struct _Unwind_Context *); typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) (struct _Unwind_Context *, void *); extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *); extern _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (struct _Unwind_Exception *); /** * The next set of functions are compatibility extensions, implementing Itanium * ABI functions on top of ARM ones. */ #define _UA_SEARCH_PHASE 1 #define _UA_CLEANUP_PHASE 2 #define _UA_HANDLER_FRAME 4 #define _UA_FORCE_UNWIND 8 static inline unsigned long _Unwind_GetGR(struct _Unwind_Context *context, int reg) { unsigned long val; _Unwind_VRS_Get(context, _UVRSC_CORE, reg, _UVRSD_UINT32, &val); return val; } static inline void _Unwind_SetGR(struct _Unwind_Context *context, int reg, unsigned long val) { _Unwind_VRS_Set(context, _UVRSC_CORE, reg, _UVRSD_UINT32, &val); } static inline unsigned long _Unwind_GetIP(_Unwind_Context *context) { // Low bit store the thumb state - discard it return _Unwind_GetGR(context, 15) & ~1; } static inline void _Unwind_SetIP(_Unwind_Context *context, unsigned long val) { // The lowest bit of the instruction pointer indicates whether we're in // thumb or ARM mode. This is assumed to be fixed throughout a function, // so must be propagated when setting the program counter. unsigned long thumbState = _Unwind_GetGR(context, 15) & 1; _Unwind_SetGR(context, 15, (val | thumbState)); } /** GNU API function that unwinds the frame */ _Unwind_Reason_Code __gnu_unwind_frame(struct _Unwind_Exception*, struct _Unwind_Context*); #define DECLARE_PERSONALITY_FUNCTION(name) \ _Unwind_Reason_Code name(_Unwind_State state,\ struct _Unwind_Exception *exceptionObject,\ struct _Unwind_Context *context); #define BEGIN_PERSONALITY_FUNCTION(name) \ _Unwind_Reason_Code name(_Unwind_State state,\ struct _Unwind_Exception *exceptionObject,\ struct _Unwind_Context *context)\ {\ int version = 1;\ uint64_t exceptionClass = exceptionObject->exception_class;\ int actions;\ switch (state)\ {\ default: return _URC_FAILURE;\ case _US_VIRTUAL_UNWIND_FRAME:\ {\ actions = _UA_SEARCH_PHASE;\ break;\ }\ case _US_UNWIND_FRAME_STARTING:\ {\ actions = _UA_CLEANUP_PHASE;\ if (exceptionObject->barrier_cache.sp == _Unwind_GetGR(context, 13))\ {\ actions |= _UA_HANDLER_FRAME;\ }\ break;\ }\ case _US_UNWIND_FRAME_RESUME:\ {\ return continueUnwinding(exceptionObject, context);\ break;\ }\ }\ - _Unwind_SetGR (context, 12, (unsigned long)exceptionObject);\ + _Unwind_SetGR (context, 12, reinterpret_cast(exceptionObject));\ #define CALL_PERSONALITY_FUNCTION(name) name(state,exceptionObject,context) Index: head/contrib/libcxxrt/unwind.h =================================================================== --- head/contrib/libcxxrt/unwind.h (revision 276416) +++ head/contrib/libcxxrt/unwind.h (revision 276417) @@ -1,40 +1,40 @@ /* * Copyright 2012 David Chisnall. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef UNWIND_H_INCLUDED #define UNWIND_H_INCLUDED #ifdef __cplusplus extern "C" { #endif -#ifdef __arm__ +#if defined(__arm__) && !defined(__ARM_DWARF_EH__) #include "unwind-arm.h" #else #include "unwind-itanium.h" #endif #ifdef __cplusplus } #endif #endif Index: head/contrib/libcxxrt =================================================================== --- head/contrib/libcxxrt (revision 276416) +++ head/contrib/libcxxrt (revision 276417) Property changes on: head/contrib/libcxxrt ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /vendor/libcxxrt/dist:r253149-276378