Can someone explain the safety of this magic? It terrifies me, but I can sort of see how it might work. What happens if you use two encoding converters on the same `const String& s` in the same function call? It's unlikely, but that would explode, right?
// Converts a String's text from its native storage encoding (CharPointerType_Src)
// to a different encoding (CharPointerType_Dest), caching the converted copy
// *inside the String's own heap buffer*: the buffer is grown, and the converted
// text is written after the original data (at a word-aligned offset). The returned
// pointer therefore stays valid for as long as the String's buffer does.
//
// NOTE(review): the const_cast means a logically-const String is physically
// mutated (its buffer may be reallocated by preallocateBytes). Any pointer
// previously obtained from this String — including the result of an earlier
// convert() with a different destination encoding — may be invalidated by a
// second conversion on the same String. Presumably callers never hold two
// converted pointers into the same String simultaneously — TODO confirm.
template <class CharPointerType_Src, class CharPointerType_Dest>
struct StringEncodingConverter
{
static CharPointerType_Dest convert (const String& s)
{
// The String is logically unchanged (same text), but we need non-const
// access to grow its buffer and append the converted representation.
auto& source = const_cast<String&> (s);
using DestChar = typename CharPointerType_Dest::CharType;
// Empty strings share a static empty-character sentinel (declared elsewhere
// in this file); no allocation or conversion is needed.
if (source.isEmpty())
return CharPointerType_Dest (reinterpret_cast<const DestChar*> (&emptyChar));
CharPointerType_Src text (source.getCharPointer());
// Space for the converted text plus one extra CharType for its null terminator.
auto extraBytesNeeded = CharPointerType_Dest::getBytesRequiredFor (text) + sizeof (typename CharPointerType_Dest::CharType);
// Round the size of the existing data up to a multiple of 4 so the converted
// copy starts at a word-aligned offset.
auto endOffset = (text.sizeInBytes() + 3) & ~3u; // the new string must be word-aligned or many Windows
// functions will fail to read it correctly!
source.preallocateBytes (endOffset + extraBytesNeeded);
// preallocateBytes may have reallocated (moved) the buffer, so the source
// pointer must be re-fetched before it is used again.
text = source.getCharPointer();
// The converted copy lives immediately after the (padded) original data,
// within the same allocation.
void* const newSpace = addBytesToPointer (text.getAddress(), (int) endOffset);
const CharPointerType_Dest extraSpace (static_cast<DestChar*> (newSpace));
#if JUCE_DEBUG // (This just avoids spurious warnings from valgrind about the uninitialised bytes at the end of the buffer..)
auto bytesToClear = (size_t) jmin ((int) extraBytesNeeded, 4);
zeromem (addBytesToPointer (newSpace, extraBytesNeeded - bytesToClear), bytesToClear);
#endif
// Transcode the source text into the destination encoding, writing it
// (with its null terminator) into the reserved tail of the buffer.
CharPointerType_Dest (extraSpace).writeAll (text);
return extraSpace;
}
};