diff --git a/GECKO_REVISION b/GECKO_REVISION index 897c0de..9ce5193 100644 --- a/GECKO_REVISION +++ b/GECKO_REVISION @@ -1 +1 @@ -423673:403d573285b9 +424190:3d2edf73fb90 diff --git a/build.rs b/build.rs index 53d1136..ff0a7a1 100644 --- a/build.rs +++ b/build.rs @@ -22,6 +22,7 @@ fn make_builder(cpp: bool) -> gcc::Build { b.include("gecko/include/system_wrappers"); b.include("gecko/include/nspr"); b.include("gecko/include/nspr/private"); + b.include("gecko/include/webaudio"); b.include("gecko/include"); b.include("gecko/glue/include"); b.include("gecko/include/mozilla/"); @@ -74,7 +75,7 @@ fn compile_tests() { b.compile("geckotest"); } -fn configure_libsoundtouch(c_builder: &mut gcc::Build, cpp_builder: &mut gcc::Build) { +fn configure_libsoundtouch(_c_builder: &mut gcc::Build, cpp_builder: &mut gcc::Build) { let src_files = [ "media/libsoundtouch/src/cpu_detect_x86.cpp", "media/libsoundtouch/src/RateTransposer.cpp", @@ -276,9 +277,12 @@ fn compile_gecko_media() { configure_libsoundtouch(&mut c_builder, &mut cpp_builder); let src_cpp_files = [ + "dom/media/AudioChannelFormat.cpp", + "dom/media/AudioSegment.cpp", "dom/media/AudioStream.cpp", "dom/media/CubebUtils.cpp", "dom/media/MediaInfo.cpp", + "dom/media/webaudio/AudioBlock.cpp", "memory/fallible/fallible.cpp", "memory/mozalloc/mozalloc.cpp", "memory/mozalloc/mozalloc_abort.cpp", diff --git a/gecko/glue/include/nsTArray.h b/gecko/glue/include/nsTArray.h index 4c89050..dfc3bde 100644 --- a/gecko/glue/include/nsTArray.h +++ b/gecko/glue/include/nsTArray.h @@ -27,14 +27,11 @@ #include "nsAlgorithm.h" // #include "nscore.h" // #include "nsQuickSort.h" -// #include "nsDebug.h" +#include "nsDebug.h" #include "nsISupportsImpl.h" #ifdef GECKO_MEDIA_CRATE #include "mozilla/Unused.h" #include "mozilla/RefPtr.h" -#define NS_ABORT_OOM(x) \ - mozilla::Unused << x; \ - MOZ_CRASH() #endif // GECKO_MEDIA_CRATE // #include "nsRegionFwd.h" #include diff --git a/gecko/include/AudioChannelFormat.h 
b/gecko/include/AudioChannelFormat.h new file mode 100644 index 0000000..73bbe40 --- /dev/null +++ b/gecko/include/AudioChannelFormat.h @@ -0,0 +1,248 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef MOZILLA_AUDIOCHANNELFORMAT_H_ +#define MOZILLA_AUDIOCHANNELFORMAT_H_ + +#include + +#include "nsTArrayForwardDeclare.h" +#include "AudioSampleFormat.h" +#include "nsTArray.h" + +namespace mozilla { + +/* + * This file provides utilities for upmixing and downmixing channels. + * + * The channel layouts, upmixing and downmixing are consistent with the + * Web Audio spec. + * + * Channel layouts for up to 6 channels: + * mono { M } + * stereo { L, R } + * { L, R, C } + * quad { L, R, SL, SR } + * { L, R, C, SL, SR } + * 5.1 { L, R, C, LFE, SL, SR } + * + * Only 1, 2, 4 and 6 are currently defined in Web Audio. + */ + +enum { + SURROUND_L, + SURROUND_R, + SURROUND_C, + SURROUND_LFE, + SURROUND_SL, + SURROUND_SR +}; + +const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6; + +// This is defined by some Windows SDK header. +#undef IGNORE + +const int IGNORE = CUSTOM_CHANNEL_LAYOUTS; +const float IGNORE_F = 0.0f; + +const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] = + { 0, 5, 9, 12, 14 }; + +/** + * Return a channel count whose channel layout includes all the channels from + * aChannels1 and aChannels2. + */ +uint32_t +GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2); + +/** + * DownMixMatrix represents a conversion matrix efficiently by exploiting the + * fact that each input channel contributes to at most one output channel, + * except possibly for the C input channel in layouts that have one. 
Also, + * every input channel is multiplied by the same coefficient for every output + * channel it contributes to. + */ +const float SQRT_ONE_HALF = 0.7071067811865476f; + +struct DownMixMatrix { + // Every input channel c is copied to output channel mInputDestination[c] + // after multiplying by mInputCoefficient[c]. + uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS]; + // If not IGNORE, then the C channel is copied to this output channel after + // multiplying by its coefficient. + uint8_t mCExtraDestination; + float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS]; +}; + +static const DownMixMatrix +gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] = +{ + // Downmixes to mono + { { 0, 0 }, IGNORE, { 0.5f, 0.5f } }, + { { 0, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F } }, + { { 0, 0, 0, 0 }, IGNORE, { 0.25f, 0.25f, 0.25f, 0.25f } }, + { { 0, IGNORE, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F } }, + { { 0, 0, 0, IGNORE, 0, 0 }, IGNORE, { SQRT_ONE_HALF, SQRT_ONE_HALF, 1.0f, IGNORE_F, 0.5f, 0.5f } }, + // Downmixes to stereo + { { 0, 1, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F } }, + { { 0, 1, 0, 1 }, IGNORE, { 0.5f, 0.5f, 0.5f, 0.5f } }, + { { 0, 1, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } }, + { { 0, 1, 0, IGNORE, 0, 1 }, 1, { 1.0f, 1.0f, SQRT_ONE_HALF, IGNORE_F, SQRT_ONE_HALF, SQRT_ONE_HALF } }, + // Downmixes to 3-channel + { { 0, 1, 2, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F } }, + { { 0, 1, 2, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F } }, + { { 0, 1, 2, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } }, + // Downmixes to quad + { { 0, 1, 2, 3, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } }, + { { 0, 1, 0, IGNORE, 2, 3 }, 1, { 1.0f, 1.0f, SQRT_ONE_HALF, IGNORE_F, 1.0f, 1.0f } }, + // Downmixes to 5-channel + { { 0, 1, 2, 3, 4, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
IGNORE_F } } +}; + +/** + * Given an array of input channels, downmix to aOutputChannelCount, and copy + * the results to the channel buffers in aOutputChannels. Don't call this with + * input count <= output count. + */ +template +void AudioChannelsDownMix(const nsTArray& aChannelArray, + T** aOutputChannels, + uint32_t aOutputChannelCount, + uint32_t aDuration) +{ + uint32_t inputChannelCount = aChannelArray.Length(); + const T* const* inputChannels = aChannelArray.Elements(); + NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do"); + + if (inputChannelCount > 6) { + // Just drop the unknown channels. + for (uint32_t o = 0; o < aOutputChannelCount; ++o) { + PodCopy(aOutputChannels[o], inputChannels[o], aDuration); + } + return; + } + + // Ignore unknown channels, they're just dropped. + inputChannelCount = std::min(6, inputChannelCount); + + const DownMixMatrix& m = gDownMixMatrices[ + gMixingMatrixIndexByChannels[aOutputChannelCount - 1] + + inputChannelCount - aOutputChannelCount - 1]; + + // This is slow, but general. We can define custom code for special + // cases later. + for (uint32_t s = 0; s < aDuration; ++s) { + // Reserve an extra junk channel at the end for the cases where we + // want an input channel to contribute to nothing + T outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1] = {0}; + for (uint32_t c = 0; c < inputChannelCount; ++c) { + outputChannels[m.mInputDestination[c]] += + m.mInputCoefficient[c]*(static_cast(inputChannels[c]))[s]; + } + // Utilize the fact that in every layout, C is the third channel. + if (m.mCExtraDestination != IGNORE) { + outputChannels[m.mCExtraDestination] += + m.mInputCoefficient[SURROUND_C]*(static_cast(inputChannels[SURROUND_C]))[s]; + } + + for (uint32_t c = 0; c < aOutputChannelCount; ++c) { + aOutputChannels[c][s] = outputChannels[c]; + } + } +} + +/** + * UpMixMatrix represents a conversion matrix by exploiting the fact that + * each output channel comes from at most one input channel. 
+ */ +struct UpMixMatrix { + uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS]; +}; + +static const UpMixMatrix +gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] = +{ + // Upmixes from mono + { { 0, 0 } }, + { { 0, IGNORE, IGNORE } }, + { { 0, 0, IGNORE, IGNORE } }, + { { 0, IGNORE, IGNORE, IGNORE, IGNORE } }, + { { IGNORE, IGNORE, 0, IGNORE, IGNORE, IGNORE } }, + // Upmixes from stereo + { { 0, 1, IGNORE } }, + { { 0, 1, IGNORE, IGNORE } }, + { { 0, 1, IGNORE, IGNORE, IGNORE } }, + { { 0, 1, IGNORE, IGNORE, IGNORE, IGNORE } }, + // Upmixes from 3-channel + { { 0, 1, 2, IGNORE } }, + { { 0, 1, 2, IGNORE, IGNORE } }, + { { 0, 1, 2, IGNORE, IGNORE, IGNORE } }, + // Upmixes from quad + { { 0, 1, 2, 3, IGNORE } }, + { { 0, 1, IGNORE, IGNORE, 2, 3 } }, + // Upmixes from 5-channel + { { 0, 1, 2, 3, 4, IGNORE } } +}; + + +/** + * Given an array of input channel data, and an output channel count, + * replaces the array with an array of upmixed channels. + * This shuffles the array and may set some channel buffers to aZeroChannel. + * Don't call this with input count >= output count. + * This may return *more* channels than requested. In that case, downmixing + * is required to to get to aOutputChannelCount. (This is how we handle + * odd cases like 3 -> 4 upmixing.) + * If aChannelArray.Length() was the input to one of a series of + * GetAudioChannelsSuperset calls resulting in aOutputChannelCount, + * no downmixing will be required. 
+ */ +template +void +AudioChannelsUpMix(nsTArray* aChannelArray, + uint32_t aOutputChannelCount, + const T* aZeroChannel) +{ + uint32_t inputChannelCount = aChannelArray->Length(); + uint32_t outputChannelCount = + GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount); + NS_ASSERTION(outputChannelCount > inputChannelCount, + "No up-mix needed"); + MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels"); + MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels"); + + aChannelArray->SetLength(outputChannelCount); + + if (inputChannelCount < CUSTOM_CHANNEL_LAYOUTS && + outputChannelCount <= CUSTOM_CHANNEL_LAYOUTS) { + const UpMixMatrix& m = gUpMixMatrices[ + gMixingMatrixIndexByChannels[inputChannelCount - 1] + + outputChannelCount - inputChannelCount - 1]; + + const T* outputChannels[CUSTOM_CHANNEL_LAYOUTS]; + + for (uint32_t i = 0; i < outputChannelCount; ++i) { + uint8_t channelIndex = m.mInputDestination[i]; + if (channelIndex == IGNORE) { + outputChannels[i] = aZeroChannel; + } else { + outputChannels[i] = aChannelArray->ElementAt(channelIndex); + } + } + for (uint32_t i = 0; i < outputChannelCount; ++i) { + aChannelArray->ElementAt(i) = outputChannels[i]; + } + return; + } + + for (uint32_t i = inputChannelCount; i < outputChannelCount; ++i) { + aChannelArray->ElementAt(i) = aZeroChannel; + } +} + +} // namespace mozilla + +#endif /* MOZILLA_AUDIOCHANNELFORMAT_H_ */ diff --git a/gecko/include/AudioMixer.h b/gecko/include/AudioMixer.h new file mode 100644 index 0000000..c86aa33 --- /dev/null +++ b/gecko/include/AudioMixer.h @@ -0,0 +1,153 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef MOZILLA_AUDIOMIXER_H_ +#define MOZILLA_AUDIOMIXER_H_ + +#include "AudioSampleFormat.h" +#include "nsTArray.h" +#include "mozilla/PodOperations.h" +#include "mozilla/LinkedList.h" +#include "AudioStream.h" + +namespace mozilla { + +struct MixerCallbackReceiver { + virtual void MixerCallback(AudioDataValue* aMixedBuffer, + AudioSampleFormat aFormat, + uint32_t aChannels, + uint32_t aFrames, + uint32_t aSampleRate) = 0; +}; +/** + * This class mixes multiple streams of audio together to output a single audio + * stream. + * + * AudioMixer::Mix is to be called repeatedly with buffers that have the same + * length, sample rate, sample format and channel count. This class works with + * interleaved and plannar buffers, but the buffer mixed must be of the same + * type during a mixing cycle. + * + * When all the tracks have been mixed, calling FinishMixing will call back with + * a buffer containing the mixed audio data. + * + * This class is not thread safe. + */ +class AudioMixer +{ +public: + AudioMixer() + : mFrames(0), + mChannels(0), + mSampleRate(0) + { } + + ~AudioMixer() + { + MixerCallback* cb; + while ((cb = mCallbacks.popFirst())) { + delete cb; + } + } + + void StartMixing() + { + mSampleRate = mChannels = mFrames = 0; + } + + /* Get the data from the mixer. This is supposed to be called when all the + * tracks have been mixed in. The caller should not hold onto the data. */ + void FinishMixing() { + MOZ_ASSERT(mChannels && mFrames && mSampleRate, "Mix not called for this cycle?"); + for (MixerCallback* cb = mCallbacks.getFirst(); + cb != nullptr; cb = cb->getNext()) { + cb->mReceiver->MixerCallback(mMixedAudio.Elements(), + AudioSampleTypeToFormat::Format, + mChannels, + mFrames, + mSampleRate); + } + PodZero(mMixedAudio.Elements(), mMixedAudio.Length()); + mSampleRate = mChannels = mFrames = 0; + } + + /* Add a buffer to the mix. 
*/ + void Mix(AudioDataValue* aSamples, + uint32_t aChannels, + uint32_t aFrames, + uint32_t aSampleRate) { + if (!mFrames && !mChannels) { + mFrames = aFrames; + mChannels = aChannels; + mSampleRate = aSampleRate; + EnsureCapacityAndSilence(); + } + + MOZ_ASSERT(aFrames == mFrames); + MOZ_ASSERT(aChannels == mChannels); + MOZ_ASSERT(aSampleRate == mSampleRate); + + for (uint32_t i = 0; i < aFrames * aChannels; i++) { + mMixedAudio[i] += aSamples[i]; + } + } + + void AddCallback(MixerCallbackReceiver* aReceiver) { + mCallbacks.insertBack(new MixerCallback(aReceiver)); + } + + bool FindCallback(MixerCallbackReceiver* aReceiver) { + for (MixerCallback* cb = mCallbacks.getFirst(); + cb != nullptr; cb = cb->getNext()) { + if (cb->mReceiver == aReceiver) { + return true; + } + } + return false; + } + + bool RemoveCallback(MixerCallbackReceiver* aReceiver) { + for (MixerCallback* cb = mCallbacks.getFirst(); + cb != nullptr; cb = cb->getNext()) { + if (cb->mReceiver == aReceiver) { + cb->remove(); + delete cb; + return true; + } + } + return false; + } +private: + void EnsureCapacityAndSilence() { + if (mFrames * mChannels > mMixedAudio.Length()) { + mMixedAudio.SetLength(mFrames* mChannels); + } + PodZero(mMixedAudio.Elements(), mMixedAudio.Length()); + } + + class MixerCallback : public LinkedListElement + { + public: + explicit MixerCallback(MixerCallbackReceiver* aReceiver) + : mReceiver(aReceiver) + { } + MixerCallbackReceiver* mReceiver; + }; + + /* Function that is called when the mixing is done. */ + LinkedList mCallbacks; + /* Number of frames for this mixing block. */ + uint32_t mFrames; + /* Number of channels for this mixing block. */ + uint32_t mChannels; + /* Sample rate the of the mixed data. */ + uint32_t mSampleRate; + /* Buffer containing the mixed audio data. 
*/ + nsTArray mMixedAudio; +}; + +} // namespace mozilla + +#endif // MOZILLA_AUDIOMIXER_H_ diff --git a/gecko/include/AudioSegment.h b/gecko/include/AudioSegment.h new file mode 100644 index 0000000..c78f568 --- /dev/null +++ b/gecko/include/AudioSegment.h @@ -0,0 +1,448 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_AUDIOSEGMENT_H_ +#define MOZILLA_AUDIOSEGMENT_H_ + +#include "MediaSegment.h" +#include "AudioSampleFormat.h" +#include "AudioChannelFormat.h" +#include "SharedBuffer.h" +#include "WebAudioUtils.h" +#ifdef MOZILLA_INTERNAL_API +#include "mozilla/TimeStamp.h" +#endif +#include + +namespace mozilla { + struct AudioChunk; +} +DECLARE_USE_COPY_CONSTRUCTORS(mozilla::AudioChunk) + +namespace mozilla { + +template +class SharedChannelArrayBuffer : public ThreadSharedObject { +public: + explicit SharedChannelArrayBuffer(nsTArray >* aBuffers) + { + mBuffers.SwapElements(*aBuffers); + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + size_t amount = 0; + amount += mBuffers.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < mBuffers.Length(); i++) { + amount += mBuffers[i].ShallowSizeOfExcludingThis(aMallocSizeOf); + } + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + nsTArray > mBuffers; +}; + +class AudioMixer; + +/** + * For auto-arrays etc, guess this as the common number of channels. 
+ */ +const int GUESS_AUDIO_CHANNELS = 2; + +// We ensure that the graph advances in steps that are multiples of the Web +// Audio block size +const uint32_t WEBAUDIO_BLOCK_SIZE_BITS = 7; +const uint32_t WEBAUDIO_BLOCK_SIZE = 1 << WEBAUDIO_BLOCK_SIZE_BITS; + +template +static void +InterleaveAndConvertBuffer(const SrcT* const* aSourceChannels, + uint32_t aLength, float aVolume, + uint32_t aChannels, + DestT* aOutput) +{ + DestT* output = aOutput; + for (size_t i = 0; i < aLength; ++i) { + for (size_t channel = 0; channel < aChannels; ++channel) { + float v = AudioSampleToFloat(aSourceChannels[channel][i])*aVolume; + *output = FloatToAudioSample(v); + ++output; + } + } +} + +template +static void +DeinterleaveAndConvertBuffer(const SrcT* aSourceBuffer, + uint32_t aFrames, uint32_t aChannels, + DestT** aOutput) +{ + for (size_t i = 0; i < aChannels; i++) { + size_t interleavedIndex = i; + for (size_t j = 0; j < aFrames; j++) { + ConvertAudioSample(aSourceBuffer[interleavedIndex], + aOutput[i][j]); + interleavedIndex += aChannels; + } + } +} + +class SilentChannel +{ +public: + static const int AUDIO_PROCESSING_FRAMES = 640; /* > 10ms of 48KHz audio */ + static const uint8_t gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*AUDIO_PROCESSING_FRAMES]; + // We take advantage of the fact that zero in float and zero in int have the + // same all-zeros bit layout. + template + static const T* ZeroChannel(); +}; + + +/** + * Given an array of input channels (aChannelData), downmix to aOutputChannels, + * interleave the channel data. A total of aOutputChannels*aDuration + * interleaved samples will be copied to a channel buffer in aOutput. 
+ */ +template +void +DownmixAndInterleave(const nsTArray& aChannelData, + int32_t aDuration, float aVolume, uint32_t aOutputChannels, + DestT* aOutput) +{ + + if (aChannelData.Length() == aOutputChannels) { + InterleaveAndConvertBuffer(aChannelData.Elements(), + aDuration, aVolume, aOutputChannels, aOutput); + } else { + AutoTArray outputChannelData; + AutoTArray outputBuffers; + outputChannelData.SetLength(aOutputChannels); + outputBuffers.SetLength(aDuration * aOutputChannels); + for (uint32_t i = 0; i < aOutputChannels; i++) { + outputChannelData[i] = outputBuffers.Elements() + aDuration * i; + } + AudioChannelsDownMix(aChannelData, + outputChannelData.Elements(), + aOutputChannels, + aDuration); + InterleaveAndConvertBuffer(outputChannelData.Elements(), + aDuration, aVolume, aOutputChannels, aOutput); + } +} + +/** + * An AudioChunk represents a multi-channel buffer of audio samples. + * It references an underlying ThreadSharedObject which manages the lifetime + * of the buffer. An AudioChunk maintains its own duration and channel data + * pointers so it can represent a subinterval of a buffer without copying. + * An AudioChunk can store its individual channels anywhere; it maintains + * separate pointers to each channel's buffer. 
+ */ +struct AudioChunk { + typedef mozilla::AudioSampleFormat SampleFormat; + + AudioChunk() : mPrincipalHandle(PRINCIPAL_HANDLE_NONE) {} + + // Generic methods + void SliceTo(StreamTime aStart, StreamTime aEnd) + { + MOZ_ASSERT(aStart >= 0 && aStart < aEnd && aEnd <= mDuration, + "Slice out of bounds"); + if (mBuffer) { + MOZ_ASSERT(aStart < INT32_MAX, "Can't slice beyond 32-bit sample lengths"); + for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) { + mChannelData[channel] = AddAudioSampleOffset(mChannelData[channel], + mBufferFormat, int32_t(aStart)); + } + } + mDuration = aEnd - aStart; + } + StreamTime GetDuration() const { return mDuration; } + bool CanCombineWithFollowing(const AudioChunk& aOther) const + { + if (aOther.mBuffer != mBuffer) { + return false; + } + if (mBuffer) { + NS_ASSERTION(aOther.mBufferFormat == mBufferFormat, + "Wrong metadata about buffer"); + NS_ASSERTION(aOther.mChannelData.Length() == mChannelData.Length(), + "Mismatched channel count"); + if (mDuration > INT32_MAX) { + return false; + } + for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) { + if (aOther.mChannelData[channel] != AddAudioSampleOffset(mChannelData[channel], + mBufferFormat, int32_t(mDuration))) { + return false; + } + } + } + return true; + } + bool IsNull() const { return mBuffer == nullptr; } + void SetNull(StreamTime aDuration) + { + mBuffer = nullptr; + mChannelData.Clear(); + mDuration = aDuration; + mVolume = 1.0f; + mBufferFormat = AUDIO_FORMAT_SILENCE; + mPrincipalHandle = PRINCIPAL_HANDLE_NONE; + } + + size_t ChannelCount() const { return mChannelData.Length(); } + + bool IsMuted() const { return mVolume == 0.0f; } + + size_t SizeOfExcludingThisIfUnshared(MallocSizeOf aMallocSizeOf) const + { + return SizeOfExcludingThis(aMallocSizeOf, true); + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf, bool aUnshared) const + { + size_t amount = 0; + + // Possibly owned: + // - mBuffer - Can hold data that is also in 
the decoded audio queue. If it + // is not shared, or unshared == false it gets counted. + if (mBuffer && (!aUnshared || !mBuffer->IsShared())) { + amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); + } + + // Memory in the array is owned by mBuffer. + amount += mChannelData.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; + } + + template + const nsTArray& ChannelData() const + { + MOZ_ASSERT(AudioSampleTypeToFormat::Format == mBufferFormat); + return *reinterpret_cast*> + (&mChannelData); + } + + /** + * ChannelFloatsForWrite() should be used only when mBuffer is owned solely + * by the calling thread. + */ + template + T* ChannelDataForWrite(size_t aChannel) + { + MOZ_ASSERT(AudioSampleTypeToFormat::Format == mBufferFormat); + MOZ_ASSERT(!mBuffer->IsShared()); + return static_cast(const_cast(mChannelData[aChannel])); + } + + PrincipalHandle GetPrincipalHandle() const { return mPrincipalHandle; } + + StreamTime mDuration; // in frames within the buffer + RefPtr mBuffer; // the buffer object whose lifetime is managed; null means data is all zeroes + // one pointer per channel; empty if and only if mBuffer is null + AutoTArray mChannelData; + float mVolume; // volume multiplier to apply (1.0f if mBuffer is nonnull) + SampleFormat mBufferFormat; // format of frames in mBuffer (only meaningful if mBuffer is nonnull) +#ifdef MOZILLA_INTERNAL_API + mozilla::TimeStamp mTimeStamp; // time at which this has been fetched from the MediaEngine +#endif + // principalHandle for the data in this chunk. + // This can be compared to an nsIPrincipal* when back on main thread. + PrincipalHandle mPrincipalHandle; +}; + +/** + * A list of audio samples consisting of a sequence of slices of SharedBuffers. + * The audio rate is determined by the track, not stored in this class. 
+ */ +class AudioSegment : public MediaSegmentBase { +public: + typedef mozilla::AudioSampleFormat SampleFormat; + + AudioSegment() : MediaSegmentBase(AUDIO) {} + + // Resample the whole segment in place. + template + void Resample(SpeexResamplerState* aResampler, uint32_t aInRate, uint32_t aOutRate) + { + mDuration = 0; +#ifdef DEBUG + uint32_t segmentChannelCount = ChannelCount(); +#endif + + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + AutoTArray, GUESS_AUDIO_CHANNELS> output; + AutoTArray bufferPtrs; + AudioChunk& c = *ci; + // If this chunk is null, don't bother resampling, just alter its duration + if (c.IsNull()) { + c.mDuration = (c.mDuration * aOutRate) / aInRate; + mDuration += c.mDuration; + continue; + } + uint32_t channels = c.mChannelData.Length(); + MOZ_ASSERT(channels == segmentChannelCount); + output.SetLength(channels); + bufferPtrs.SetLength(channels); + uint32_t inFrames = c.mDuration; + // Round up to allocate; the last frame may not be used. + NS_ASSERTION((UINT32_MAX - aInRate + 1) / c.mDuration >= aOutRate, + "Dropping samples"); + uint32_t outSize = (c.mDuration * aOutRate + aInRate - 1) / aInRate; + for (uint32_t i = 0; i < channels; i++) { + T* out = output[i].AppendElements(outSize); + uint32_t outFrames = outSize; + + const T* in = static_cast(c.mChannelData[i]); + dom::WebAudioUtils::SpeexResamplerProcess(aResampler, i, + in, &inFrames, + out, &outFrames); + MOZ_ASSERT(inFrames == c.mDuration); + + bufferPtrs[i] = out; + output[i].SetLength(outFrames); + } + MOZ_ASSERT(channels > 0); + c.mDuration = output[0].Length(); + c.mBuffer = new mozilla::SharedChannelArrayBuffer(&output); + for (uint32_t i = 0; i < channels; i++) { + c.mChannelData[i] = bufferPtrs[i]; + } + mDuration += c.mDuration; + } + } + + void ResampleChunks(SpeexResamplerState* aResampler, + uint32_t aInRate, + uint32_t aOutRate); + + void AppendFrames(already_AddRefed aBuffer, + const nsTArray& aChannelData, + int32_t aDuration, const PrincipalHandle& 
aPrincipalHandle) + { + AudioChunk* chunk = AppendChunk(aDuration); + chunk->mBuffer = aBuffer; + for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) { + chunk->mChannelData.AppendElement(aChannelData[channel]); + } + chunk->mVolume = 1.0f; + chunk->mBufferFormat = AUDIO_FORMAT_FLOAT32; +#ifdef MOZILLA_INTERNAL_API + chunk->mTimeStamp = TimeStamp::Now(); +#endif + chunk->mPrincipalHandle = aPrincipalHandle; + } + void AppendFrames(already_AddRefed aBuffer, + const nsTArray& aChannelData, + int32_t aDuration, const PrincipalHandle& aPrincipalHandle) + { + AudioChunk* chunk = AppendChunk(aDuration); + chunk->mBuffer = aBuffer; + for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) { + chunk->mChannelData.AppendElement(aChannelData[channel]); + } + chunk->mVolume = 1.0f; + chunk->mBufferFormat = AUDIO_FORMAT_S16; +#ifdef MOZILLA_INTERNAL_API + chunk->mTimeStamp = TimeStamp::Now(); +#endif + chunk->mPrincipalHandle = aPrincipalHandle; + } + // Consumes aChunk, and returns a pointer to the persistent copy of aChunk + // in the segment. + AudioChunk* AppendAndConsumeChunk(AudioChunk* aChunk) + { + AudioChunk* chunk = AppendChunk(aChunk->mDuration); + chunk->mBuffer = aChunk->mBuffer.forget(); + chunk->mChannelData.SwapElements(aChunk->mChannelData); + chunk->mVolume = aChunk->mVolume; + chunk->mBufferFormat = aChunk->mBufferFormat; +#ifdef MOZILLA_INTERNAL_API + chunk->mTimeStamp = TimeStamp::Now(); +#endif + chunk->mPrincipalHandle = aChunk->mPrincipalHandle; + return chunk; + } + void ApplyVolume(float aVolume); + // Mix the segment into a mixer, interleaved. This is useful to output a + // segment to a system audio callback. It up or down mixes to aChannelCount + // channels. + void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount, + uint32_t aSampleRate); + // Mix the segment into a mixer, keeping it planar, up or down mixing to + // aChannelCount channels. 
+ void Mix(AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate); + + int ChannelCount() { + NS_WARNING_ASSERTION( + !mChunks.IsEmpty(), + "Cannot query channel count on a AudioSegment with no chunks."); + // Find the first chunk that has non-zero channels. A chunk that hs zero + // channels is just silence and we can simply discard it. + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + if (ci->ChannelCount()) { + return ci->ChannelCount(); + } + } + return 0; + } + + bool IsNull() const { + for (ChunkIterator ci(*const_cast(this)); !ci.IsEnded(); + ci.Next()) { + if (!ci->IsNull()) { + return false; + } + } + return true; + } + + static Type StaticType() { return AUDIO; } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } +}; + +template +void WriteChunk(AudioChunk& aChunk, + uint32_t aOutputChannels, + AudioDataValue* aOutputBuffer) +{ + AutoTArray channelData; + + channelData = aChunk.ChannelData(); + + if (channelData.Length() < aOutputChannels) { + // Up-mix. Note that this might actually make channelData have more + // than aOutputChannels temporarily. + AudioChannelsUpMix(&channelData, aOutputChannels, SilentChannel::ZeroChannel()); + } + if (channelData.Length() > aOutputChannels) { + // Down-mix. 
+ DownmixAndInterleave(channelData, aChunk.mDuration, + aChunk.mVolume, aOutputChannels, aOutputBuffer); + } else { + InterleaveAndConvertBuffer(channelData.Elements(), + aChunk.mDuration, aChunk.mVolume, + aOutputChannels, + aOutputBuffer); + } +} + + + +} // namespace mozilla + +#endif /* MOZILLA_AUDIOSEGMENT_H_ */ diff --git a/gecko/include/Latency.h b/gecko/include/Latency.h new file mode 100644 index 0000000..b86ee7b --- /dev/null +++ b/gecko/include/Latency.h @@ -0,0 +1,99 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_LATENCY_H +#define MOZILLA_LATENCY_H + +#include "mozilla/TimeStamp.h" +#include "mozilla/Logging.h" +#include "nsCOMPtr.h" +#include "nsIThread.h" +#include "mozilla/Monitor.h" +#include "nsISupportsImpl.h" +#include "nsIObserver.h" + +class AsyncLatencyLogger; + +mozilla::LogModule* GetLatencyLog(); + +// This class is a singleton. It is refcounted. 
+class AsyncLatencyLogger : public nsIObserver +{ + NS_DECL_THREADSAFE_ISUPPORTS + NS_DECL_NSIOBSERVER + +public: + + enum LatencyLogIndex { + AudioMediaStreamTrack = 0, + VideoMediaStreamTrack, + Cubeb, + AudioStream, + NetEQ, + AudioCaptureBase, // base time for capturing an audio stream + AudioCapture, // records number of samples captured and the time + AudioTrackInsertion, // # of samples inserted into a mediastreamtrack and the time + MediaPipelineAudioInsertion, // Timestamp and time of timestamp + AudioTransmit, // Timestamp and socket send time + AudioReceive, // Timestamp and receive time + MediaPipelineAudioPlayout, // Timestamp and playout into MST time + MediaStreamCreate, // Source and TrackUnion streams + AudioStreamCreate, // TrackUnion stream and AudioStream + AudioSendRTP, + AudioRecvRTP, + _MAX_INDEX + }; + // Log with a null timestamp + void Log(LatencyLogIndex index, uint64_t aID, int64_t aValue); + // Log with a timestamp + void Log(LatencyLogIndex index, uint64_t aID, int64_t aValue, + mozilla::TimeStamp &aTime); + // Write a log message to NSPR + void WriteLog(LatencyLogIndex index, uint64_t aID, int64_t aValue, + mozilla::TimeStamp timestamp); + // Get the base time used by the logger for delta calculations + void GetStartTime(mozilla::TimeStamp &aStart); + + static AsyncLatencyLogger* Get(bool aStartTimer = false); + static void InitializeStatics(); + // After this is called, the global log object may go away + static void ShutdownLogger(); +private: + AsyncLatencyLogger(); + virtual ~AsyncLatencyLogger(); + int64_t GetTimeStamp(); + void Init(); + // Shut down the thread associated with this, and make sure it doesn't + // start up again. + void Shutdown(); + // The thread on which the IO happens + nsCOMPtr mThread; + // This can be initialized on multiple threads, but is protected by a + // monitor. After the initialization phase, it is accessed on the log + // thread only. 
+ mozilla::TimeStamp mStart; + // This monitor protects mStart and mMediaLatencyLog for the + // initialization sequence. It is initialized at layout startup, and + // destroyed at layout shutdown. + mozilla::Mutex mMutex; +}; + +// need uint32_t versions for access from webrtc/trunk code +// Log without a time delta +void LogLatency(AsyncLatencyLogger::LatencyLogIndex index, uint64_t aID, int64_t aValue); +void LogLatency(uint32_t index, uint64_t aID, int64_t aValue); +// Log TimeStamp::Now() (as delta) +void LogTime(AsyncLatencyLogger::LatencyLogIndex index, uint64_t aID, int64_t aValue); +void LogTime(uint32_t index, uint64_t aID, int64_t aValue); +// Log the specified time (as delta) +void LogTime(AsyncLatencyLogger::LatencyLogIndex index, uint64_t aID, int64_t aValue, + mozilla::TimeStamp &aTime); + +// For generating unique-ish ids for logged sources +#define LATENCY_STREAM_ID(source, trackID) \ + ((((uint64_t) (source)) & ~0x0F) | (trackID)) + +#endif diff --git a/gecko/include/MediaInfo.h b/gecko/include/MediaInfo.h index 4dfbfed..7f5dfc4 100644 --- a/gecko/include/MediaInfo.h +++ b/gecko/include/MediaInfo.h @@ -297,7 +297,7 @@ class VideoInfo : public TrackInfo return imageRect; } - Rotation ToSupportedRotation(int32_t aDegree) + Rotation ToSupportedRotation(int32_t aDegree) const { switch (aDegree) { case 90: diff --git a/gecko/include/MediaSegment.h b/gecko/include/MediaSegment.h new file mode 100644 index 0000000..3478435 --- /dev/null +++ b/gecko/include/MediaSegment.h @@ -0,0 +1,509 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef MOZILLA_MEDIASEGMENT_H_ +#define MOZILLA_MEDIASEGMENT_H_ + +#include "nsTArray.h" +#include "nsIPrincipal.h" +#include "nsProxyRelease.h" +#ifdef MOZILLA_INTERNAL_API +#include "mozilla/TimeStamp.h" +#endif +#include +#include "Latency.h" + +namespace mozilla { + +/** + * Track or graph rate in Hz. Maximum 1 << TRACK_RATE_MAX_BITS Hz. This + * maximum avoids overflow in conversions between track rates and conversions + * from seconds. + */ +typedef int32_t TrackRate; +const int64_t TRACK_RATE_MAX_BITS = 20; +const TrackRate TRACK_RATE_MAX = 1 << TRACK_RATE_MAX_BITS; + +/** + * A number of ticks at a rate determined by some underlying track (e.g. + * audio sample rate). We want to make sure that multiplying TrackTicks by + * a TrackRate doesn't overflow, so we set its max accordingly. + * StreamTime should be used instead when we're working with MediaStreamGraph's + * rate, but TrackTicks can be used outside MediaStreams when we have data + * at a different rate. + */ +typedef int64_t TrackTicks; +const int64_t TRACK_TICKS_MAX = INT64_MAX >> TRACK_RATE_MAX_BITS; + +/** + * We represent media times in 64-bit audio frame counts or ticks. + * All tracks in a MediaStreamGraph have the same rate. + */ +typedef int64_t MediaTime; +const int64_t MEDIA_TIME_MAX = TRACK_TICKS_MAX; + +/** + * Media time relative to the start of a StreamTracks. + */ +typedef MediaTime StreamTime; +const StreamTime STREAM_TIME_MAX = MEDIA_TIME_MAX; + +/** + * Media time relative to the start of the graph timeline. + */ +typedef MediaTime GraphTime; +const GraphTime GRAPH_TIME_MAX = MEDIA_TIME_MAX; + +/** + * We pass the principal through the MediaStreamGraph by wrapping it in a thread + * safe nsMainThreadPtrHandle, since it cannot be used directly off the main + * thread. We can compare two PrincipalHandles to each other on any thread, but + * they can only be created and converted back to nsIPrincipal* on main thread. 
+ */ +typedef nsMainThreadPtrHandle PrincipalHandle; + +inline PrincipalHandle MakePrincipalHandle(nsIPrincipal* aPrincipal) +{ + RefPtr> holder = + new nsMainThreadPtrHolder( + "MakePrincipalHandle::nsIPrincipal", aPrincipal); + return PrincipalHandle(holder); +} + +#define PRINCIPAL_HANDLE_NONE nullptr + +inline nsIPrincipal* GetPrincipalFromHandle(PrincipalHandle& aPrincipalHandle) +{ + MOZ_ASSERT(NS_IsMainThread()); + return aPrincipalHandle.get(); +} + +inline bool PrincipalHandleMatches(PrincipalHandle& aPrincipalHandle, + nsIPrincipal* aOther) +{ + if (!aOther) { + return false; + } + + nsIPrincipal* principal = GetPrincipalFromHandle(aPrincipalHandle); + if (!principal) { + return false; + } + + bool result; + if (NS_FAILED(principal->Equals(aOther, &result))) { + NS_ERROR("Principal check failed"); + return false; + } + + return result; +} + +/** + * A MediaSegment is a chunk of media data sequential in time. Different + * types of data have different subclasses of MediaSegment, all inheriting + * from MediaSegmentBase. + * All MediaSegment data is timed using StreamTime. The actual tick rate + * is defined on a per-track basis. For some track types, this can be + * a fixed constant for all tracks of that type (e.g. 1MHz for video). + * + * Each media segment defines a concept of "null media data" (e.g. silence + * for audio or "no video frame" for video), which can be efficiently + * represented. This is used for padding. + */ +class MediaSegment { +public: + virtual ~MediaSegment() + { + MOZ_COUNT_DTOR(MediaSegment); + } + + enum Type { + AUDIO, + VIDEO, + TYPE_COUNT + }; + + /** + * Gets the total duration of the segment. + */ + StreamTime GetDuration() const { return mDuration; } + Type GetType() const { return mType; } + + /** + * Gets the last principal id that was appended to this segment. 
+ */ + PrincipalHandle GetLastPrincipalHandle() const { return mLastPrincipalHandle; } + /** + * Called by the MediaStreamGraph as it appends a chunk with a different + * principal id than the current one. + */ + void SetLastPrincipalHandle(const PrincipalHandle& aLastPrincipalHandle) + { + mLastPrincipalHandle = aLastPrincipalHandle; + } + + /** + * Create a MediaSegment of the same type. + */ + virtual MediaSegment* CreateEmptyClone() const = 0; + /** + * Moves contents of aSource to the end of this segment. + */ + virtual void AppendFrom(MediaSegment* aSource) = 0; + /** + * Append a slice of aSource to this segment. + */ + virtual void AppendSlice(const MediaSegment& aSource, + StreamTime aStart, StreamTime aEnd) = 0; + /** + * Replace all contents up to aDuration with null data. + */ + virtual void ForgetUpTo(StreamTime aDuration) = 0; + /** + * Forget all data buffered after a given point + */ + virtual void FlushAfter(StreamTime aNewEnd) = 0; + /** + * Insert aDuration of null data at the start of the segment. + */ + virtual void InsertNullDataAtStart(StreamTime aDuration) = 0; + /** + * Insert aDuration of null data at the end of the segment. + */ + virtual void AppendNullData(StreamTime aDuration) = 0; + /** + * Replace contents with disabled (silence/black) data of the same duration + */ + virtual void ReplaceWithDisabled() = 0; + /** + * Replace contents with null data of the same duration + */ + virtual void ReplaceWithNull() = 0; + /** + * Remove all contents, setting duration to 0. 
+ */ + virtual void Clear() = 0; + + virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const + { + return 0; + } + + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +protected: + explicit MediaSegment(Type aType) + : mDuration(0), mType(aType), mLastPrincipalHandle(PRINCIPAL_HANDLE_NONE) + { + MOZ_COUNT_CTOR(MediaSegment); + } + + StreamTime mDuration; // total of mDurations of all chunks + Type mType; + + // The latest principal handle that the MediaStreamGraph has processed for + // this segment. + PrincipalHandle mLastPrincipalHandle; +}; + +/** + * C is the implementation class subclassed from MediaSegmentBase. + * C must contain a Chunk class. + */ +template class MediaSegmentBase : public MediaSegment { +public: + MediaSegment* CreateEmptyClone() const override + { + return new C(); + } + void AppendFrom(MediaSegment* aSource) override + { + NS_ASSERTION(aSource->GetType() == C::StaticType(), "Wrong type"); + AppendFromInternal(static_cast(aSource)); + } + void AppendFrom(C* aSource) + { + AppendFromInternal(aSource); + } + void AppendSlice(const MediaSegment& aSource, + StreamTime aStart, StreamTime aEnd) override + { + NS_ASSERTION(aSource.GetType() == C::StaticType(), "Wrong type"); + AppendSliceInternal(static_cast(aSource), aStart, aEnd); + } + void AppendSlice(const C& aOther, StreamTime aStart, StreamTime aEnd) + { + AppendSliceInternal(aOther, aStart, aEnd); + } + /** + * Replace the first aDuration ticks with null media data, because the data + * will not be required again. 
+ */ + void ForgetUpTo(StreamTime aDuration) override + { + if (mChunks.IsEmpty() || aDuration <= 0) { + return; + } + if (mChunks[0].IsNull()) { + StreamTime extraToForget = std::min(aDuration, mDuration) - mChunks[0].GetDuration(); + if (extraToForget > 0) { + RemoveLeading(extraToForget, 1); + mChunks[0].mDuration += extraToForget; + mDuration += extraToForget; + } + return; + } + RemoveLeading(aDuration, 0); + mChunks.InsertElementAt(0)->SetNull(aDuration); + mDuration += aDuration; + } + void FlushAfter(StreamTime aNewEnd) override + { + if (mChunks.IsEmpty()) { + return; + } + + if (mChunks[0].IsNull()) { + StreamTime extraToKeep = aNewEnd - mChunks[0].GetDuration(); + if (extraToKeep < 0) { + // reduce the size of the Null, get rid of everthing else + mChunks[0].SetNull(aNewEnd); + extraToKeep = 0; + } + RemoveTrailing(extraToKeep, 1); + } else { + if (aNewEnd > mDuration) { + NS_ASSERTION(aNewEnd <= mDuration, "can't add data in FlushAfter"); + return; + } + RemoveTrailing(aNewEnd, 0); + } + mDuration = aNewEnd; + } + void InsertNullDataAtStart(StreamTime aDuration) override + { + if (aDuration <= 0) { + return; + } + if (!mChunks.IsEmpty() && mChunks[0].IsNull()) { + mChunks[0].mDuration += aDuration; + } else { + mChunks.InsertElementAt(0)->SetNull(aDuration); + } +#ifdef MOZILLA_INTERNAL_API + mChunks[0].mTimeStamp = mozilla::TimeStamp::Now(); +#endif + mDuration += aDuration; + } + void AppendNullData(StreamTime aDuration) override + { + if (aDuration <= 0) { + return; + } + if (!mChunks.IsEmpty() && mChunks[mChunks.Length() - 1].IsNull()) { + mChunks[mChunks.Length() - 1].mDuration += aDuration; + } else { + mChunks.AppendElement()->SetNull(aDuration); + } + mDuration += aDuration; + } + void ReplaceWithDisabled() override + { + if (GetType() != AUDIO) { + MOZ_CRASH("Disabling unknown segment type"); + } + ReplaceWithNull(); + } + void ReplaceWithNull() override + { + StreamTime duration = GetDuration(); + Clear(); + AppendNullData(duration); + } + 
void Clear() override + { + mDuration = 0; + mChunks.Clear(); + } + + class ChunkIterator { + public: + explicit ChunkIterator(MediaSegmentBase& aSegment) + : mSegment(aSegment), mIndex(0) {} + bool IsEnded() { return mIndex >= mSegment.mChunks.Length(); } + void Next() { ++mIndex; } + Chunk& operator*() { return mSegment.mChunks[mIndex]; } + Chunk* operator->() { return &mSegment.mChunks[mIndex]; } + private: + MediaSegmentBase& mSegment; + uint32_t mIndex; + }; + class ConstChunkIterator { + public: + explicit ConstChunkIterator(const MediaSegmentBase& aSegment) + : mSegment(aSegment), mIndex(0) {} + bool IsEnded() { return mIndex >= mSegment.mChunks.Length(); } + void Next() { ++mIndex; } + const Chunk& operator*() { return mSegment.mChunks[mIndex]; } + const Chunk* operator->() { return &mSegment.mChunks[mIndex]; } + private: + const MediaSegmentBase& mSegment; + uint32_t mIndex; + }; + + Chunk* FindChunkContaining(StreamTime aOffset, StreamTime* aStart = nullptr) + { + if (aOffset < 0) { + return nullptr; + } + StreamTime offset = 0; + for (uint32_t i = 0; i < mChunks.Length(); ++i) { + Chunk& c = mChunks[i]; + StreamTime nextOffset = offset + c.GetDuration(); + if (aOffset < nextOffset) { + if (aStart) { + *aStart = offset; + } + return &c; + } + offset = nextOffset; + } + return nullptr; + } + + void RemoveLeading(StreamTime aDuration) + { + RemoveLeading(aDuration, 0); + } + +#ifdef MOZILLA_INTERNAL_API + void GetStartTime(TimeStamp &aTime) { + aTime = mChunks[0].mTimeStamp; + } +#endif + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + size_t amount = mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < mChunks.Length(); i++) { + amount += mChunks[i].SizeOfExcludingThisIfUnshared(aMallocSizeOf); + } + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + Chunk* GetLastChunk() + { + if 
(mChunks.IsEmpty()) { + return nullptr; + } + return &mChunks[mChunks.Length() - 1]; + } + +protected: + explicit MediaSegmentBase(Type aType) : MediaSegment(aType) {} + + /** + * Appends the contents of aSource to this segment, clearing aSource. + */ + void AppendFromInternal(MediaSegmentBase* aSource) + { + MOZ_ASSERT(aSource->mDuration >= 0); + mDuration += aSource->mDuration; + aSource->mDuration = 0; + if (!mChunks.IsEmpty() && !aSource->mChunks.IsEmpty() && + mChunks[mChunks.Length() - 1].CanCombineWithFollowing(aSource->mChunks[0])) { + mChunks[mChunks.Length() - 1].mDuration += aSource->mChunks[0].mDuration; + aSource->mChunks.RemoveElementAt(0); + } + mChunks.AppendElements(Move(aSource->mChunks)); + } + + void AppendSliceInternal(const MediaSegmentBase& aSource, + StreamTime aStart, StreamTime aEnd) + { + MOZ_ASSERT(aStart <= aEnd, "Endpoints inverted"); + NS_ASSERTION(aStart >= 0 && aEnd <= aSource.mDuration, "Slice out of range"); + mDuration += aEnd - aStart; + StreamTime offset = 0; + for (uint32_t i = 0; i < aSource.mChunks.Length() && offset < aEnd; ++i) { + const Chunk& c = aSource.mChunks[i]; + StreamTime start = std::max(aStart, offset); + StreamTime nextOffset = offset + c.GetDuration(); + StreamTime end = std::min(aEnd, nextOffset); + if (start < end) { + mChunks.AppendElement(c)->SliceTo(start - offset, end - offset); + } + offset = nextOffset; + } + } + + Chunk* AppendChunk(StreamTime aDuration) + { + MOZ_ASSERT(aDuration >= 0); + Chunk* c = mChunks.AppendElement(); + c->mDuration = aDuration; + mDuration += aDuration; + return c; + } + + void RemoveLeading(StreamTime aDuration, uint32_t aStartIndex) + { + NS_ASSERTION(aDuration >= 0, "Can't remove negative duration"); + StreamTime t = aDuration; + uint32_t chunksToRemove = 0; + for (uint32_t i = aStartIndex; i < mChunks.Length() && t > 0; ++i) { + Chunk* c = &mChunks[i]; + if (c->GetDuration() > t) { + c->SliceTo(t, c->GetDuration()); + t = 0; + break; + } + t -= c->GetDuration(); + 
chunksToRemove = i + 1 - aStartIndex; + } + mChunks.RemoveElementsAt(aStartIndex, chunksToRemove); + mDuration -= aDuration - t; + } + + void RemoveTrailing(StreamTime aKeep, uint32_t aStartIndex) + { + NS_ASSERTION(aKeep >= 0, "Can't keep negative duration"); + StreamTime t = aKeep; + uint32_t i; + for (i = aStartIndex; i < mChunks.Length(); ++i) { + Chunk* c = &mChunks[i]; + if (c->GetDuration() > t) { + c->SliceTo(0, t); + break; + } + t -= c->GetDuration(); + if (t == 0) { + break; + } + } + if (i+1 < mChunks.Length()) { + mChunks.RemoveElementsAt(i+1, mChunks.Length() - (i+1)); + } + // Caller must adjust mDuration + } + + nsTArray mChunks; +#ifdef MOZILLA_INTERNAL_API + mozilla::TimeStamp mTimeStamp; +#endif +}; + +} // namespace mozilla + +#endif /* MOZILLA_MEDIASEGMENT_H_ */ diff --git a/gecko/include/TimeUnits.h b/gecko/include/TimeUnits.h index 2e324e0..595a9eb 100644 --- a/gecko/include/TimeUnits.h +++ b/gecko/include/TimeUnits.h @@ -40,9 +40,11 @@ static const int64_t NSECS_PER_S = 1000000000; // TimeUnit at present uses a CheckedInt64 as storage. // INT64_MAX has the special meaning of being +oo. 
-class TimeUnit final { +class TimeUnit final +{ public: - static TimeUnit FromSeconds(double aValue) { + static TimeUnit FromSeconds(double aValue) + { MOZ_ASSERT(!IsNaN(aValue)); if (mozilla::IsInfinite(aValue)) { @@ -60,27 +62,27 @@ class TimeUnit final { } } - static constexpr TimeUnit FromMicroseconds(int64_t aValue) { + static constexpr TimeUnit FromMicroseconds(int64_t aValue) + { return TimeUnit(aValue); } - static constexpr TimeUnit FromNanoseconds(int64_t aValue) { + static constexpr TimeUnit FromNanoseconds(int64_t aValue) + { return TimeUnit(aValue / 1000); } - static constexpr TimeUnit FromInfinity() { - return TimeUnit(INT64_MAX); - } + static constexpr TimeUnit FromInfinity() { return TimeUnit(INT64_MAX); } - static TimeUnit FromTimeDuration(const TimeDuration& aDuration) { + static TimeUnit FromTimeDuration(const TimeDuration& aDuration) + { return FromSeconds(aDuration.ToSeconds()); } - static constexpr TimeUnit Zero() { - return TimeUnit(0); - } + static constexpr TimeUnit Zero() { return TimeUnit(0); } - static TimeUnit Invalid() { + static TimeUnit Invalid() + { TimeUnit ret; ret.mValue = CheckedInt64(INT64_MAX); // Force an overflow to render the CheckedInt invalid. 
@@ -88,112 +90,110 @@ class TimeUnit final { return ret; } - int64_t ToMicroseconds() const { - return mValue.value(); - } + int64_t ToMicroseconds() const { return mValue.value(); } - int64_t ToNanoseconds() const { - return mValue.value() * 1000; - } + int64_t ToNanoseconds() const { return mValue.value() * 1000; } - double ToSeconds() const { + double ToSeconds() const + { if (IsInfinite()) { return PositiveInfinity(); } return double(mValue.value()) / USECS_PER_S; } - TimeDuration ToTimeDuration() const { + TimeDuration ToTimeDuration() const + { return TimeDuration::FromMicroseconds(mValue.value()); } - bool IsInfinite() const { - return mValue.value() == INT64_MAX; - } + bool IsInfinite() const { return mValue.value() == INT64_MAX; } - bool IsPositive() const { - return mValue.value() > 0; - } + bool IsPositive() const { return mValue.value() > 0; } - bool IsNegative() const { - return mValue.value() < 0; - } + bool IsNegative() const { return mValue.value() < 0; } - bool operator == (const TimeUnit& aOther) const { + bool operator==(const TimeUnit& aOther) const + { MOZ_ASSERT(IsValid() && aOther.IsValid()); return mValue.value() == aOther.mValue.value(); } - bool operator != (const TimeUnit& aOther) const { + bool operator!=(const TimeUnit& aOther) const + { MOZ_ASSERT(IsValid() && aOther.IsValid()); return mValue.value() != aOther.mValue.value(); } - bool operator >= (const TimeUnit& aOther) const { + bool operator>=(const TimeUnit& aOther) const + { MOZ_ASSERT(IsValid() && aOther.IsValid()); return mValue.value() >= aOther.mValue.value(); } - bool operator > (const TimeUnit& aOther) const { - return !(*this <= aOther); - } - bool operator <= (const TimeUnit& aOther) const { + bool operator>(const TimeUnit& aOther) const { return !(*this <= aOther); } + bool operator<=(const TimeUnit& aOther) const + { MOZ_ASSERT(IsValid() && aOther.IsValid()); return mValue.value() <= aOther.mValue.value(); } - bool operator < (const TimeUnit& aOther) const { - return 
!(*this >= aOther); - } - TimeUnit operator + (const TimeUnit& aOther) const { + bool operator<(const TimeUnit& aOther) const { return !(*this >= aOther); } + TimeUnit operator+(const TimeUnit& aOther) const + { if (IsInfinite() || aOther.IsInfinite()) { return FromInfinity(); } return TimeUnit(mValue + aOther.mValue); } - TimeUnit operator - (const TimeUnit& aOther) const { + TimeUnit operator-(const TimeUnit& aOther) const + { if (IsInfinite() && !aOther.IsInfinite()) { return FromInfinity(); } MOZ_ASSERT(!IsInfinite() && !aOther.IsInfinite()); return TimeUnit(mValue - aOther.mValue); } - TimeUnit& operator += (const TimeUnit& aOther) { + TimeUnit& operator+=(const TimeUnit& aOther) + { *this = *this + aOther; return *this; } - TimeUnit& operator -= (const TimeUnit& aOther) { + TimeUnit& operator-=(const TimeUnit& aOther) + { *this = *this - aOther; return *this; } - template - TimeUnit operator*(T aVal) const { + template + TimeUnit operator*(T aVal) const + { // See bug 853398 for the reason to block double multiplier. // If required, use MultDouble below and with caution. static_assert(mozilla::IsIntegral::value, "Must be an integral type"); return TimeUnit(mValue * aVal); } - TimeUnit MultDouble(double aVal) const { + TimeUnit MultDouble(double aVal) const + { return TimeUnit::FromSeconds(ToSeconds() * aVal); } - friend TimeUnit operator/ (const TimeUnit& aUnit, int aVal) { + friend TimeUnit operator/(const TimeUnit& aUnit, int aVal) + { return TimeUnit(aUnit.mValue / aVal); } - bool IsValid() const - { - return mValue.isValid(); - } + bool IsValid() const { return mValue.isValid(); } constexpr TimeUnit() : mValue(CheckedInt64(0)) - {} + { + } TimeUnit(const TimeUnit&) = default; - TimeUnit& operator = (const TimeUnit&) = default; + TimeUnit& operator=(const TimeUnit&) = default; private: explicit constexpr TimeUnit(CheckedInt64 aMicroseconds) : mValue(aMicroseconds) - {} + { + } // Our internal representation is in microseconds. 
CheckedInt64 mValue; @@ -217,16 +217,20 @@ class TimeIntervals : public IntervalSet // TimeIntervals i = ... like we would do with IntervalSet i = ... MOZ_IMPLICIT TimeIntervals(const BaseType& aOther) : BaseType(aOther) - {} + { + } MOZ_IMPLICIT TimeIntervals(BaseType&& aOther) : BaseType(Move(aOther)) - {} + { + } explicit TimeIntervals(const BaseType::ElemType& aOther) : BaseType(aOther) - {} + { + } explicit TimeIntervals(BaseType::ElemType&& aOther) : BaseType(Move(aOther)) - {} + { + } static TimeIntervals Invalid() { @@ -236,7 +240,7 @@ class TimeIntervals : public IntervalSet bool IsInvalid() const { return Length() == 1 && Start(0).ToMicroseconds() == INT64_MIN && - End(0).ToMicroseconds() == INT64_MIN; + End(0).ToMicroseconds() == INT64_MIN; } TimeIntervals() = default; diff --git a/gecko/include/js/GCAPI.h b/gecko/include/js/GCAPI.h new file mode 100644 index 0000000..6ff6fb4 --- /dev/null +++ b/gecko/include/js/GCAPI.h @@ -0,0 +1,712 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef js_GCAPI_h +#define js_GCAPI_h + +#include "mozilla/TimeStamp.h" +#include "mozilla/Vector.h" + +#include "js/GCAnnotations.h" +#include "js/HeapAPI.h" +#include "js/UniquePtr.h" +#include "js/Utility.h" + +namespace js { +namespace gc { +class GCRuntime; +} // namespace gc +namespace gcstats { +struct Statistics; +} // namespace gcstats +} // namespace js + +typedef enum JSGCMode { + /** Perform only global GCs. */ + JSGC_MODE_GLOBAL = 0, + + /** Perform per-zone GCs until too much garbage has accumulated. */ + JSGC_MODE_ZONE = 1, + + /** + * Collect in short time slices rather than all at once. Implies + * JSGC_MODE_ZONE. 
+ */ + JSGC_MODE_INCREMENTAL = 2 +} JSGCMode; + +/** + * Kinds of js_GC invocation. + */ +typedef enum JSGCInvocationKind { + /* Normal invocation. */ + GC_NORMAL = 0, + + /* Minimize GC triggers and release empty GC chunks right away. */ + GC_SHRINK = 1 +} JSGCInvocationKind; + +namespace JS { + +#define GCREASONS(D) \ + /* Reasons internal to the JS engine */ \ + D(API) \ + D(EAGER_ALLOC_TRIGGER) \ + D(DESTROY_RUNTIME) \ + D(ROOTS_REMOVED) \ + D(LAST_DITCH) \ + D(TOO_MUCH_MALLOC) \ + D(ALLOC_TRIGGER) \ + D(DEBUG_GC) \ + D(COMPARTMENT_REVIVED) \ + D(RESET) \ + D(OUT_OF_NURSERY) \ + D(EVICT_NURSERY) \ + D(DELAYED_ATOMS_GC) \ + D(SHARED_MEMORY_LIMIT) \ + D(UNUSED1) \ + D(INCREMENTAL_TOO_SLOW) \ + D(ABORT_GC) \ + D(FULL_WHOLE_CELL_BUFFER) \ + D(FULL_GENERIC_BUFFER) \ + D(FULL_VALUE_BUFFER) \ + D(FULL_CELL_PTR_BUFFER) \ + D(FULL_SLOT_BUFFER) \ + D(FULL_SHAPE_BUFFER) \ + \ + /* These are reserved for future use. */ \ + D(RESERVED0) \ + D(RESERVED1) \ + D(RESERVED2) \ + D(RESERVED3) \ + D(RESERVED4) \ + D(RESERVED5) \ + D(RESERVED6) \ + D(RESERVED7) \ + D(RESERVED8) \ + D(RESERVED9) \ + \ + /* Reasons from Firefox */ \ + D(DOM_WINDOW_UTILS) \ + D(COMPONENT_UTILS) \ + D(MEM_PRESSURE) \ + D(CC_WAITING) \ + D(CC_FORCED) \ + D(LOAD_END) \ + D(POST_COMPARTMENT) \ + D(PAGE_HIDE) \ + D(NSJSCONTEXT_DESTROY) \ + D(SET_NEW_DOCUMENT) \ + D(SET_DOC_SHELL) \ + D(DOM_UTILS) \ + D(DOM_IPC) \ + D(DOM_WORKER) \ + D(INTER_SLICE_GC) \ + D(REFRESH_FRAME) \ + D(FULL_GC_TIMER) \ + D(SHUTDOWN_CC) \ + D(UNUSED2) \ + D(USER_INACTIVE) \ + D(XPCONNECT_SHUTDOWN) \ + D(DOCSHELL) \ + D(HTML_PARSER) + +namespace gcreason { + +/* GCReasons will end up looking like JSGC_MAYBEGC */ +enum Reason { +#define MAKE_REASON(name) name, + GCREASONS(MAKE_REASON) +#undef MAKE_REASON + NO_REASON, + NUM_REASONS, + + /* + * For telemetry, we want to keep a fixed max bucket size over time so we + * don't have to switch histograms. 100 is conservative; as of this writing + * there are 52. 
But the cost of extra buckets seems to be low while the + * cost of switching histograms is high. + */ + NUM_TELEMETRY_REASONS = 100 +}; + +/** + * Get a statically allocated C string explaining the given GC reason. + */ +extern JS_PUBLIC_API(const char*) +ExplainReason(JS::gcreason::Reason reason); + +} /* namespace gcreason */ + +/* + * Zone GC: + * + * SpiderMonkey's GC is capable of performing a collection on an arbitrary + * subset of the zones in the system. This allows an embedding to minimize + * collection time by only collecting zones that have run code recently, + * ignoring the parts of the heap that are unlikely to have changed. + * + * When triggering a GC using one of the functions below, it is first necessary + * to select the zones to be collected. To do this, you can call + * PrepareZoneForGC on each zone, or you can call PrepareForFullGC to select + * all zones. Failing to select any zone is an error. + */ + +/** + * Schedule the given zone to be collected as part of the next GC. + */ +extern JS_PUBLIC_API(void) +PrepareZoneForGC(Zone* zone); + +/** + * Schedule all zones to be collected in the next GC. + */ +extern JS_PUBLIC_API(void) +PrepareForFullGC(JSContext* cx); + +/** + * When performing an incremental GC, the zones that were selected for the + * previous incremental slice must be selected in subsequent slices as well. + * This function selects those slices automatically. + */ +extern JS_PUBLIC_API(void) +PrepareForIncrementalGC(JSContext* cx); + +/** + * Returns true if any zone in the system has been scheduled for GC with one of + * the functions above or by the JS engine. + */ +extern JS_PUBLIC_API(bool) +IsGCScheduled(JSContext* cx); + +/** + * Undoes the effect of the Prepare methods above. The given zone will not be + * collected in the next GC. + */ +extern JS_PUBLIC_API(void) +SkipZoneForGC(Zone* zone); + +/* + * Non-Incremental GC: + * + * The following functions perform a non-incremental GC. 
+ */ + +/** + * Performs a non-incremental collection of all selected zones. + * + * If the gckind argument is GC_NORMAL, then some objects that are unreachable + * from the program may still be alive afterwards because of internal + * references; if GC_SHRINK is passed then caches and other temporary references + * to objects will be cleared and all unreferenced objects will be removed from + * the system. + */ +extern JS_PUBLIC_API(void) +GCForReason(JSContext* cx, JSGCInvocationKind gckind, gcreason::Reason reason); + +/* + * Incremental GC: + * + * Incremental GC divides the full mark-and-sweep collection into multiple + * slices, allowing client JavaScript code to run between each slice. This + * allows interactive apps to avoid long collection pauses. Incremental GC does + * not make collection take less time, it merely spreads that time out so that + * the pauses are less noticable. + * + * For a collection to be carried out incrementally the following conditions + * must be met: + * - The collection must be run by calling JS::IncrementalGC() rather than + * JS_GC(). + * - The GC mode must have been set to JSGC_MODE_INCREMENTAL with + * JS_SetGCParameter(). + * + * Note: Even if incremental GC is enabled and working correctly, + * non-incremental collections can still happen when low on memory. + */ + +/** + * Begin an incremental collection and perform one slice worth of work. When + * this function returns, the collection may not be complete. + * IncrementalGCSlice() must be called repeatedly until + * !IsIncrementalGCInProgress(cx). + * + * Note: SpiderMonkey's GC is not realtime. Slices in practice may be longer or + * shorter than the requested interval. + */ +extern JS_PUBLIC_API(void) +StartIncrementalGC(JSContext* cx, JSGCInvocationKind gckind, gcreason::Reason reason, + int64_t millis = 0); + +/** + * Perform a slice of an ongoing incremental collection. When this function + * returns, the collection may not be complete. 
It must be called repeatedly + * until !IsIncrementalGCInProgress(cx). + * + * Note: SpiderMonkey's GC is not realtime. Slices in practice may be longer or + * shorter than the requested interval. + */ +extern JS_PUBLIC_API(void) +IncrementalGCSlice(JSContext* cx, gcreason::Reason reason, int64_t millis = 0); + +/** + * If IsIncrementalGCInProgress(cx), this call finishes the ongoing collection + * by performing an arbitrarily long slice. If !IsIncrementalGCInProgress(cx), + * this is equivalent to GCForReason. When this function returns, + * IsIncrementalGCInProgress(cx) will always be false. + */ +extern JS_PUBLIC_API(void) +FinishIncrementalGC(JSContext* cx, gcreason::Reason reason); + +/** + * If IsIncrementalGCInProgress(cx), this call aborts the ongoing collection and + * performs whatever work needs to be done to return the collector to its idle + * state. This may take an arbitrarily long time. When this function returns, + * IsIncrementalGCInProgress(cx) will always be false. + */ +extern JS_PUBLIC_API(void) +AbortIncrementalGC(JSContext* cx); + +namespace dbg { + +// The `JS::dbg::GarbageCollectionEvent` class is essentially a view of the +// `js::gcstats::Statistics` data without the uber implementation-specific bits. +// It should generally be palatable for web developers. +class GarbageCollectionEvent +{ + // The major GC number of the GC cycle this data pertains to. + uint64_t majorGCNumber_; + + // Reference to a non-owned, statically allocated C string. This is a very + // short reason explaining why a GC was triggered. + const char* reason; + + // Reference to a nullable, non-owned, statically allocated C string. If the + // collection was forced to be non-incremental, this is a short reason of + // why the GC could not perform an incremental collection. + const char* nonincrementalReason; + + // Represents a single slice of a possibly multi-slice incremental garbage + // collection. 
+ struct Collection { + mozilla::TimeStamp startTimestamp; + mozilla::TimeStamp endTimestamp; + }; + + // The set of garbage collection slices that made up this GC cycle. + mozilla::Vector collections; + + GarbageCollectionEvent(const GarbageCollectionEvent& rhs) = delete; + GarbageCollectionEvent& operator=(const GarbageCollectionEvent& rhs) = delete; + + public: + explicit GarbageCollectionEvent(uint64_t majorGCNum) + : majorGCNumber_(majorGCNum) + , reason(nullptr) + , nonincrementalReason(nullptr) + , collections() + { } + + using Ptr = js::UniquePtr; + static Ptr Create(JSRuntime* rt, ::js::gcstats::Statistics& stats, uint64_t majorGCNumber); + + JSObject* toJSObject(JSContext* cx) const; + + uint64_t majorGCNumber() const { return majorGCNumber_; } +}; + +} // namespace dbg + +enum GCProgress { + /* + * During GC, the GC is bracketed by GC_CYCLE_BEGIN/END callbacks. Each + * slice between those (whether an incremental or the sole non-incremental + * slice) is bracketed by GC_SLICE_BEGIN/GC_SLICE_END. 
+ */ + + GC_CYCLE_BEGIN, + GC_SLICE_BEGIN, + GC_SLICE_END, + GC_CYCLE_END +}; + +struct JS_PUBLIC_API(GCDescription) { + bool isZone_; + bool isComplete_; + JSGCInvocationKind invocationKind_; + gcreason::Reason reason_; + + GCDescription(bool isZone, bool isComplete, JSGCInvocationKind kind, gcreason::Reason reason) + : isZone_(isZone), isComplete_(isComplete), invocationKind_(kind), reason_(reason) {} + + char16_t* formatSliceMessage(JSContext* cx) const; + char16_t* formatSummaryMessage(JSContext* cx) const; + char16_t* formatJSON(JSContext* cx, uint64_t timestamp) const; + + mozilla::TimeStamp startTime(JSContext* cx) const; + mozilla::TimeStamp endTime(JSContext* cx) const; + mozilla::TimeStamp lastSliceStart(JSContext* cx) const; + mozilla::TimeStamp lastSliceEnd(JSContext* cx) const; + + JS::UniqueChars sliceToJSON(JSContext* cx) const; + JS::UniqueChars summaryToJSON(JSContext* cx) const; + + JS::dbg::GarbageCollectionEvent::Ptr toGCEvent(JSContext* cx) const; +}; + +extern JS_PUBLIC_API(UniqueChars) +MinorGcToJSON(JSContext* cx); + +typedef void +(* GCSliceCallback)(JSContext* cx, GCProgress progress, const GCDescription& desc); + +/** + * The GC slice callback is called at the beginning and end of each slice. This + * callback may be used for GC notifications as well as to perform additional + * marking. + */ +extern JS_PUBLIC_API(GCSliceCallback) +SetGCSliceCallback(JSContext* cx, GCSliceCallback callback); + +/** + * Describes the progress of an observed nursery collection. + */ +enum class GCNurseryProgress { + /** + * The nursery collection is starting. + */ + GC_NURSERY_COLLECTION_START, + /** + * The nursery collection is ending. + */ + GC_NURSERY_COLLECTION_END +}; + +/** + * A nursery collection callback receives the progress of the nursery collection + * and the reason for the collection. 
+ */ +using GCNurseryCollectionCallback = void(*)(JSContext* cx, GCNurseryProgress progress, + gcreason::Reason reason); + +/** + * Set the nursery collection callback for the given runtime. When set, it will + * be called at the start and end of every nursery collection. + */ +extern JS_PUBLIC_API(GCNurseryCollectionCallback) +SetGCNurseryCollectionCallback(JSContext* cx, GCNurseryCollectionCallback callback); + +typedef void +(* DoCycleCollectionCallback)(JSContext* cx); + +/** + * The purge gray callback is called after any COMPARTMENT_REVIVED GC in which + * the majority of compartments have been marked gray. + */ +extern JS_PUBLIC_API(DoCycleCollectionCallback) +SetDoCycleCollectionCallback(JSContext* cx, DoCycleCollectionCallback callback); + +/** + * Incremental GC defaults to enabled, but may be disabled for testing or in + * embeddings that have not yet implemented barriers on their native classes. + * There is not currently a way to re-enable incremental GC once it has been + * disabled on the runtime. + */ +extern JS_PUBLIC_API(void) +DisableIncrementalGC(JSContext* cx); + +/** + * Returns true if incremental GC is enabled. Simply having incremental GC + * enabled is not sufficient to ensure incremental collections are happening. + * See the comment "Incremental GC" above for reasons why incremental GC may be + * suppressed. Inspection of the "nonincremental reason" field of the + * GCDescription returned by GCSliceCallback may help narrow down the cause if + * collections are not happening incrementally when expected. + */ +extern JS_PUBLIC_API(bool) +IsIncrementalGCEnabled(JSContext* cx); + +/** + * Returns true while an incremental GC is ongoing, both when actively + * collecting and between slices. + */ +extern JS_PUBLIC_API(bool) +IsIncrementalGCInProgress(JSContext* cx); + +/** + * Returns true while an incremental GC is ongoing, both when actively + * collecting and between slices. 
+ */ +extern JS_PUBLIC_API(bool) +IsIncrementalGCInProgress(JSRuntime* rt); + +/* + * Returns true when writes to GC thing pointers (and reads from weak pointers) + * must call an incremental barrier. This is generally only true when running + * mutator code in-between GC slices. At other times, the barrier may be elided + * for performance. + */ +extern JS_PUBLIC_API(bool) +IsIncrementalBarrierNeeded(JSContext* cx); + +/* + * Notify the GC that a reference to a JSObject is about to be overwritten. + * This method must be called if IsIncrementalBarrierNeeded. + */ +extern JS_PUBLIC_API(void) +IncrementalPreWriteBarrier(JSObject* obj); + +/* + * Notify the GC that a weak reference to a GC thing has been read. + * This method must be called if IsIncrementalBarrierNeeded. + */ +extern JS_PUBLIC_API(void) +IncrementalReadBarrier(GCCellPtr thing); + +/** + * Returns true if the most recent GC ran incrementally. + */ +extern JS_PUBLIC_API(bool) +WasIncrementalGC(JSRuntime* rt); + +/* + * Generational GC: + * + * Note: Generational GC is not yet enabled by default. The following class + * is non-functional unless SpiderMonkey was configured with + * --enable-gcgenerational. + */ + +/** Ensure that generational GC is disabled within some scope. */ +class JS_PUBLIC_API(AutoDisableGenerationalGC) +{ + JSContext* cx; + + public: + explicit AutoDisableGenerationalGC(JSContext* cx); + ~AutoDisableGenerationalGC(); +}; + +/** + * Returns true if generational allocation and collection is currently enabled + * on the given runtime. + */ +extern JS_PUBLIC_API(bool) +IsGenerationalGCEnabled(JSRuntime* rt); + +/** + * Returns the GC's "number". This does not correspond directly to the number + * of GCs that have been run, but is guaranteed to be monotonically increasing + * with GC activity. + */ +extern JS_PUBLIC_API(size_t) +GetGCNumber(); + +/** + * Pass a subclass of this "abstract" class to callees to require that they + * never GC. 
Subclasses can use assertions or the hazard analysis to ensure no + * GC happens. + */ +class JS_PUBLIC_API(AutoRequireNoGC) +{ + protected: + AutoRequireNoGC() {} + ~AutoRequireNoGC() {} +}; + +/** + * Diagnostic assert (see MOZ_DIAGNOSTIC_ASSERT) that GC cannot occur while this + * class is live. This class does not disable the static rooting hazard + * analysis. + * + * This works by entering a GC unsafe region, which is checked on allocation and + * on GC. + */ +class JS_PUBLIC_API(AutoAssertNoGC) : public AutoRequireNoGC +{ +#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED + JSContext* cx_; + + public: + // This gets the context from TLS if it is not passed in. + explicit AutoAssertNoGC(JSContext* cx = nullptr); + ~AutoAssertNoGC(); +#else + public: + explicit AutoAssertNoGC(JSContext* cx = nullptr) {} + ~AutoAssertNoGC() {} +#endif +}; + +/** + * Disable the static rooting hazard analysis in the live region and assert in + * debug builds if any allocation that could potentially trigger a GC occurs + * while this guard object is live. This is most useful to help the exact + * rooting hazard analysis in complex regions, since it cannot understand + * dataflow. + * + * Note: GC behavior is unpredictable even when deterministic and is generally + * non-deterministic in practice. The fact that this guard has not + * asserted is not a guarantee that a GC cannot happen in the guarded + * region. As a rule, anyone performing a GC unsafe action should + * understand the GC properties of all code in that region and ensure + * that the hazard analysis is correct for that code, rather than relying + * on this class. 
+ */ +#ifdef DEBUG +class JS_PUBLIC_API(AutoSuppressGCAnalysis) : public AutoAssertNoGC +{ + public: + explicit AutoSuppressGCAnalysis(JSContext* cx = nullptr) : AutoAssertNoGC(cx) {} +} JS_HAZ_GC_SUPPRESSED; +#else +class JS_PUBLIC_API(AutoSuppressGCAnalysis) : public AutoRequireNoGC +{ + public: + explicit AutoSuppressGCAnalysis(JSContext* cx = nullptr) {} +} JS_HAZ_GC_SUPPRESSED; +#endif + +/** + * Assert that code is only ever called from a GC callback, disable the static + * rooting hazard analysis and assert if any allocation that could potentially + * trigger a GC occurs while this guard object is live. + * + * This is useful to make the static analysis ignore code that runs in GC + * callbacks. + */ +class JS_PUBLIC_API(AutoAssertGCCallback) : public AutoSuppressGCAnalysis +{ + public: +#ifdef DEBUG + AutoAssertGCCallback(); +#else + AutoAssertGCCallback() {} +#endif +}; + +/** + * Place AutoCheckCannotGC in scopes that you believe can never GC. These + * annotations will be verified both dynamically via AutoAssertNoGC, and + * statically with the rooting hazard analysis (implemented by making the + * analysis consider AutoCheckCannotGC to be a GC pointer, and therefore + * complain if it is live across a GC call.) It is useful when dealing with + * internal pointers to GC things where the GC thing itself may not be present + * for the static analysis: e.g. acquiring inline chars from a JSString* on the + * heap. + * + * We only do the assertion checking in DEBUG builds. + */ +#ifdef DEBUG +class JS_PUBLIC_API(AutoCheckCannotGC) : public AutoAssertNoGC +{ + public: + explicit AutoCheckCannotGC(JSContext* cx = nullptr) : AutoAssertNoGC(cx) {} +} JS_HAZ_GC_INVALIDATED; +#else +class JS_PUBLIC_API(AutoCheckCannotGC) : public AutoRequireNoGC +{ + public: + explicit AutoCheckCannotGC(JSContext* cx = nullptr) {} +} JS_HAZ_GC_INVALIDATED; +#endif + +/** + * Unsets the gray bit for anything reachable from |thing|. |kind| should not be + * JS::TraceKind::Shape. 
|thing| should be non-null. The return value indicates + * if anything was unmarked. + */ +extern JS_FRIEND_API(bool) +UnmarkGrayGCThingRecursively(GCCellPtr thing); + +} /* namespace JS */ + +namespace js { +namespace gc { + +static MOZ_ALWAYS_INLINE void +ExposeGCThingToActiveJS(JS::GCCellPtr thing) +{ + // GC things residing in the nursery cannot be gray: they have no mark bits. + // All live objects in the nursery are moved to tenured at the beginning of + // each GC slice, so the gray marker never sees nursery things. + if (IsInsideNursery(thing.asCell())) + return; + + // There's nothing to do for permanent GC things that might be owned by + // another runtime. + if (thing.mayBeOwnedByOtherRuntime()) + return; + + if (IsIncrementalBarrierNeededOnTenuredGCThing(thing)) + JS::IncrementalReadBarrier(thing); + else if (js::gc::detail::TenuredCellIsMarkedGray(thing.asCell())) + JS::UnmarkGrayGCThingRecursively(thing); + + MOZ_ASSERT(!js::gc::detail::TenuredCellIsMarkedGray(thing.asCell())); +} + +template +extern JS_PUBLIC_API(bool) +EdgeNeedsSweepUnbarrieredSlow(T* thingp); + +static MOZ_ALWAYS_INLINE bool +EdgeNeedsSweepUnbarriered(JSObject** objp) +{ + // This function does not handle updating nursery pointers. Raw JSObject + // pointers should be updated separately or replaced with + // JS::Heap which handles this automatically. + MOZ_ASSERT(!JS::CurrentThreadIsHeapMinorCollecting()); + if (IsInsideNursery(reinterpret_cast(*objp))) + return false; + + auto zone = JS::shadow::Zone::asShadowZone(detail::GetGCThingZone(uintptr_t(*objp))); + if (!zone->isGCSweepingOrCompacting()) + return false; + + return EdgeNeedsSweepUnbarrieredSlow(objp); +} + +} /* namespace gc */ +} /* namespace js */ + +namespace JS { + +/* + * This should be called when an object that is marked gray is exposed to the JS + * engine (by handing it to running JS code or writing it into live JS + * data). 
During incremental GC, since the gray bits haven't been computed yet, + * we conservatively mark the object black. + */ +static MOZ_ALWAYS_INLINE void +ExposeObjectToActiveJS(JSObject* obj) +{ + MOZ_ASSERT(obj); + MOZ_ASSERT(!js::gc::EdgeNeedsSweepUnbarrieredSlow(&obj)); + js::gc::ExposeGCThingToActiveJS(GCCellPtr(obj)); +} + +static MOZ_ALWAYS_INLINE void +ExposeScriptToActiveJS(JSScript* script) +{ + MOZ_ASSERT(!js::gc::EdgeNeedsSweepUnbarrieredSlow(&script)); + js::gc::ExposeGCThingToActiveJS(GCCellPtr(script)); +} + +/* + * Internal to Firefox. + */ +extern JS_FRIEND_API(void) +NotifyGCRootsRemoved(JSContext* cx); + +/* + * Internal to Firefox. + */ +extern JS_FRIEND_API(void) +NotifyDidPaint(JSContext* cx); + +} /* namespace JS */ + +#endif /* js_GCAPI_h */ diff --git a/gecko/include/js/GCAnnotations.h b/gecko/include/js/GCAnnotations.h new file mode 100644 index 0000000..366d787 --- /dev/null +++ b/gecko/include/js/GCAnnotations.h @@ -0,0 +1,57 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef js_GCAnnotations_h +#define js_GCAnnotations_h + +// Set of annotations for the rooting hazard analysis, used to categorize types +// and functions. +#ifdef XGILL_PLUGIN + +// Mark a type as being a GC thing (eg js::gc::Cell has this annotation). +# define JS_HAZ_GC_THING __attribute__((tag("GC Thing"))) + +// Mark a type as holding a pointer to a GC thing (eg JS::Value has this +// annotation.) +# define JS_HAZ_GC_POINTER __attribute__((tag("GC Pointer"))) + +// Mark a type as a rooted pointer, suitable for use on the stack (eg all +// Rooted instantiations should have this.) 
+# define JS_HAZ_ROOTED __attribute__((tag("Rooted Pointer"))) + +// Mark a type as something that should not be held live across a GC, but which +// is not itself a GC pointer. +# define JS_HAZ_GC_INVALIDATED __attribute__((tag("Invalidated by GC"))) + +// Mark a type that would otherwise be considered a GC Pointer (eg because it +// contains a JS::Value field) as a non-GC pointer. It is handled almost the +// same in the analysis as a rooted pointer, except it will not be reported as +// an unnecessary root if used across a GC call. This should rarely be used, +// but makes sense for something like ErrorResult, which only contains a GC +// pointer when it holds an exception (and it does its own rooting, +// conditionally.) +# define JS_HAZ_NON_GC_POINTER __attribute__((tag("Suppressed GC Pointer"))) + +// Mark a function as something that runs a garbage collection, potentially +// invalidating GC pointers. +# define JS_HAZ_GC_CALL __attribute__((tag("GC Call"))) + +// Mark an RAII class as suppressing GC within its scope. +# define JS_HAZ_GC_SUPPRESSED __attribute__((tag("Suppress GC"))) + +#else + +# define JS_HAZ_GC_THING +# define JS_HAZ_GC_POINTER +# define JS_HAZ_ROOTED +# define JS_HAZ_GC_INVALIDATED +# define JS_HAZ_NON_GC_POINTER +# define JS_HAZ_GC_CALL +# define JS_HAZ_GC_SUPPRESSED + +#endif + +#endif /* js_GCAnnotations_h */ diff --git a/gecko/include/js/GCPolicyAPI.h b/gecko/include/js/GCPolicyAPI.h new file mode 100644 index 0000000..09489e5 --- /dev/null +++ b/gecko/include/js/GCPolicyAPI.h @@ -0,0 +1,184 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +// GC Policy Mechanism + +// A GCPolicy controls how the GC interacts with both direct pointers to GC +// things (e.g. 
JSObject* or JSString*), tagged and/or optional pointers to GC +// things (e.g. Value or jsid), and C++ container types (e.g. +// JSPropertyDescriptor or GCHashMap). +// +// The GCPolicy provides at a minimum: +// +// static T initial() +// - Construct and return an empty T. +// +// static void trace(JSTracer, T* tp, const char* name) +// - Trace the edge |*tp|, calling the edge |name|. Containers like +// GCHashMap and GCHashSet use this method to trace their children. +// +// static bool needsSweep(T* tp) +// - Return true if |*tp| is about to be finalized. Otherwise, update the +// edge for moving GC, and return false. Containers like GCHashMap and +// GCHashSet use this method to decide when to remove an entry: if this +// function returns true on a key/value/member/etc, its entry is dropped +// from the container. Specializing this method is the standard way to +// get custom weak behavior from a container type. +// +// The default GCPolicy assumes that T has a default constructor and |trace| +// and |needsSweep| methods, and forwards to them. GCPolicy has appropriate +// specializations for pointers to GC things and pointer-like types like +// JS::Heap and mozilla::UniquePtr. +// +// There are some stock structs your specializations can inherit from. +// IgnoreGCPolicy does nothing. StructGCPolicy forwards the methods to the +// referent type T. + +#ifndef GCPolicyAPI_h +#define GCPolicyAPI_h + +#include "mozilla/Maybe.h" +#include "mozilla/UniquePtr.h" + +#include "js/TraceKind.h" +#include "js/TracingAPI.h" + +// Expand the given macro D for each public GC pointer. +#define FOR_EACH_PUBLIC_GC_POINTER_TYPE(D) \ + D(JS::Symbol*) \ + D(JSAtom*) \ + D(JSFunction*) \ + D(JSObject*) \ + D(JSScript*) \ + D(JSString*) + +// Expand the given macro D for each public tagged GC pointer type. 
+#define FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(D) \ + D(JS::Value) \ + D(jsid) + +#define FOR_EACH_PUBLIC_AGGREGATE_GC_POINTER_TYPE(D) \ + D(JSPropertyDescriptor) + +class JSAtom; +class JSFunction; +class JSObject; +class JSScript; +class JSString; +namespace JS { +class Symbol; +} + +namespace JS { + +// Defines a policy for container types with non-GC, i.e. C storage. This +// policy dispatches to the underlying struct for GC interactions. +template +struct StructGCPolicy +{ + static T initial() { + return T(); + } + + static void trace(JSTracer* trc, T* tp, const char* name) { + tp->trace(trc); + } + + static void sweep(T* tp) { + return tp->sweep(); + } + + static bool needsSweep(T* tp) { + return tp->needsSweep(); + } +}; + +// The default GC policy attempts to defer to methods on the underlying type. +// Most C++ structures that contain a default constructor, a trace function and +// a sweep function will work out of the box with Rooted, Handle, GCVector, +// and GCHash{Set,Map}. +template struct GCPolicy : public StructGCPolicy {}; + +// This policy ignores any GC interaction, e.g. for non-GC types. 
+template +struct IgnoreGCPolicy { + static T initial() { return T(); } + static void trace(JSTracer* trc, T* t, const char* name) {} + static bool needsSweep(T* v) { return false; } +}; +template <> struct GCPolicy : public IgnoreGCPolicy {}; +template <> struct GCPolicy : public IgnoreGCPolicy {}; + +template +struct GCPointerPolicy +{ + static T initial() { return nullptr; } + static void trace(JSTracer* trc, T* vp, const char* name) { + if (*vp) + js::UnsafeTraceManuallyBarrieredEdge(trc, vp, name); + } + static bool needsSweep(T* vp) { + if (*vp) + return js::gc::IsAboutToBeFinalizedUnbarriered(vp); + return false; + } +}; +template <> struct GCPolicy : public GCPointerPolicy {}; +template <> struct GCPolicy : public GCPointerPolicy {}; +template <> struct GCPolicy : public GCPointerPolicy {}; +template <> struct GCPolicy : public GCPointerPolicy {}; +template <> struct GCPolicy : public GCPointerPolicy {}; +template <> struct GCPolicy : public GCPointerPolicy {}; + +template +struct GCPolicy> +{ + static void trace(JSTracer* trc, JS::Heap* thingp, const char* name) { + TraceEdge(trc, thingp, name); + } + static bool needsSweep(JS::Heap* thingp) { + return *thingp && js::gc::EdgeNeedsSweep(thingp); + } +}; + +// GCPolicy> forwards the contained pointer to GCPolicy. +template +struct GCPolicy> +{ + static mozilla::UniquePtr initial() { return mozilla::UniquePtr(); } + static void trace(JSTracer* trc, mozilla::UniquePtr* tp, const char* name) { + if (tp->get()) + GCPolicy::trace(trc, tp->get(), name); + } + static bool needsSweep(mozilla::UniquePtr* tp) { + if (tp->get()) + return GCPolicy::needsSweep(tp->get()); + return false; + } +}; + +// GCPolicy> forwards tracing/sweeping to GCPolicy if +// when the Maybe is full. 
+template +struct GCPolicy> +{ + static mozilla::Maybe initial() { return mozilla::Maybe(); } + static void trace(JSTracer* trc, mozilla::Maybe* tp, const char* name) { + if (tp->isSome()) + GCPolicy::trace(trc, tp->ptr(), name); + } + static bool needsSweep(mozilla::Maybe* tp) { + if (tp->isSome()) + return GCPolicy::needsSweep(tp->ptr()); + return false; + } +}; + +template <> struct GCPolicy; // see Realm.h + +} // namespace JS + +#endif // GCPolicyAPI_h diff --git a/gecko/include/js/HashTable.h b/gecko/include/js/HashTable.h new file mode 100644 index 0000000..5ec07d8 --- /dev/null +++ b/gecko/include/js/HashTable.h @@ -0,0 +1,1914 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef js_HashTable_h +#define js_HashTable_h + +#include "mozilla/Alignment.h" +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/Casting.h" +#include "mozilla/HashFunctions.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/Move.h" +#include "mozilla/Opaque.h" +#include "mozilla/PodOperations.h" +#include "mozilla/ReentrancyGuard.h" +#include "mozilla/TemplateLib.h" +#include "mozilla/TypeTraits.h" +#include "mozilla/UniquePtr.h" + +#include "js/Utility.h" + +namespace js { + +class TempAllocPolicy; +template struct DefaultHasher; +template class HashMapEntry; +namespace detail { + template class HashTableEntry; + template class HashTable; +} // namespace detail + +/*****************************************************************************/ + +// The "generation" of a hash table is an opaque value indicating the state of +// modification of the hash table through its lifetime. 
If the generation of +// a hash table compares equal at times T1 and T2, then lookups in the hash +// table, pointers to (or into) hash table entries, etc. at time T1 are valid +// at time T2. If the generation compares unequal, these computations are all +// invalid and must be performed again to be used. +// +// Generations are meaningfully comparable only with respect to a single hash +// table. It's always nonsensical to compare the generation of distinct hash +// tables H1 and H2. +using Generation = mozilla::Opaque; + +// A JS-friendly, STL-like container providing a hash-based map from keys to +// values. In particular, HashMap calls constructors and destructors of all +// objects added so non-PODs may be used safely. +// +// Key/Value requirements: +// - movable, destructible, assignable +// HashPolicy requirements: +// - see Hash Policy section below +// AllocPolicy: +// - see jsalloc.h +// +// Note: +// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members +// called by HashMap must not call back into the same HashMap object. +// - Due to the lack of exception handling, the user must call |init()|. +template , + class AllocPolicy = TempAllocPolicy> +class HashMap +{ + typedef HashMapEntry TableEntry; + + struct MapHashPolicy : HashPolicy + { + using Base = HashPolicy; + typedef Key KeyType; + static const Key& getKey(TableEntry& e) { return e.key(); } + static void setKey(TableEntry& e, Key& k) { HashPolicy::rekey(e.mutableKey(), k); } + }; + + typedef detail::HashTable Impl; + Impl impl; + + public: + typedef typename HashPolicy::Lookup Lookup; + typedef TableEntry Entry; + + // HashMap construction is fallible (due to OOM); thus the user must call + // init after constructing a HashMap and check the return value. 
+ explicit HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {} + MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); } + bool initialized() const { return impl.initialized(); } + + // Return whether the given lookup value is present in the map. E.g.: + // + // typedef HashMap HM; + // HM h; + // if (HM::Ptr p = h.lookup(3)) { + // const HM::Entry& e = *p; // p acts like a pointer to Entry + // assert(p->key == 3); // Entry contains the key + // char val = p->value; // and value + // } + // + // Also see the definition of Ptr in HashTable above (with T = Entry). + typedef typename Impl::Ptr Ptr; + MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); } + + // Like lookup, but does not assert if two threads call lookup at the same + // time. Only use this method when none of the threads will modify the map. + MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const { + return impl.readonlyThreadsafeLookup(l); + } + + // Assuming |p.found()|, remove |*p|. + void remove(Ptr p) { impl.remove(p); } + + // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient + // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using + // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.: + // + // typedef HashMap HM; + // HM h; + // HM::AddPtr p = h.lookupForAdd(3); + // if (!p) { + // if (!h.add(p, 3, 'a')) + // return false; + // } + // const HM::Entry& e = *p; // p acts like a pointer to Entry + // assert(p->key == 3); // Entry contains the key + // char val = p->value; // and value + // + // Also see the definition of AddPtr in HashTable above (with T = Entry). + // + // N.B. The caller must ensure that no mutating hash table operations + // occur between a pair of |lookupForAdd| and |add| calls. To avoid + // looking up the key a second time, the caller may use the more efficient + // relookupOrAdd method. 
This method reuses part of the hashing computation + // to more efficiently insert the key if it has not been added. For + // example, a mutation-handling version of the previous example: + // + // HM::AddPtr p = h.lookupForAdd(3); + // if (!p) { + // call_that_may_mutate_h(); + // if (!h.relookupOrAdd(p, 3, 'a')) + // return false; + // } + // const HM::Entry& e = *p; + // assert(p->key == 3); + // char val = p->value; + typedef typename Impl::AddPtr AddPtr; + MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const { + return impl.lookupForAdd(l); + } + + template + MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) { + return impl.add(p, + mozilla::Forward(k), + mozilla::Forward(v)); + } + + template + MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k) { + return impl.add(p, mozilla::Forward(k), Value()); + } + + template + MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) { + return impl.relookupOrAdd(p, k, + mozilla::Forward(k), + mozilla::Forward(v)); + } + + // |all()| returns a Range containing |count()| elements. E.g.: + // + // typedef HashMap HM; + // HM h; + // for (HM::Range r = h.all(); !r.empty(); r.popFront()) + // char c = r.front().value(); + // + // Also see the definition of Range in HashTable above (with T = Entry). + typedef typename Impl::Range Range; + Range all() const { return impl.all(); } + + // Typedef for the enumeration class. An Enum may be used to examine and + // remove table entries: + // + // typedef HashMap HM; + // HM s; + // for (HM::Enum e(s); !e.empty(); e.popFront()) + // if (e.front().value() == 'l') + // e.removeFront(); + // + // Table resize may occur in Enum's destructor. Also see the definition of + // Enum in HashTable above (with T = Entry). + typedef typename Impl::Enum Enum; + + // Remove all entries. This does not shrink the table. For that consider + // using the finish() method. + void clear() { impl.clear(); } + + // Remove all entries. 
Unlike clear() this method tries to shrink the table. + // Unlike finish() it does not require the map to be initialized again. + void clearAndShrink() { impl.clearAndShrink(); } + + // Remove all the entries and release all internal buffers. The map must + // be initialized again before any use. + void finish() { impl.finish(); } + + // Does the table contain any entries? + bool empty() const { return impl.empty(); } + + // Number of live elements in the map. + uint32_t count() const { return impl.count(); } + + // Total number of allocation in the dynamic table. Note: resize will + // happen well before count() == capacity(). + size_t capacity() const { return impl.capacity(); } + + // Don't just call |impl.sizeOfExcludingThis()| because there's no + // guarantee that |impl| is the first field in HashMap. + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return impl.sizeOfExcludingThis(mallocSizeOf); + } + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf); + } + + Generation generation() const { + return impl.generation(); + } + + /************************************************** Shorthand operations */ + + bool has(const Lookup& l) const { + return impl.lookup(l).found(); + } + + // Overwrite existing value with v. Return false on oom. + template + MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v) { + AddPtr p = lookupForAdd(k); + if (p) { + p->value() = mozilla::Forward(v); + return true; + } + return add(p, mozilla::Forward(k), mozilla::Forward(v)); + } + + // Like put, but assert that the given key is not already present. + template + MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v) { + return impl.putNew(k, mozilla::Forward(k), mozilla::Forward(v)); + } + + // Only call this to populate an empty map after reserving space with init(). 
+ template + void putNewInfallible(KeyInput&& k, ValueInput&& v) { + impl.putNewInfallible(k, mozilla::Forward(k), mozilla::Forward(v)); + } + + // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom. + Ptr lookupWithDefault(const Key& k, const Value& defaultValue) { + AddPtr p = lookupForAdd(k); + if (p) + return p; + bool ok = add(p, k, defaultValue); + MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom. + (void)ok; + return p; + } + + // Remove if present. + void remove(const Lookup& l) { + if (Ptr p = lookup(l)) + remove(p); + } + + // Infallibly rekey one entry, if necessary. + // Requires template parameters Key and HashPolicy::Lookup to be the same type. + void rekeyIfMoved(const Key& old_key, const Key& new_key) { + if (old_key != new_key) + rekeyAs(old_key, new_key, new_key); + } + + // Infallibly rekey one entry if present, and return whether that happened. + bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const Key& new_key) { + if (Ptr p = lookup(old_lookup)) { + impl.rekeyAndMaybeRehash(p, new_lookup, new_key); + return true; + } + return false; + } + + // HashMap is movable + HashMap(HashMap&& rhs) : impl(mozilla::Move(rhs.impl)) {} + void operator=(HashMap&& rhs) { + MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited"); + impl = mozilla::Move(rhs.impl); + } + + private: + // HashMap is not copyable or assignable + HashMap(const HashMap& hm) = delete; + HashMap& operator=(const HashMap& hm) = delete; + + friend class Impl::Enum; +}; + +/*****************************************************************************/ + +// A JS-friendly, STL-like container providing a hash-based set of values. In +// particular, HashSet calls constructors and destructors of all objects added +// so non-PODs may be used safely. 
+// +// T requirements: +// - movable, destructible, assignable +// HashPolicy requirements: +// - see Hash Policy section below +// AllocPolicy: +// - see jsalloc.h +// +// Note: +// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by +// HashSet must not call back into the same HashSet object. +// - Due to the lack of exception handling, the user must call |init()|. +template , + class AllocPolicy = TempAllocPolicy> +class HashSet +{ + struct SetOps : HashPolicy + { + using Base = HashPolicy; + typedef T KeyType; + static const KeyType& getKey(const T& t) { return t; } + static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); } + }; + + typedef detail::HashTable Impl; + Impl impl; + + public: + typedef typename HashPolicy::Lookup Lookup; + typedef T Entry; + + // HashSet construction is fallible (due to OOM); thus the user must call + // init after constructing a HashSet and check the return value. + explicit HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {} + MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); } + bool initialized() const { return impl.initialized(); } + + // Return whether the given lookup value is present in the map. E.g.: + // + // typedef HashSet HS; + // HS h; + // if (HS::Ptr p = h.lookup(3)) { + // assert(*p == 3); // p acts like a pointer to int + // } + // + // Also see the definition of Ptr in HashTable above. + typedef typename Impl::Ptr Ptr; + MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const { return impl.lookup(l); } + + // Like lookup, but does not assert if two threads call lookup at the same + // time. Only use this method when none of the threads will modify the map. + MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& l) const { + return impl.readonlyThreadsafeLookup(l); + } + + // Assuming |p.found()|, remove |*p|. 
+ void remove(Ptr p) { impl.remove(p); } + + // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient + // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using + // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.: + // + // typedef HashSet HS; + // HS h; + // HS::AddPtr p = h.lookupForAdd(3); + // if (!p) { + // if (!h.add(p, 3)) + // return false; + // } + // assert(*p == 3); // p acts like a pointer to int + // + // Also see the definition of AddPtr in HashTable above. + // + // N.B. The caller must ensure that no mutating hash table operations + // occur between a pair of |lookupForAdd| and |add| calls. To avoid + // looking up the key a second time, the caller may use the more efficient + // relookupOrAdd method. This method reuses part of the hashing computation + // to more efficiently insert the key if it has not been added. For + // example, a mutation-handling version of the previous example: + // + // HS::AddPtr p = h.lookupForAdd(3); + // if (!p) { + // call_that_may_mutate_h(); + // if (!h.relookupOrAdd(p, 3, 3)) + // return false; + // } + // assert(*p == 3); + // + // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the + // entry |t|, where the caller ensures match(l,t). + typedef typename Impl::AddPtr AddPtr; + MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const { + return impl.lookupForAdd(l); + } + + template + MOZ_MUST_USE bool add(AddPtr& p, U&& u) { + return impl.add(p, mozilla::Forward(u)); + } + + template + MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) { + return impl.relookupOrAdd(p, l, mozilla::Forward(u)); + } + + // |all()| returns a Range containing |count()| elements: + // + // typedef HashSet HS; + // HS h; + // for (HS::Range r = h.all(); !r.empty(); r.popFront()) + // int i = r.front(); + // + // Also see the definition of Range in HashTable above. 
+ typedef typename Impl::Range Range; + Range all() const { return impl.all(); } + + // Typedef for the enumeration class. An Enum may be used to examine and + // remove table entries: + // + // typedef HashSet HS; + // HS s; + // for (HS::Enum e(s); !e.empty(); e.popFront()) + // if (e.front() == 42) + // e.removeFront(); + // + // Table resize may occur in Enum's destructor. Also see the definition of + // Enum in HashTable above. + typedef typename Impl::Enum Enum; + + // Remove all entries. This does not shrink the table. For that consider + // using the finish() method. + void clear() { impl.clear(); } + + // Remove all entries. Unlike clear() this method tries to shrink the table. + // Unlike finish() it does not require the set to be initialized again. + void clearAndShrink() { impl.clearAndShrink(); } + + // Remove all the entries and release all internal buffers. The set must + // be initialized again before any use. + void finish() { impl.finish(); } + + // Does the table contain any entries? + bool empty() const { return impl.empty(); } + + // Number of live elements in the map. + uint32_t count() const { return impl.count(); } + + // Total number of allocation in the dynamic table. Note: resize will + // happen well before count() == capacity(). + size_t capacity() const { return impl.capacity(); } + + // Don't just call |impl.sizeOfExcludingThis()| because there's no + // guarantee that |impl| is the first field in HashSet. 
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    Generation generation() const {
+        return impl.generation();
+    }
+
+    /************************************************** Shorthand operations */
+
+    bool has(const Lookup& l) const {
+        return impl.lookup(l).found();
+    }
+
+    // Add |u| if it is not present already. Return false on oom.
+    // (The |<typename U>| template heads and |Forward<U>| arguments in the
+    // methods below were stripped from this hunk by extraction and are
+    // restored here from the upstream header.)
+    template <typename U>
+    MOZ_MUST_USE bool put(U&& u) {
+        AddPtr p = lookupForAdd(u);
+        return p ? true : add(p, mozilla::Forward<U>(u));
+    }
+
+    // Like put, but assert that the given key is not already present.
+    template <typename U>
+    MOZ_MUST_USE bool putNew(U&& u) {
+        return impl.putNew(u, mozilla::Forward<U>(u));
+    }
+
+    template <typename U>
+    MOZ_MUST_USE bool putNew(const Lookup& l, U&& u) {
+        return impl.putNew(l, mozilla::Forward<U>(u));
+    }
+
+    // Only call this to populate an empty set after reserving space with init().
+    template <typename U>
+    void putNewInfallible(const Lookup& l, U&& u) {
+        impl.putNewInfallible(l, mozilla::Forward<U>(u));
+    }
+
+    void remove(const Lookup& l) {
+        if (Ptr p = lookup(l))
+            remove(p);
+    }
+
+    // Infallibly rekey one entry, if present.
+    // Requires template parameters T and HashPolicy::Lookup to be the same type.
+    void rekeyIfMoved(const Lookup& old_value, const T& new_value) {
+        if (old_value != new_value)
+            rekeyAs(old_value, new_value, new_value);
+    }
+
+    // Infallibly rekey one entry if present, and return whether that happened.
+    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const T& new_value) {
+        if (Ptr p = lookup(old_lookup)) {
+            impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
+            return true;
+        }
+        return false;
+    }
+
+    // Infallibly replace the current key at |p| with an equivalent key.
+    // Specifically, both HashPolicy::hash and HashPolicy::match must return
+    // identical results for the new and old key when applied against all
+    // possible matching values.
+    void replaceKey(Ptr p, const T& new_value) {
+        MOZ_ASSERT(p.found());
+        MOZ_ASSERT(*p != new_value);
+        MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value));
+        MOZ_ASSERT(HashPolicy::match(*p, new_value));
+        // The set stores elements as const; casting away constness here is
+        // sound because the new key is hash- and match-equivalent to the old
+        // one, so the element's bucket does not change. (The |<T&>| target
+        // type of this cast was stripped from the hunk by extraction and is
+        // restored from the upstream header.)
+        const_cast<T&>(*p) = new_value;
+    }
+
+    // HashSet is movable
+    HashSet(HashSet&& rhs) : impl(mozilla::Move(rhs.impl)) {}
+    void operator=(HashSet&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        impl = mozilla::Move(rhs.impl);
+    }
+
+  private:
+    // HashSet is not copyable or assignable
+    HashSet(const HashSet& hs) = delete;
+    HashSet& operator=(const HashSet& hs) = delete;
+
+    friend class Impl::Enum;
+};
+
+/*****************************************************************************/
+
+// Hash Policy
+//
+// A hash policy P for a hash table with key-type Key must provide:
+//  - a type |P::Lookup| to use to lookup table entries;
+//  - a static member function |P::hash| with signature
+//
+//      static js::HashNumber hash(Lookup)
+//
+//    to use to hash the lookup type; and
+//  - a static member function |P::match| with signature
+//
+//      static bool match(Key, Lookup)
+//
+//    to use to test equality of key and lookup values.
+//
+// Normally, Lookup = Key. In general, though, different values and types of
+// values can be used to lookup and store. If a Lookup value |l| is != to the
+// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
+//
+//   js::HashSet<Key, P>::AddPtr p = h.lookup(l);
+//   if (!p) {
+//     assert(P::match(k, l));  // must hold
+//     h.add(p, k);
+//   }
+
+// Pointer hashing policy that uses HashGeneric() to create good hashes for
+// pointers. Note that we don't shift out the lowest k bits to generate a
+// good distribution for arena allocated pointers.
+template <typename Key>
+struct PointerHasher
+{
+    typedef Key Lookup;
+    static HashNumber hash(const Lookup& l) {
+        size_t word = reinterpret_cast<size_t>(l);
+        return mozilla::HashGeneric(word);
+    }
+    static bool match(const Key& k, const Lookup& l) {
+        return k == l;
+    }
+    static void rekey(Key& k, const Key& newKey) {
+        k = newKey;
+    }
+};
+
+// Default hash policy: just use the 'lookup' value. This of course only
+// works if the lookup value is integral. HashTable applies ScrambleHashCode to
+// the result of the 'hash' which means that it is 'ok' if the lookup value is
+// not well distributed over the HashNumber domain.
+template <class Key>
+struct DefaultHasher
+{
+    typedef Key Lookup;
+    static HashNumber hash(const Lookup& l) {
+        // Hash if can implicitly cast to hash number type.
+        return l;
+    }
+    static bool match(const Key& k, const Lookup& l) {
+        // Use builtin or overloaded operator==.
+        return k == l;
+    }
+    static void rekey(Key& k, const Key& newKey) {
+        k = newKey;
+    }
+};
+
+// Specialize hashing policy for pointer types. It assumes that the type is
+// at least word-aligned. For types with smaller size use PointerHasher.
+// (All template heads and arguments in this group were stripped from the
+// hunk by extraction and are restored from the upstream header.)
+template <class T>
+struct DefaultHasher<T*> : PointerHasher<T*>
+{};
+
+// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
+// raw pointer to PointerHasher.
+template <class T, class D>
+struct DefaultHasher<mozilla::UniquePtr<T, D>>
+{
+    using Lookup = mozilla::UniquePtr<T, D>;
+    using PtrHasher = PointerHasher<T*>;
+
+    static HashNumber hash(const Lookup& l) {
+        return PtrHasher::hash(l.get());
+    }
+    static bool match(const mozilla::UniquePtr<T, D>& k, const Lookup& l) {
+        return PtrHasher::match(k.get(), l.get());
+    }
+    static void rekey(mozilla::UniquePtr<T, D>& k, mozilla::UniquePtr<T, D>&& newKey) {
+        k = mozilla::Move(newKey);
+    }
+};
+
+// For doubles, we can xor the two uint32s.
+template <> +struct DefaultHasher +{ + typedef double Lookup; + static HashNumber hash(double d) { + static_assert(sizeof(HashNumber) == 4, + "subsequent code assumes a four-byte hash"); + uint64_t u = mozilla::BitwiseCast(d); + return HashNumber(u ^ (u >> 32)); + } + static bool match(double lhs, double rhs) { + return mozilla::BitwiseCast(lhs) == mozilla::BitwiseCast(rhs); + } +}; + +template <> +struct DefaultHasher +{ + typedef float Lookup; + static HashNumber hash(float f) { + static_assert(sizeof(HashNumber) == 4, + "subsequent code assumes a four-byte hash"); + return HashNumber(mozilla::BitwiseCast(f)); + } + static bool match(float lhs, float rhs) { + return mozilla::BitwiseCast(lhs) == mozilla::BitwiseCast(rhs); + } +}; + +// A hash policy that compares C strings. +struct CStringHasher +{ + typedef const char* Lookup; + static js::HashNumber hash(Lookup l) { + return mozilla::HashString(l); + } + static bool match(const char* key, Lookup lookup) { + return strcmp(key, lookup) == 0; + } +}; + +// Fallible hashing interface. +// +// Most of the time generating a hash code is infallible so this class provides +// default methods that always succeed. Specialize this class for your own hash +// policy to provide fallible hashing. +// +// This is used by MovableCellHasher to handle the fact that generating a unique +// ID for cell pointer may fail due to OOM. +template +struct FallibleHashMethods +{ + // Return true if a hashcode is already available for its argument. Once + // this returns true for a specific argument it must continue to do so. + template static bool hasHash(Lookup&& l) { return true; } + + // Fallible method to ensure a hashcode exists for its argument and create + // one if not. Returns false on error, e.g. out of memory. 
+ template static bool ensureHash(Lookup&& l) { return true; } +}; + +template +static bool +HasHash(Lookup&& l) { + return FallibleHashMethods::hasHash(mozilla::Forward(l)); +} + +template +static bool +EnsureHash(Lookup&& l) { + return FallibleHashMethods::ensureHash(mozilla::Forward(l)); +} + +/*****************************************************************************/ + +// Both HashMap and HashSet are implemented by a single HashTable that is even +// more heavily parameterized than the other two. This leaves HashTable gnarly +// and extremely coupled to HashMap and HashSet; thus code should not use +// HashTable directly. + +template +class HashMapEntry +{ + Key key_; + Value value_; + + template friend class detail::HashTable; + template friend class detail::HashTableEntry; + template friend class HashMap; + + public: + template + HashMapEntry(KeyInput&& k, ValueInput&& v) + : key_(mozilla::Forward(k)), + value_(mozilla::Forward(v)) + {} + + HashMapEntry(HashMapEntry&& rhs) + : key_(mozilla::Move(rhs.key_)), + value_(mozilla::Move(rhs.value_)) + {} + + void operator=(HashMapEntry&& rhs) { + key_ = mozilla::Move(rhs.key_); + value_ = mozilla::Move(rhs.value_); + } + + typedef Key KeyType; + typedef Value ValueType; + + const Key& key() const { return key_; } + Key& mutableKey() { return key_; } + const Value& value() const { return value_; } + Value& value() { return value_; } + + private: + HashMapEntry(const HashMapEntry&) = delete; + void operator=(const HashMapEntry&) = delete; +}; + +} // namespace js + +namespace mozilla { + +template +struct IsPod > : IsPod {}; + +template +struct IsPod > + : IntegralConstant::value && IsPod::value> +{}; + +} // namespace mozilla + +namespace js { + +namespace detail { + +template +class HashTable; + +template +class HashTableEntry +{ + template friend class HashTable; + typedef typename mozilla::RemoveConst::Type NonConstT; + + HashNumber keyHash; + mozilla::AlignedStorage2 mem; + + static const HashNumber sFreeKey 
= 0; + static const HashNumber sRemovedKey = 1; + static const HashNumber sCollisionBit = 1; + + static bool isLiveHash(HashNumber hash) + { + return hash > sRemovedKey; + } + + HashTableEntry(const HashTableEntry&) = delete; + void operator=(const HashTableEntry&) = delete; + ~HashTableEntry() = delete; + + public: + // NB: HashTableEntry is treated as a POD: no constructor or destructor calls. + + void destroyIfLive() { + if (isLive()) + mem.addr()->~T(); + } + + void destroy() { + MOZ_ASSERT(isLive()); + mem.addr()->~T(); + } + + void swap(HashTableEntry* other) { + if (this == other) + return; + MOZ_ASSERT(isLive()); + if (other->isLive()) { + mozilla::Swap(*mem.addr(), *other->mem.addr()); + } else { + *other->mem.addr() = mozilla::Move(*mem.addr()); + destroy(); + } + mozilla::Swap(keyHash, other->keyHash); + } + + T& get() { MOZ_ASSERT(isLive()); return *mem.addr(); } + NonConstT& getMutable() { MOZ_ASSERT(isLive()); return *mem.addr(); } + + bool isFree() const { return keyHash == sFreeKey; } + void clearLive() { MOZ_ASSERT(isLive()); keyHash = sFreeKey; mem.addr()->~T(); } + void clear() { if (isLive()) mem.addr()->~T(); keyHash = sFreeKey; } + bool isRemoved() const { return keyHash == sRemovedKey; } + void removeLive() { MOZ_ASSERT(isLive()); keyHash = sRemovedKey; mem.addr()->~T(); } + bool isLive() const { return isLiveHash(keyHash); } + void setCollision() { MOZ_ASSERT(isLive()); keyHash |= sCollisionBit; } + void unsetCollision() { keyHash &= ~sCollisionBit; } + bool hasCollision() const { return keyHash & sCollisionBit; } + bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; } + HashNumber getKeyHash() const { return keyHash & ~sCollisionBit; } + + template + void setLive(HashNumber hn, Args&&... 
args) + { + MOZ_ASSERT(!isLive()); + keyHash = hn; + new(mem.addr()) T(mozilla::Forward(args)...); + MOZ_ASSERT(isLive()); + } +}; + +template +class HashTable : private AllocPolicy +{ + friend class mozilla::ReentrancyGuard; + + typedef typename mozilla::RemoveConst::Type NonConstT; + typedef typename HashPolicy::KeyType Key; + typedef typename HashPolicy::Lookup Lookup; + + public: + typedef HashTableEntry Entry; + + // A nullable pointer to a hash table element. A Ptr |p| can be tested + // either explicitly |if (p.found()) p->...| or using boolean conversion + // |if (p) p->...|. Ptr objects must not be used after any mutating hash + // table operations unless |generation()| is tested. + class Ptr + { + friend class HashTable; + + Entry* entry_; +#ifdef JS_DEBUG + const HashTable* table_; + Generation generation; +#endif + + protected: + Ptr(Entry& entry, const HashTable& tableArg) + : entry_(&entry) +#ifdef JS_DEBUG + , table_(&tableArg) + , generation(tableArg.generation()) +#endif + {} + + public: + Ptr() + : entry_(nullptr) +#ifdef JS_DEBUG + , table_(nullptr) + , generation(0) +#endif + {} + + bool isValid() const { + return !!entry_; + } + + bool found() const { + if (!isValid()) + return false; +#ifdef JS_DEBUG + MOZ_ASSERT(generation == table_->generation()); +#endif + return entry_->isLive(); + } + + explicit operator bool() const { + return found(); + } + + bool operator==(const Ptr& rhs) const { + MOZ_ASSERT(found() && rhs.found()); + return entry_ == rhs.entry_; + } + + bool operator!=(const Ptr& rhs) const { +#ifdef JS_DEBUG + MOZ_ASSERT(generation == table_->generation()); +#endif + return !(*this == rhs); + } + + T& operator*() const { +#ifdef JS_DEBUG + MOZ_ASSERT(found()); + MOZ_ASSERT(generation == table_->generation()); +#endif + return entry_->get(); + } + + T* operator->() const { +#ifdef JS_DEBUG + MOZ_ASSERT(found()); + MOZ_ASSERT(generation == table_->generation()); +#endif + return &entry_->get(); + } + }; + + // A Ptr that can be used 
to add a key after a failed lookup. + class AddPtr : public Ptr + { + friend class HashTable; + HashNumber keyHash; +#ifdef JS_DEBUG + uint64_t mutationCount; +#endif + + AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn) + : Ptr(entry, tableArg) + , keyHash(hn) +#ifdef JS_DEBUG + , mutationCount(tableArg.mutationCount) +#endif + {} + + public: + AddPtr() : keyHash(0) {} + }; + + // A collection of hash table entries. The collection is enumerated by + // calling |front()| followed by |popFront()| as long as |!empty()|. As + // with Ptr/AddPtr, Range objects must not be used after any mutating hash + // table operation unless the |generation()| is tested. + class Range + { + protected: + friend class HashTable; + + Range(const HashTable& tableArg, Entry* c, Entry* e) + : cur(c) + , end(e) +#ifdef JS_DEBUG + , table_(&tableArg) + , mutationCount(tableArg.mutationCount) + , generation(tableArg.generation()) + , validEntry(true) +#endif + { + while (cur < end && !cur->isLive()) + ++cur; + } + + Entry* cur; + Entry* end; +#ifdef JS_DEBUG + const HashTable* table_; + uint64_t mutationCount; + Generation generation; + bool validEntry; +#endif + + public: + Range() + : cur(nullptr) + , end(nullptr) +#ifdef JS_DEBUG + , table_(nullptr) + , mutationCount(0) + , generation(0) + , validEntry(false) +#endif + {} + + bool empty() const { +#ifdef JS_DEBUG + MOZ_ASSERT(generation == table_->generation()); + MOZ_ASSERT(mutationCount == table_->mutationCount); +#endif + return cur == end; + } + + T& front() const { + MOZ_ASSERT(!empty()); +#ifdef JS_DEBUG + MOZ_ASSERT(validEntry); + MOZ_ASSERT(generation == table_->generation()); + MOZ_ASSERT(mutationCount == table_->mutationCount); +#endif + return cur->get(); + } + + void popFront() { + MOZ_ASSERT(!empty()); +#ifdef JS_DEBUG + MOZ_ASSERT(generation == table_->generation()); + MOZ_ASSERT(mutationCount == table_->mutationCount); +#endif + while (++cur < end && !cur->isLive()) + continue; +#ifdef JS_DEBUG + validEntry = 
true; +#endif + } + }; + + // A Range whose lifetime delimits a mutating enumeration of a hash table. + // Since rehashing when elements were removed during enumeration would be + // bad, it is postponed until the Enum is destructed. Since the Enum's + // destructor touches the hash table, the user must ensure that the hash + // table is still alive when the destructor runs. + class Enum : public Range + { + friend class HashTable; + + HashTable& table_; + bool rekeyed; + bool removed; + + // Enum is movable but not copyable. + Enum(const Enum&) = delete; + void operator=(const Enum&) = delete; + + public: + template + explicit Enum(Map& map) + : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {} + + MOZ_IMPLICIT Enum(Enum&& other) + : Range(other), table_(other.table_), rekeyed(other.rekeyed), removed(other.removed) + { + other.rekeyed = false; + other.removed = false; + } + + // Removes the |front()| element from the table, leaving |front()| + // invalid until the next call to |popFront()|. For example: + // + // HashSet s; + // for (HashSet::Enum e(s); !e.empty(); e.popFront()) + // if (e.front() == 42) + // e.removeFront(); + void removeFront() { + table_.remove(*this->cur); + removed = true; +#ifdef JS_DEBUG + this->validEntry = false; + this->mutationCount = table_.mutationCount; +#endif + } + + NonConstT& mutableFront() { + MOZ_ASSERT(!this->empty()); +#ifdef JS_DEBUG + MOZ_ASSERT(this->validEntry); + MOZ_ASSERT(this->generation == this->Range::table_->generation()); + MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount); +#endif + return this->cur->getMutable(); + } + + // Removes the |front()| element and re-inserts it into the table with + // a new key at the new Lookup position. |front()| is invalid after + // this operation until the next call to |popFront()|. 
+ void rekeyFront(const Lookup& l, const Key& k) { + MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get())); + Ptr p(*this->cur, table_); + table_.rekeyWithoutRehash(p, l, k); + rekeyed = true; +#ifdef JS_DEBUG + this->validEntry = false; + this->mutationCount = table_.mutationCount; +#endif + } + + void rekeyFront(const Key& k) { + rekeyFront(k, k); + } + + // Potentially rehashes the table. + ~Enum() { + if (rekeyed) { + table_.gen++; + table_.checkOverRemoved(); + } + + if (removed) + table_.compactIfUnderloaded(); + } + }; + + // HashTable is movable + HashTable(HashTable&& rhs) + : AllocPolicy(rhs) + { + mozilla::PodAssign(this, &rhs); + rhs.table = nullptr; + } + void operator=(HashTable&& rhs) { + MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited"); + if (table) + destroyTable(*this, table, capacity()); + mozilla::PodAssign(this, &rhs); + rhs.table = nullptr; + } + + private: + // HashTable is not copyable or assignable + HashTable(const HashTable&) = delete; + void operator=(const HashTable&) = delete; + + private: + static const size_t CAP_BITS = 30; + + public: + uint64_t gen:56; // entry storage generation number + uint64_t hashShift:8; // multiplicative hash shift + Entry* table; // entry storage + uint32_t entryCount; // number of entries in table + uint32_t removedCount; // removed entry sentinels in table + +#ifdef JS_DEBUG + uint64_t mutationCount; + mutable bool mEntered; + // Note that some updates to these stats are not thread-safe. See the + // comment on the three-argument overloading of HashTable::lookup(). 
+ mutable struct Stats + { + uint32_t searches; // total number of table searches + uint32_t steps; // hash chain links traversed + uint32_t hits; // searches that found key + uint32_t misses; // searches that didn't find key + uint32_t addOverRemoved; // adds that recycled a removed entry + uint32_t removes; // calls to remove + uint32_t removeFrees; // calls to remove that freed the entry + uint32_t grows; // table expansions + uint32_t shrinks; // table contractions + uint32_t compresses; // table compressions + uint32_t rehashes; // tombstone decontaminations + } stats; +# define METER(x) x +#else +# define METER(x) +#endif + + // The default initial capacity is 32 (enough to hold 16 elements), but it + // can be as low as 4. + static const unsigned sMinCapacityLog2 = 2; + static const unsigned sMinCapacity = 1 << sMinCapacityLog2; + static const unsigned sMaxInit = JS_BIT(CAP_BITS - 1); + static const unsigned sMaxCapacity = JS_BIT(CAP_BITS); + static const unsigned sHashBits = mozilla::tl::BitSize::value; + + // Hash-table alpha is conceptually a fraction, but to avoid floating-point + // math we implement it as a ratio of integers. + static const uint8_t sAlphaDenominator = 4; + static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4 + static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4 + + static const HashNumber sFreeKey = Entry::sFreeKey; + static const HashNumber sRemovedKey = Entry::sRemovedKey; + static const HashNumber sCollisionBit = Entry::sCollisionBit; + + void setTableSizeLog2(unsigned sizeLog2) + { + hashShift = sHashBits - sizeLog2; + } + + static bool isLiveHash(HashNumber hash) + { + return Entry::isLiveHash(hash); + } + + static HashNumber prepareHash(const Lookup& l) + { + HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l)); + + // Avoid reserved hash codes. 
+ if (!isLiveHash(keyHash)) + keyHash -= (sRemovedKey + 1); + return keyHash & ~sCollisionBit; + } + + enum FailureBehavior { DontReportFailure = false, ReportFailure = true }; + + static Entry* createTable(AllocPolicy& alloc, uint32_t capacity, + FailureBehavior reportFailure = ReportFailure) + { + static_assert(sFreeKey == 0, + "newly-calloc'd tables have to be considered empty"); + if (reportFailure) + return alloc.template pod_calloc(capacity); + + return alloc.template maybe_pod_calloc(capacity); + } + + static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity) + { + static_assert(sFreeKey == 0, + "newly-calloc'd tables have to be considered empty"); + return alloc.template maybe_pod_calloc(capacity); + } + + static void destroyTable(AllocPolicy& alloc, Entry* oldTable, uint32_t capacity) + { + Entry* end = oldTable + capacity; + for (Entry* e = oldTable; e < end; ++e) + e->destroyIfLive(); + alloc.free_(oldTable); + } + + public: + explicit HashTable(AllocPolicy ap) + : AllocPolicy(ap) + , gen(0) + , hashShift(sHashBits) + , table(nullptr) + , entryCount(0) + , removedCount(0) +#ifdef JS_DEBUG + , mutationCount(0) + , mEntered(false) +#endif + {} + + MOZ_MUST_USE bool init(uint32_t length) + { + MOZ_ASSERT(!initialized()); + + // Reject all lengths whose initial computed capacity would exceed + // sMaxCapacity. Round that maximum length down to the nearest power + // of two for speedier code. + if (MOZ_UNLIKELY(length > sMaxInit)) { + this->reportAllocOverflow(); + return false; + } + + static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit, + "multiplication in numerator below could overflow"); + static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator, + "numerator calculation below could potentially overflow"); + + // Compute the smallest capacity allowing |length| elements to be + // inserted without rehashing: ceil(length / max-alpha). (Ceiling + // integral division: .) 
+ uint32_t newCapacity = + (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator; + if (newCapacity < sMinCapacity) + newCapacity = sMinCapacity; + + // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034). + uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2; + while (roundUp < newCapacity) { + roundUp <<= 1; + ++roundUpLog2; + } + + newCapacity = roundUp; + MOZ_ASSERT(newCapacity >= length); + MOZ_ASSERT(newCapacity <= sMaxCapacity); + + table = createTable(*this, newCapacity); + if (!table) + return false; + + setTableSizeLog2(roundUpLog2); + METER(memset(&stats, 0, sizeof(stats))); + return true; + } + + bool initialized() const + { + return !!table; + } + + ~HashTable() + { + if (table) + destroyTable(*this, table, capacity()); + } + + private: + HashNumber hash1(HashNumber hash0) const + { + return hash0 >> hashShift; + } + + struct DoubleHash + { + HashNumber h2; + HashNumber sizeMask; + }; + + DoubleHash hash2(HashNumber curKeyHash) const + { + unsigned sizeLog2 = sHashBits - hashShift; + DoubleHash dh = { + ((curKeyHash << sizeLog2) >> hashShift) | 1, + (HashNumber(1) << sizeLog2) - 1 + }; + return dh; + } + + static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh) + { + return (h1 - dh.h2) & dh.sizeMask; + } + + bool overloaded() + { + static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator, + "multiplication below could overflow"); + return entryCount + removedCount >= + capacity() * sMaxAlphaNumerator / sAlphaDenominator; + } + + // Would the table be underloaded if it had the given capacity and entryCount? 
+ static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount) + { + static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator, + "multiplication below could overflow"); + return capacity > sMinCapacity && + entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator; + } + + bool underloaded() + { + return wouldBeUnderloaded(capacity(), entryCount); + } + + static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l) + { + return HashPolicy::match(HashPolicy::getKey(e.get()), l); + } + + // Warning: in order for readonlyThreadsafeLookup() to be safe this + // function must not modify the table in any way when |collisionBit| is 0. + // (The use of the METER() macro to increment stats violates this + // restriction but we will live with that for now because it's enabled so + // rarely.) + MOZ_ALWAYS_INLINE Entry& + lookup(const Lookup& l, HashNumber keyHash, unsigned collisionBit) const + { + MOZ_ASSERT(isLiveHash(keyHash)); + MOZ_ASSERT(!(keyHash & sCollisionBit)); + MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit); + MOZ_ASSERT(table); + METER(stats.searches++); + + // Compute the primary hash address. + HashNumber h1 = hash1(keyHash); + Entry* entry = &table[h1]; + + // Miss: return space for a new entry. + if (entry->isFree()) { + METER(stats.misses++); + return *entry; + } + + // Hit: return entry. + if (entry->matchHash(keyHash) && match(*entry, l)) { + METER(stats.hits++); + return *entry; + } + + // Collision: double hash. + DoubleHash dh = hash2(keyHash); + + // Save the first removed entry pointer so we can recycle later. + Entry* firstRemoved = nullptr; + + while (true) { + if (MOZ_UNLIKELY(entry->isRemoved())) { + if (!firstRemoved) + firstRemoved = entry; + } else { + if (collisionBit == sCollisionBit) + entry->setCollision(); + } + + METER(stats.steps++); + h1 = applyDoubleHash(h1, dh); + + entry = &table[h1]; + if (entry->isFree()) { + METER(stats.misses++); + return firstRemoved ? 
*firstRemoved : *entry; + } + + if (entry->matchHash(keyHash) && match(*entry, l)) { + METER(stats.hits++); + return *entry; + } + } + } + + // This is a copy of lookup hardcoded to the assumptions: + // 1. the lookup is a lookupForAdd + // 2. the key, whose |keyHash| has been passed is not in the table, + // 3. no entries have been removed from the table. + // This specialized search avoids the need for recovering lookup values + // from entries, which allows more flexible Lookup/Key types. + Entry& findFreeEntry(HashNumber keyHash) + { + MOZ_ASSERT(!(keyHash & sCollisionBit)); + MOZ_ASSERT(table); + METER(stats.searches++); + + // We assume 'keyHash' has already been distributed. + + // Compute the primary hash address. + HashNumber h1 = hash1(keyHash); + Entry* entry = &table[h1]; + + // Miss: return space for a new entry. + if (!entry->isLive()) { + METER(stats.misses++); + return *entry; + } + + // Collision: double hash. + DoubleHash dh = hash2(keyHash); + + while (true) { + MOZ_ASSERT(!entry->isRemoved()); + entry->setCollision(); + + METER(stats.steps++); + h1 = applyDoubleHash(h1, dh); + + entry = &table[h1]; + if (!entry->isLive()) { + METER(stats.misses++); + return *entry; + } + } + } + + enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed }; + + RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure) + { + // Look, but don't touch, until we succeed in getting new entry store. + Entry* oldTable = table; + uint32_t oldCap = capacity(); + uint32_t newLog2 = sHashBits - hashShift + deltaLog2; + uint32_t newCapacity = JS_BIT(newLog2); + if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) { + if (reportFailure) + this->reportAllocOverflow(); + return RehashFailed; + } + + Entry* newTable = createTable(*this, newCapacity, reportFailure); + if (!newTable) + return RehashFailed; + + // We can't fail from here on, so update table parameters. 
+ setTableSizeLog2(newLog2); + removedCount = 0; + gen++; + table = newTable; + + // Copy only live entries, leaving removed ones behind. + Entry* end = oldTable + oldCap; + for (Entry* src = oldTable; src < end; ++src) { + if (src->isLive()) { + HashNumber hn = src->getKeyHash(); + findFreeEntry(hn).setLive( + hn, mozilla::Move(const_cast(src->get()))); + src->destroy(); + } + } + + // All entries have been destroyed, no need to destroyTable. + this->free_(oldTable); + return Rehashed; + } + + bool shouldCompressTable() + { + // Compress if a quarter or more of all entries are removed. + return removedCount >= (capacity() >> 2); + } + + RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure) + { + if (!overloaded()) + return NotOverloaded; + + int deltaLog2; + if (shouldCompressTable()) { + METER(stats.compresses++); + deltaLog2 = 0; + } else { + METER(stats.grows++); + deltaLog2 = 1; + } + + return changeTableSize(deltaLog2, reportFailure); + } + + // Infallibly rehash the table if we are overloaded with removals. + void checkOverRemoved() + { + if (overloaded()) { + if (checkOverloaded(DontReportFailure) == RehashFailed) + rehashTableInPlace(); + } + } + + void remove(Entry& e) + { + MOZ_ASSERT(table); + METER(stats.removes++); + + if (e.hasCollision()) { + e.removeLive(); + removedCount++; + } else { + METER(stats.removeFrees++); + e.clearLive(); + } + entryCount--; +#ifdef JS_DEBUG + mutationCount++; +#endif + } + + void checkUnderloaded() + { + if (underloaded()) { + METER(stats.shrinks++); + (void) changeTableSize(-1, DontReportFailure); + } + } + + // Resize the table down to the largest capacity which doesn't underload the + // table. Since we call checkUnderloaded() on every remove, you only need + // to call this after a bulk removal of items done without calling remove(). 
+ void compactIfUnderloaded() + { + int32_t resizeLog2 = 0; + uint32_t newCapacity = capacity(); + while (wouldBeUnderloaded(newCapacity, entryCount)) { + newCapacity = newCapacity >> 1; + resizeLog2--; + } + + if (resizeLog2 != 0) + (void) changeTableSize(resizeLog2, DontReportFailure); + } + + // This is identical to changeTableSize(currentSize), but without requiring + // a second table. We do this by recycling the collision bits to tell us if + // the element is already inserted or still waiting to be inserted. Since + // already-inserted elements win any conflicts, we get the same table as we + // would have gotten through random insertion order. + void rehashTableInPlace() + { + METER(stats.rehashes++); + removedCount = 0; + gen++; + for (size_t i = 0; i < capacity(); ++i) + table[i].unsetCollision(); + + for (size_t i = 0; i < capacity();) { + Entry* src = &table[i]; + + if (!src->isLive() || src->hasCollision()) { + ++i; + continue; + } + + HashNumber keyHash = src->getKeyHash(); + HashNumber h1 = hash1(keyHash); + DoubleHash dh = hash2(keyHash); + Entry* tgt = &table[h1]; + while (true) { + if (!tgt->hasCollision()) { + src->swap(tgt); + tgt->setCollision(); + break; + } + + h1 = applyDoubleHash(h1, dh); + tgt = &table[h1]; + } + } + + // TODO: this algorithm leaves collision bits on *all* elements, even if + // they are on no collision path. We have the option of setting the + // collision bits correctly on a subsequent pass or skipping the rehash + // unless we are totally filled with tombstones: benchmark to find out + // which approach is best. + } + + // Note: |l| may be a reference to a piece of |u|, so this function + // must take care not to use |l| after moving |u|. + // + // Prefer to use putNewInfallible; this function does not check + // invariants. + template + void putNewInfallibleInternal(const Lookup& l, Args&&... 
args) + { + MOZ_ASSERT(table); + + HashNumber keyHash = prepareHash(l); + Entry* entry = &findFreeEntry(keyHash); + MOZ_ASSERT(entry); + + if (entry->isRemoved()) { + METER(stats.addOverRemoved++); + removedCount--; + keyHash |= sCollisionBit; + } + + entry->setLive(keyHash, mozilla::Forward(args)...); + entryCount++; +#ifdef JS_DEBUG + mutationCount++; +#endif + } + + public: + void clear() + { + if (mozilla::IsPod::value) { + memset(table, 0, sizeof(*table) * capacity()); + } else { + uint32_t tableCapacity = capacity(); + Entry* end = table + tableCapacity; + for (Entry* e = table; e < end; ++e) + e->clear(); + } + removedCount = 0; + entryCount = 0; +#ifdef JS_DEBUG + mutationCount++; +#endif + } + + void clearAndShrink() + { + clear(); + compactIfUnderloaded(); + } + + void finish() + { +#ifdef JS_DEBUG + MOZ_ASSERT(!mEntered); +#endif + if (!table) + return; + + destroyTable(*this, table, capacity()); + table = nullptr; + gen++; + entryCount = 0; + removedCount = 0; +#ifdef JS_DEBUG + mutationCount++; +#endif + } + + Range all() const + { + MOZ_ASSERT(table); + return Range(*this, table, table + capacity()); + } + + bool empty() const + { + MOZ_ASSERT(table); + return !entryCount; + } + + uint32_t count() const + { + MOZ_ASSERT(table); + return entryCount; + } + + uint32_t capacity() const + { + MOZ_ASSERT(table); + return JS_BIT(sHashBits - hashShift); + } + + Generation generation() const + { + MOZ_ASSERT(table); + return Generation(gen); + } + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const + { + return mallocSizeOf(table); + } + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const + { + return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf); + } + + MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& l) const + { + mozilla::ReentrancyGuard g(*this); + if (!HasHash(l)) + return Ptr(); + HashNumber keyHash = prepareHash(l); + return Ptr(lookup(l, keyHash, 0), *this); + } + + MOZ_ALWAYS_INLINE Ptr 
readonlyThreadsafeLookup(const Lookup& l) const + { + if (!HasHash(l)) + return Ptr(); + HashNumber keyHash = prepareHash(l); + return Ptr(lookup(l, keyHash, 0), *this); + } + + MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& l) const + { + mozilla::ReentrancyGuard g(*this); + if (!EnsureHash(l)) + return AddPtr(); + HashNumber keyHash = prepareHash(l); + // Calling constructor in return statement here avoid excess copying + // when build with Visual Studio 2015 and 2017, but it triggers a bug in + // gcc which is fixed in gcc-6. See bug 1385181. +#if MOZ_IS_GCC && __GNUC__ < 6 + AddPtr p(lookup(l, keyHash, sCollisionBit), *this, keyHash); + return p; +#else + return AddPtr(lookup(l, keyHash, sCollisionBit), *this, keyHash); +#endif + } + + template + MOZ_MUST_USE bool add(AddPtr& p, Args&&... args) + { + mozilla::ReentrancyGuard g(*this); + MOZ_ASSERT(table); + MOZ_ASSERT_IF(p.isValid(), p.table_ == this); + MOZ_ASSERT(!p.found()); + MOZ_ASSERT(!(p.keyHash & sCollisionBit)); + + // Check for error from ensureHash() here. + if (!p.isValid()) + return false; + + MOZ_ASSERT(p.generation == generation()); + MOZ_ASSERT(p.mutationCount == mutationCount); + + // Changing an entry from removed to live does not affect whether we + // are overloaded and can be handled separately. + if (p.entry_->isRemoved()) { + if (!this->checkSimulatedOOM()) + return false; + METER(stats.addOverRemoved++); + removedCount--; + p.keyHash |= sCollisionBit; + } else { + // Preserve the validity of |p.entry_|. 
+ RebuildStatus status = checkOverloaded(); + if (status == RehashFailed) + return false; + if (status == NotOverloaded && !this->checkSimulatedOOM()) + return false; + if (status == Rehashed) + p.entry_ = &findFreeEntry(p.keyHash); + } + + p.entry_->setLive(p.keyHash, mozilla::Forward(args)...); + entryCount++; +#ifdef JS_DEBUG + mutationCount++; + p.generation = generation(); + p.mutationCount = mutationCount; +#endif + return true; + } + + // Note: |l| may be a reference to a piece of |u|, so this function + // must take care not to use |l| after moving |u|. + template + void putNewInfallible(const Lookup& l, Args&&... args) + { + MOZ_ASSERT(!lookup(l).found()); + mozilla::ReentrancyGuard g(*this); + putNewInfallibleInternal(l, mozilla::Forward(args)...); + } + + // Note: |l| may be alias arguments in |args|, so this function must take + // care not to use |l| after moving |args|. + template + MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args) + { + if (!this->checkSimulatedOOM()) + return false; + + if (!EnsureHash(l)) + return false; + + if (checkOverloaded() == RehashFailed) + return false; + + putNewInfallible(l, mozilla::Forward(args)...); + return true; + } + + // Note: |l| may be a reference to a piece of |u|, so this function + // must take care not to use |l| after moving |u|. + template + MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args) + { + // Check for error from ensureHash() here. 
+ if (!p.isValid()) + return false; + +#ifdef JS_DEBUG + p.generation = generation(); + p.mutationCount = mutationCount; +#endif + { + mozilla::ReentrancyGuard g(*this); + MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed + p.entry_ = &lookup(l, p.keyHash, sCollisionBit); + } + return p.found() || add(p, mozilla::Forward(args)...); + } + + void remove(Ptr p) + { + MOZ_ASSERT(table); + mozilla::ReentrancyGuard g(*this); + MOZ_ASSERT(p.found()); + MOZ_ASSERT(p.generation == generation()); + remove(*p.entry_); + checkUnderloaded(); + } + + void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k) + { + MOZ_ASSERT(table); + mozilla::ReentrancyGuard g(*this); + MOZ_ASSERT(p.found()); + MOZ_ASSERT(p.generation == generation()); + typename HashTableEntry::NonConstT t(mozilla::Move(*p)); + HashPolicy::setKey(t, const_cast(k)); + remove(*p.entry_); + putNewInfallibleInternal(l, mozilla::Move(t)); + } + + void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k) + { + rekeyWithoutRehash(p, l, k); + checkOverRemoved(); + } + +#undef METER +}; + +} // namespace detail +} // namespace js + +#endif /* js_HashTable_h */ diff --git a/gecko/include/js/RootingAPI.h b/gecko/include/js/RootingAPI.h new file mode 100644 index 0000000..fc28ee3 --- /dev/null +++ b/gecko/include/js/RootingAPI.h @@ -0,0 +1,1518 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef js_RootingAPI_h +#define js_RootingAPI_h + +#include "mozilla/Attributes.h" +#include "mozilla/DebugOnly.h" +#include "mozilla/GuardObjects.h" +#include "mozilla/LinkedList.h" +#include "mozilla/Move.h" +#include "mozilla/TypeTraits.h" + +#include + +#include "jspubtd.h" + +#include "js/GCAnnotations.h" +#include "js/GCAPI.h" +#include "js/GCPolicyAPI.h" +#include "js/HeapAPI.h" +#include "js/TypeDecls.h" +#include "js/UniquePtr.h" +#include "js/Utility.h" + +/* + * Moving GC Stack Rooting + * + * A moving GC may change the physical location of GC allocated things, even + * when they are rooted, updating all pointers to the thing to refer to its new + * location. The GC must therefore know about all live pointers to a thing, + * not just one of them, in order to behave correctly. + * + * The |Rooted| and |Handle| classes below are used to root stack locations + * whose value may be held live across a call that can trigger GC. For a + * code fragment such as: + * + * JSObject* obj = NewObject(cx); + * DoSomething(cx); + * ... = obj->lastProperty(); + * + * If |DoSomething()| can trigger a GC, the stack location of |obj| must be + * rooted to ensure that the GC does not move the JSObject referred to by + * |obj| without updating |obj|'s location itself. This rooting must happen + * regardless of whether there are other roots which ensure that the object + * itself will not be collected. + * + * If |DoSomething()| cannot trigger a GC, and the same holds for all other + * calls made between |obj|'s definitions and its last uses, then no rooting + * is required. + * + * SpiderMonkey can trigger a GC at almost any time and in ways that are not + * always clear. For example, the following innocuous-looking actions can + * cause a GC: allocation of any new GC thing; JSObject::hasProperty; + * JS_ReportError and friends; and ToNumber, among many others. 
The following + * dangerous-looking actions cannot trigger a GC: js_malloc, cx->malloc_, + * rt->malloc_, and friends and JS_ReportOutOfMemory. + * + * The following family of three classes will exactly root a stack location. + * Incorrect usage of these classes will result in a compile error in almost + * all cases. Therefore, it is very hard to be incorrectly rooted if you use + * these classes exclusively. These classes are all templated on the type T of + * the value being rooted. + * + * - Rooted declares a variable of type T, whose value is always rooted. + * Rooted may be automatically coerced to a Handle, below. Rooted + * should be used whenever a local variable's value may be held live across a + * call which can trigger a GC. + * + * - Handle is a const reference to a Rooted. Functions which take GC + * things or values as arguments and need to root those arguments should + * generally use handles for those arguments and avoid any explicit rooting. + * This has two benefits. First, when several such functions call each other + * then redundant rooting of multiple copies of the GC thing can be avoided. + * Second, if the caller does not pass a rooted value a compile error will be + * generated, which is quicker and easier to fix than when relying on a + * separate rooting analysis. + * + * - MutableHandle is a non-const reference to Rooted. It is used in the + * same way as Handle and includes a |set(const T& v)| method to allow + * updating the value of the referenced Rooted. A MutableHandle can be + * created with an implicit cast from a Rooted*. + * + * In some cases the small performance overhead of exact rooting (measured to + * be a few nanoseconds on desktop) is too much. In these cases, try the + * following: + * + * - Move all Rooted above inner loops: this allows you to re-use the root + * on each iteration of the loop. + * + * - Pass Handle through your hot call stack to avoid re-rooting costs at + * every invocation. 
+ * + * The following diagram explains the list of supported, implicit type + * conversions between classes of this family: + * + * Rooted ----> Handle + * | ^ + * | | + * | | + * +---> MutableHandle + * (via &) + * + * All of these types have an implicit conversion to raw pointers. + */ + +namespace js { + +template +struct BarrierMethods { +}; + +template +class WrappedPtrOperations {}; + +template +class MutableWrappedPtrOperations : public WrappedPtrOperations {}; + +template +class RootedBase : public MutableWrappedPtrOperations {}; + +template +class HandleBase : public WrappedPtrOperations {}; + +template +class MutableHandleBase : public MutableWrappedPtrOperations {}; + +template +class HeapBase : public MutableWrappedPtrOperations {}; + +// Cannot use FOR_EACH_HEAP_ABLE_GC_POINTER_TYPE, as this would import too many macros into scope +template struct IsHeapConstructibleType { static constexpr bool value = false; }; +#define DECLARE_IS_HEAP_CONSTRUCTIBLE_TYPE(T) \ + template <> struct IsHeapConstructibleType { static constexpr bool value = true; }; +FOR_EACH_PUBLIC_GC_POINTER_TYPE(DECLARE_IS_HEAP_CONSTRUCTIBLE_TYPE) +FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(DECLARE_IS_HEAP_CONSTRUCTIBLE_TYPE) +#undef DECLARE_IS_HEAP_CONSTRUCTIBLE_TYPE + +template +class PersistentRootedBase : public MutableWrappedPtrOperations {}; + +namespace gc { +struct Cell; +template +struct PersistentRootedMarker; +} /* namespace gc */ + +// Important: Return a reference so passing a Rooted, etc. to +// something that takes a |const T&| is not a GC hazard. +#define DECLARE_POINTER_CONSTREF_OPS(T) \ + operator const T&() const { return get(); } \ + const T& operator->() const { return get(); } + +// Assignment operators on a base class are hidden by the implicitly defined +// operator= on the derived class. Thus, define the operator= directly on the +// class as we would need to manually pass it through anyway. 
+#define DECLARE_POINTER_ASSIGN_OPS(Wrapper, T) \ + Wrapper& operator=(const T& p) { \ + set(p); \ + return *this; \ + } \ + Wrapper& operator=(T&& p) { \ + set(mozilla::Move(p)); \ + return *this; \ + } \ + Wrapper& operator=(const Wrapper& other) { \ + set(other.get()); \ + return *this; \ + } \ + +#define DELETE_ASSIGNMENT_OPS(Wrapper, T) \ + template Wrapper& operator=(S) = delete; \ + Wrapper& operator=(const Wrapper&) = delete; + +#define DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr) \ + const T* address() const { return &(ptr); } \ + const T& get() const { return (ptr); } \ + +#define DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr) \ + T* address() { return &(ptr); } \ + T& get() { return (ptr); } \ + +} /* namespace js */ + +namespace JS { + +template class Rooted; +template class PersistentRooted; + +/* This is exposing internal state of the GC for inlining purposes. */ +JS_FRIEND_API(bool) isGCEnabled(); + +JS_FRIEND_API(void) HeapObjectPostBarrier(JSObject** objp, JSObject* prev, JSObject* next); + +#ifdef JS_DEBUG +/** + * For generational GC, assert that an object is in the tenured generation as + * opposed to being in the nursery. + */ +extern JS_FRIEND_API(void) +AssertGCThingMustBeTenured(JSObject* obj); +extern JS_FRIEND_API(void) +AssertGCThingIsNotAnObjectSubclass(js::gc::Cell* cell); +#else +inline void +AssertGCThingMustBeTenured(JSObject* obj) {} +inline void +AssertGCThingIsNotAnObjectSubclass(js::gc::Cell* cell) {} +#endif + +/** + * The Heap class is a heap-stored reference to a JS GC thing. All members of + * heap classes that refer to GC things should use Heap (or possibly + * TenuredHeap, described below). + * + * Heap is an abstraction that hides some of the complexity required to + * maintain GC invariants for the contained reference. It uses operator + * overloading to provide a normal pointer interface, but notifies the GC every + * time the value it contains is updated. 
This is necessary for generational GC, + * which keeps track of all pointers into the nursery. + * + * Heap instances must be traced when their containing object is traced to + * keep the pointed-to GC thing alive. + * + * Heap objects should only be used on the heap. GC references stored on the + * C/C++ stack must use Rooted/Handle/MutableHandle instead. + * + * Type T must be a public GC pointer type. + */ +template +class Heap : public js::HeapBase> +{ + // Please note: this can actually also be used by nsXBLMaybeCompiled, for legacy reasons. + static_assert(js::IsHeapConstructibleType::value, + "Type T must be a public GC pointer type"); + public: + using ElementType = T; + + Heap() { + static_assert(sizeof(T) == sizeof(Heap), + "Heap must be binary compatible with T."); + init(GCPolicy::initial()); + } + explicit Heap(const T& p) { init(p); } + + /* + * For Heap, move semantics are equivalent to copy semantics. In C++, a + * copy constructor taking const-ref is the way to get a single function + * that will be used for both lvalue and rvalue copies, so we can simply + * omit the rvalue variant. 
+ */ + explicit Heap(const Heap& p) { init(p.ptr); } + + ~Heap() { + post(ptr, GCPolicy::initial()); + } + + DECLARE_POINTER_CONSTREF_OPS(T); + DECLARE_POINTER_ASSIGN_OPS(Heap, T); + + const T* address() const { return &ptr; } + + void exposeToActiveJS() const { + js::BarrierMethods::exposeToJS(ptr); + } + const T& get() const { + exposeToActiveJS(); + return ptr; + } + const T& unbarrieredGet() const { + return ptr; + } + + T* unsafeGet() { return &ptr; } + + explicit operator bool() const { + return bool(js::BarrierMethods::asGCThingOrNull(ptr)); + } + explicit operator bool() { + return bool(js::BarrierMethods::asGCThingOrNull(ptr)); + } + + private: + void init(const T& newPtr) { + ptr = newPtr; + post(GCPolicy::initial(), ptr); + } + + void set(const T& newPtr) { + T tmp = ptr; + ptr = newPtr; + post(tmp, ptr); + } + + void post(const T& prev, const T& next) { + js::BarrierMethods::postBarrier(&ptr, prev, next); + } + + T ptr; +}; + +static MOZ_ALWAYS_INLINE bool +ObjectIsTenured(JSObject* obj) +{ + return !js::gc::IsInsideNursery(reinterpret_cast(obj)); +} + +static MOZ_ALWAYS_INLINE bool +ObjectIsTenured(const Heap& obj) +{ + return ObjectIsTenured(obj.unbarrieredGet()); +} + +static MOZ_ALWAYS_INLINE bool +ObjectIsMarkedGray(JSObject* obj) +{ + auto cell = reinterpret_cast(obj); + return js::gc::detail::CellIsMarkedGrayIfKnown(cell); +} + +static MOZ_ALWAYS_INLINE bool +ObjectIsMarkedGray(const JS::Heap& obj) +{ + return ObjectIsMarkedGray(obj.unbarrieredGet()); +} + +// The following *IsNotGray functions are for use in assertions and take account +// of the eventual gray marking state at the end of any ongoing incremental GC. 
+#ifdef DEBUG +inline bool +CellIsNotGray(js::gc::Cell* maybeCell) +{ + if (!maybeCell) + return true; + + return js::gc::detail::CellIsNotGray(maybeCell); +} + +inline bool +ObjectIsNotGray(JSObject* maybeObj) +{ + return CellIsNotGray(reinterpret_cast(maybeObj)); +} + +inline bool +ObjectIsNotGray(const JS::Heap& obj) +{ + return ObjectIsNotGray(obj.unbarrieredGet()); +} +#endif + +/** + * The TenuredHeap class is similar to the Heap class above in that it + * encapsulates the GC concerns of an on-heap reference to a JS object. However, + * it has two important differences: + * + * 1) Pointers which are statically known to only reference "tenured" objects + * can avoid the extra overhead of SpiderMonkey's write barriers. + * + * 2) Objects in the "tenured" heap have stronger alignment restrictions than + * those in the "nursery", so it is possible to store flags in the lower + * bits of pointers known to be tenured. TenuredHeap wraps a normal tagged + * pointer with a nice API for accessing the flag bits and adds various + * assertions to ensure that it is not mis-used. + * + * GC things are said to be "tenured" when they are located in the long-lived + * heap: e.g. they have gained tenure as an object by surviving past at least + * one GC. For performance, SpiderMonkey allocates some things which are known + * to normally be long lived directly into the tenured generation; for example, + * global objects. Additionally, SpiderMonkey does not visit individual objects + * when deleting non-tenured objects, so object with finalizers are also always + * tenured; for instance, this includes most DOM objects. + * + * The considerations to keep in mind when using a TenuredHeap vs a normal + * Heap are: + * + * - It is invalid for a TenuredHeap to refer to a non-tenured thing. + * - It is however valid for a Heap to refer to a tenured thing. + * - It is not possible to store flag bits in a Heap. 
+ */ +template +class TenuredHeap : public js::HeapBase> +{ + public: + using ElementType = T; + + TenuredHeap() : bits(0) { + static_assert(sizeof(T) == sizeof(TenuredHeap), + "TenuredHeap must be binary compatible with T."); + } + explicit TenuredHeap(T p) : bits(0) { setPtr(p); } + explicit TenuredHeap(const TenuredHeap& p) : bits(0) { setPtr(p.getPtr()); } + + void setPtr(T newPtr) { + MOZ_ASSERT((reinterpret_cast(newPtr) & flagsMask) == 0); + if (newPtr) + AssertGCThingMustBeTenured(newPtr); + bits = (bits & flagsMask) | reinterpret_cast(newPtr); + } + + void setFlags(uintptr_t flagsToSet) { + MOZ_ASSERT((flagsToSet & ~flagsMask) == 0); + bits |= flagsToSet; + } + + void unsetFlags(uintptr_t flagsToUnset) { + MOZ_ASSERT((flagsToUnset & ~flagsMask) == 0); + bits &= ~flagsToUnset; + } + + bool hasFlag(uintptr_t flag) const { + MOZ_ASSERT((flag & ~flagsMask) == 0); + return (bits & flag) != 0; + } + + T unbarrieredGetPtr() const { return reinterpret_cast(bits & ~flagsMask); } + uintptr_t getFlags() const { return bits & flagsMask; } + + void exposeToActiveJS() const { + js::BarrierMethods::exposeToJS(unbarrieredGetPtr()); + } + T getPtr() const { + exposeToActiveJS(); + return unbarrieredGetPtr(); + } + + operator T() const { return getPtr(); } + T operator->() const { return getPtr(); } + + explicit operator bool() const { + return bool(js::BarrierMethods::asGCThingOrNull(unbarrieredGetPtr())); + } + explicit operator bool() { + return bool(js::BarrierMethods::asGCThingOrNull(unbarrieredGetPtr())); + } + + TenuredHeap& operator=(T p) { + setPtr(p); + return *this; + } + + TenuredHeap& operator=(const TenuredHeap& other) { + bits = other.bits; + return *this; + } + + private: + enum { + maskBits = 3, + flagsMask = (1 << maskBits) - 1, + }; + + uintptr_t bits; +}; + +/** + * Reference to a T that has been rooted elsewhere. This is most useful + * as a parameter type, which guarantees that the T lvalue is properly + * rooted. See "Move GC Stack Rooting" above. 
+ * + * If you want to add additional methods to Handle for a specific + * specialization, define a HandleBase specialization containing them. + */ +template +class MOZ_NONHEAP_CLASS Handle : public js::HandleBase> +{ + friend class JS::MutableHandle; + + public: + using ElementType = T; + + /* Creates a handle from a handle of a type convertible to T. */ + template + MOZ_IMPLICIT Handle(Handle handle, + typename mozilla::EnableIf::value, int>::Type dummy = 0) + { + static_assert(sizeof(Handle) == sizeof(T*), + "Handle must be binary compatible with T*."); + ptr = reinterpret_cast(handle.address()); + } + + MOZ_IMPLICIT Handle(decltype(nullptr)) { + static_assert(mozilla::IsPointer::value, + "nullptr_t overload not valid for non-pointer types"); + static void* const ConstNullValue = nullptr; + ptr = reinterpret_cast(&ConstNullValue); + } + + MOZ_IMPLICIT Handle(MutableHandle handle) { + ptr = handle.address(); + } + + /* + * Take care when calling this method! + * + * This creates a Handle from the raw location of a T. + * + * It should be called only if the following conditions hold: + * + * 1) the location of the T is guaranteed to be marked (for some reason + * other than being a Rooted), e.g., if it is guaranteed to be reachable + * from an implicit root. + * + * 2) the contents of the location are immutable, or at least cannot change + * for the lifetime of the handle, as its users may not expect its value + * to change underneath them. + */ + static constexpr Handle fromMarkedLocation(const T* p) { + return Handle(p, DeliberatelyChoosingThisOverload, + ImUsingThisOnlyInFromFromMarkedLocation); + } + + /* + * Construct a handle from an explicitly rooted location. This is the + * normal way to create a handle, and normally happens implicitly. 
+ */ + template + inline + MOZ_IMPLICIT Handle(const Rooted& root, + typename mozilla::EnableIf::value, int>::Type dummy = 0); + + template + inline + MOZ_IMPLICIT Handle(const PersistentRooted& root, + typename mozilla::EnableIf::value, int>::Type dummy = 0); + + /* Construct a read only handle from a mutable handle. */ + template + inline + MOZ_IMPLICIT Handle(MutableHandle& root, + typename mozilla::EnableIf::value, int>::Type dummy = 0); + + DECLARE_POINTER_CONSTREF_OPS(T); + DECLARE_NONPOINTER_ACCESSOR_METHODS(*ptr); + + private: + Handle() {} + DELETE_ASSIGNMENT_OPS(Handle, T); + + enum Disambiguator { DeliberatelyChoosingThisOverload = 42 }; + enum CallerIdentity { ImUsingThisOnlyInFromFromMarkedLocation = 17 }; + constexpr Handle(const T* p, Disambiguator, CallerIdentity) : ptr(p) {} + + const T* ptr; +}; + +/** + * Similar to a handle, but the underlying storage can be changed. This is + * useful for outparams. + * + * If you want to add additional methods to MutableHandle for a specific + * specialization, define a MutableHandleBase specialization containing + * them. + */ +template +class MOZ_STACK_CLASS MutableHandle : public js::MutableHandleBase> +{ + public: + using ElementType = T; + + inline MOZ_IMPLICIT MutableHandle(Rooted* root); + inline MOZ_IMPLICIT MutableHandle(PersistentRooted* root); + + private: + // Disallow nullptr for overloading purposes. + MutableHandle(decltype(nullptr)) = delete; + + public: + void set(const T& v) { + *ptr = v; + } + void set(T&& v) { + *ptr = mozilla::Move(v); + } + + /* + * This may be called only if the location of the T is guaranteed + * to be marked (for some reason other than being a Rooted), + * e.g., if it is guaranteed to be reachable from an implicit root. + * + * Create a MutableHandle from a raw location of a T. 
+ */ + static MutableHandle fromMarkedLocation(T* p) { + MutableHandle h; + h.ptr = p; + return h; + } + + DECLARE_POINTER_CONSTREF_OPS(T); + DECLARE_NONPOINTER_ACCESSOR_METHODS(*ptr); + DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(*ptr); + + private: + MutableHandle() {} + DELETE_ASSIGNMENT_OPS(MutableHandle, T); + + T* ptr; +}; + +} /* namespace JS */ + +namespace js { + +template +struct BarrierMethods +{ + static T* initial() { return nullptr; } + static gc::Cell* asGCThingOrNull(T* v) { + if (!v) + return nullptr; + MOZ_ASSERT(uintptr_t(v) > 32); + return reinterpret_cast(v); + } + static void postBarrier(T** vp, T* prev, T* next) { + if (next) + JS::AssertGCThingIsNotAnObjectSubclass(reinterpret_cast(next)); + } + static void exposeToJS(T* t) { + if (t) + js::gc::ExposeGCThingToActiveJS(JS::GCCellPtr(t)); + } +}; + +template <> +struct BarrierMethods +{ + static JSObject* initial() { return nullptr; } + static gc::Cell* asGCThingOrNull(JSObject* v) { + if (!v) + return nullptr; + MOZ_ASSERT(uintptr_t(v) > 32); + return reinterpret_cast(v); + } + static void postBarrier(JSObject** vp, JSObject* prev, JSObject* next) { + JS::HeapObjectPostBarrier(vp, prev, next); + } + static void exposeToJS(JSObject* obj) { + if (obj) + JS::ExposeObjectToActiveJS(obj); + } +}; + +template <> +struct BarrierMethods +{ + static JSFunction* initial() { return nullptr; } + static gc::Cell* asGCThingOrNull(JSFunction* v) { + if (!v) + return nullptr; + MOZ_ASSERT(uintptr_t(v) > 32); + return reinterpret_cast(v); + } + static void postBarrier(JSFunction** vp, JSFunction* prev, JSFunction* next) { + JS::HeapObjectPostBarrier(reinterpret_cast(vp), + reinterpret_cast(prev), + reinterpret_cast(next)); + } + static void exposeToJS(JSFunction* fun) { + if (fun) + JS::ExposeObjectToActiveJS(reinterpret_cast(fun)); + } +}; + +// Provide hash codes for Cell kinds that may be relocated and, thus, not have +// a stable address to use as the base for a hash code. 
Instead of the address, +// this hasher uses Cell::getUniqueId to provide exact matches and as a base +// for generating hash codes. +// +// Note: this hasher, like PointerHasher can "hash" a nullptr. While a nullptr +// would not likely be a useful key, there are some cases where being able to +// hash a nullptr is useful, either on purpose or because of bugs: +// (1) existence checks where the key may happen to be null and (2) some +// aggregate Lookup kinds embed a JSObject* that is frequently null and do not +// null test before dispatching to the hasher. +template +struct JS_PUBLIC_API(MovableCellHasher) +{ + using Key = T; + using Lookup = T; + + static bool hasHash(const Lookup& l); + static bool ensureHash(const Lookup& l); + static HashNumber hash(const Lookup& l); + static bool match(const Key& k, const Lookup& l); + static void rekey(Key& k, const Key& newKey) { k = newKey; } +}; + +template +struct JS_PUBLIC_API(MovableCellHasher>) +{ + using Key = JS::Heap; + using Lookup = T; + + static bool hasHash(const Lookup& l) { return MovableCellHasher::hasHash(l); } + static bool ensureHash(const Lookup& l) { return MovableCellHasher::ensureHash(l); } + static HashNumber hash(const Lookup& l) { return MovableCellHasher::hash(l); } + static bool match(const Key& k, const Lookup& l) { + return MovableCellHasher::match(k.unbarrieredGet(), l); + } + static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); } +}; + +template +struct FallibleHashMethods> +{ + template static bool hasHash(Lookup&& l) { + return MovableCellHasher::hasHash(mozilla::Forward(l)); + } + template static bool ensureHash(Lookup&& l) { + return MovableCellHasher::ensureHash(mozilla::Forward(l)); + } +}; + +} /* namespace js */ + +namespace js { + +// The alignment must be set because the Rooted and PersistentRooted ptr fields +// may be accessed through reinterpret_cast*>, and +// the compiler may choose a different alignment for the ptr field when it +// knows the actual type 
stored in DispatchWrapper. +// +// It would make more sense to align only those specific fields of type +// DispatchWrapper, rather than DispatchWrapper itself, but that causes MSVC to +// fail when Rooted is used in an IsConvertible test. +template +class alignas(8) DispatchWrapper +{ + static_assert(JS::MapTypeToRootKind::kind == JS::RootKind::Traceable, + "DispatchWrapper is intended only for usage with a Traceable"); + + using TraceFn = void (*)(JSTracer*, T*, const char*); + TraceFn tracer; + alignas(gc::CellAlignBytes) T storage; + + public: + template + MOZ_IMPLICIT DispatchWrapper(U&& initial) + : tracer(&JS::GCPolicy::trace), + storage(mozilla::Forward(initial)) + { } + + // Mimic a pointer type, so that we can drop into Rooted. + T* operator &() { return &storage; } + const T* operator &() const { return &storage; } + operator T&() { return storage; } + operator const T&() const { return storage; } + + // Trace the contained storage (of unknown type) using the trace function + // we set aside when we did know the type. + static void TraceWrapped(JSTracer* trc, T* thingp, const char* name) { + auto wrapper = reinterpret_cast( + uintptr_t(thingp) - offsetof(DispatchWrapper, storage)); + wrapper->tracer(trc, &wrapper->storage, name); + } +}; + +} /* namespace js */ + +namespace JS { + +namespace detail { + +/* + * For pointer types, the TraceKind for tracing is based on the list it is + * in (selected via MapTypeToRootKind), so no additional storage is + * required here. Non-pointer types, however, share the same list, so the + * function to call for tracing is stored adjacent to the struct. Since C++ + * cannot templatize on storage class, this is implemented via the wrapper + * class DispatchWrapper. + */ +template +using MaybeWrapped = typename mozilla::Conditional< + MapTypeToRootKind::kind == JS::RootKind::Traceable, + js::DispatchWrapper, + T>::Type; + +} /* namespace detail */ + +/** + * Local variable of type T whose value is always rooted. 
This is typically + * used for local variables, or for non-rooted values being passed to a + * function that requires a handle, e.g. Foo(Root(cx, x)). + * + * If you want to add additional methods to Rooted for a specific + * specialization, define a RootedBase specialization containing them. + */ +template +class MOZ_RAII Rooted : public js::RootedBase> +{ + inline void registerWithRootLists(RootedListHeads& roots) { + this->stack = &roots[JS::MapTypeToRootKind::kind]; + this->prev = *stack; + *stack = reinterpret_cast*>(this); + } + + inline RootedListHeads& rootLists(RootingContext* cx) { + return cx->stackRoots_; + } + inline RootedListHeads& rootLists(JSContext* cx) { + return rootLists(RootingContext::get(cx)); + } + + public: + using ElementType = T; + + template + explicit Rooted(const RootingContext& cx) + : ptr(GCPolicy::initial()) + { + registerWithRootLists(rootLists(cx)); + } + + template + Rooted(const RootingContext& cx, S&& initial) + : ptr(mozilla::Forward(initial)) + { + registerWithRootLists(rootLists(cx)); + } + + ~Rooted() { + MOZ_ASSERT(*stack == reinterpret_cast*>(this)); + *stack = prev; + } + + Rooted* previous() { return reinterpret_cast*>(prev); } + + /* + * This method is public for Rooted so that Codegen.py can use a Rooted + * interchangeably with a MutableHandleValue. + */ + void set(const T& value) { + ptr = value; + } + void set(T&& value) { + ptr = mozilla::Move(value); + } + + DECLARE_POINTER_CONSTREF_OPS(T); + DECLARE_POINTER_ASSIGN_OPS(Rooted, T); + DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr); + DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr); + + private: + /* + * These need to be templated on void* to avoid aliasing issues between, for + * example, Rooted and Rooted, which use the same + * stack head pointer for different classes. 
+ */ + Rooted** stack; + Rooted* prev; + + detail::MaybeWrapped ptr; + + Rooted(const Rooted&) = delete; +} JS_HAZ_ROOTED; + +} /* namespace JS */ + +namespace js { + +/** + * Augment the generic Rooted interface when T = JSObject* with + * class-querying and downcasting operations. + * + * Given a Rooted obj, one can view + * Handle h = obj.as(); + * as an optimization of + * Rooted rooted(cx, &obj->as()); + * Handle h = rooted; + */ +template +class RootedBase : public MutableWrappedPtrOperations +{ + public: + template + JS::Handle as() const; +}; + +/** + * Augment the generic Handle interface when T = JSObject* with + * downcasting operations. + * + * Given a Handle obj, one can view + * Handle h = obj.as(); + * as an optimization of + * Rooted rooted(cx, &obj->as()); + * Handle h = rooted; + */ +template +class HandleBase : public WrappedPtrOperations +{ + public: + template + JS::Handle as() const; +}; + +/** Interface substitute for Rooted which does not root the variable's memory. */ +template +class MOZ_RAII FakeRooted : public RootedBase> +{ + public: + using ElementType = T; + + template + explicit FakeRooted(CX* cx) : ptr(JS::GCPolicy::initial()) {} + + template + FakeRooted(CX* cx, T initial) : ptr(initial) {} + + DECLARE_POINTER_CONSTREF_OPS(T); + DECLARE_POINTER_ASSIGN_OPS(FakeRooted, T); + DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr); + DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr); + + private: + T ptr; + + void set(const T& value) { + ptr = value; + } + + FakeRooted(const FakeRooted&) = delete; +}; + +/** Interface substitute for MutableHandle which is not required to point to rooted memory. 
*/ +template +class FakeMutableHandle : public js::MutableHandleBase> +{ + public: + using ElementType = T; + + MOZ_IMPLICIT FakeMutableHandle(T* t) { + ptr = t; + } + + MOZ_IMPLICIT FakeMutableHandle(FakeRooted* root) { + ptr = root->address(); + } + + void set(const T& v) { + *ptr = v; + } + + DECLARE_POINTER_CONSTREF_OPS(T); + DECLARE_NONPOINTER_ACCESSOR_METHODS(*ptr); + DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(*ptr); + + private: + FakeMutableHandle() {} + DELETE_ASSIGNMENT_OPS(FakeMutableHandle, T); + + T* ptr; +}; + +/** + * Types for a variable that either should or shouldn't be rooted, depending on + * the template parameter allowGC. Used for implementing functions that can + * operate on either rooted or unrooted data. + * + * The toHandle() and toMutableHandle() functions are for calling functions + * which require handle types and are only called in the CanGC case. These + * allow the calling code to type check. + */ +enum AllowGC { + NoGC = 0, + CanGC = 1 +}; +template +class MaybeRooted +{ +}; + +template class MaybeRooted +{ + public: + typedef JS::Handle HandleType; + typedef JS::Rooted RootType; + typedef JS::MutableHandle MutableHandleType; + + static inline JS::Handle toHandle(HandleType v) { + return v; + } + + static inline JS::MutableHandle toMutableHandle(MutableHandleType v) { + return v; + } + + template + static inline JS::Handle downcastHandle(HandleType v) { + return v.template as(); + } +}; + +template class MaybeRooted +{ + public: + typedef const T& HandleType; + typedef FakeRooted RootType; + typedef FakeMutableHandle MutableHandleType; + + static JS::Handle toHandle(HandleType v) { + MOZ_CRASH("Bad conversion"); + } + + static JS::MutableHandle toMutableHandle(MutableHandleType v) { + MOZ_CRASH("Bad conversion"); + } + + template + static inline T2* downcastHandle(HandleType v) { + return &v->template as(); + } +}; + +} /* namespace js */ + +namespace JS { + +template template +inline +Handle::Handle(const Rooted& root, + typename 
mozilla::EnableIf::value, int>::Type dummy) +{ + ptr = reinterpret_cast(root.address()); +} + +template template +inline +Handle::Handle(const PersistentRooted& root, + typename mozilla::EnableIf::value, int>::Type dummy) +{ + ptr = reinterpret_cast(root.address()); +} + +template template +inline +Handle::Handle(MutableHandle& root, + typename mozilla::EnableIf::value, int>::Type dummy) +{ + ptr = reinterpret_cast(root.address()); +} + +template +inline +MutableHandle::MutableHandle(Rooted* root) +{ + static_assert(sizeof(MutableHandle) == sizeof(T*), + "MutableHandle must be binary compatible with T*."); + ptr = root->address(); +} + +template +inline +MutableHandle::MutableHandle(PersistentRooted* root) +{ + static_assert(sizeof(MutableHandle) == sizeof(T*), + "MutableHandle must be binary compatible with T*."); + ptr = root->address(); +} + +JS_PUBLIC_API(void) +AddPersistentRoot(RootingContext* cx, RootKind kind, PersistentRooted* root); + +JS_PUBLIC_API(void) +AddPersistentRoot(JSRuntime* rt, RootKind kind, PersistentRooted* root); + +/** + * A copyable, assignable global GC root type with arbitrary lifetime, an + * infallible constructor, and automatic unrooting on destruction. + * + * These roots can be used in heap-allocated data structures, so they are not + * associated with any particular JSContext or stack. They are registered with + * the JSRuntime itself, without locking, so they require a full JSContext to be + * initialized, not one of its more restricted superclasses. Initialization may + * take place on construction, or in two phases if the no-argument constructor + * is called followed by init(). + * + * Note that you must not use an PersistentRooted in an object owned by a JS + * object: + * + * Whenever one object whose lifetime is decided by the GC refers to another + * such object, that edge must be traced only if the owning JS object is traced. 
+ * This applies not only to JS objects (which obviously are managed by the GC) + * but also to C++ objects owned by JS objects. + * + * If you put a PersistentRooted in such a C++ object, that is almost certainly + * a leak. When a GC begins, the referent of the PersistentRooted is treated as + * live, unconditionally (because a PersistentRooted is a *root*), even if the + * JS object that owns it is unreachable. If there is any path from that + * referent back to the JS object, then the C++ object containing the + * PersistentRooted will not be destructed, and the whole blob of objects will + * not be freed, even if there are no references to them from the outside. + * + * In the context of Firefox, this is a severe restriction: almost everything in + * Firefox is owned by some JS object or another, so using PersistentRooted in + * such objects would introduce leaks. For these kinds of edges, Heap or + * TenuredHeap would be better types. It's up to the implementor of the type + * containing Heap or TenuredHeap members to make sure their referents get + * marked when the object itself is marked. 
+ */ +template +class PersistentRooted : public js::RootedBase>, + private mozilla::LinkedListElement> +{ + using ListBase = mozilla::LinkedListElement>; + + friend class mozilla::LinkedList; + friend class mozilla::LinkedListElement; + + void registerWithRootLists(RootingContext* cx) { + MOZ_ASSERT(!initialized()); + JS::RootKind kind = JS::MapTypeToRootKind::kind; + AddPersistentRoot(cx, kind, reinterpret_cast*>(this)); + } + + void registerWithRootLists(JSRuntime* rt) { + MOZ_ASSERT(!initialized()); + JS::RootKind kind = JS::MapTypeToRootKind::kind; + AddPersistentRoot(rt, kind, reinterpret_cast*>(this)); + } + + public: + using ElementType = T; + + PersistentRooted() : ptr(GCPolicy::initial()) {} + + explicit PersistentRooted(RootingContext* cx) + : ptr(GCPolicy::initial()) + { + registerWithRootLists(cx); + } + + explicit PersistentRooted(JSContext* cx) + : ptr(GCPolicy::initial()) + { + registerWithRootLists(RootingContext::get(cx)); + } + + template + PersistentRooted(RootingContext* cx, U&& initial) + : ptr(mozilla::Forward(initial)) + { + registerWithRootLists(cx); + } + + template + PersistentRooted(JSContext* cx, U&& initial) + : ptr(mozilla::Forward(initial)) + { + registerWithRootLists(RootingContext::get(cx)); + } + + explicit PersistentRooted(JSRuntime* rt) + : ptr(GCPolicy::initial()) + { + registerWithRootLists(rt); + } + + template + PersistentRooted(JSRuntime* rt, U&& initial) + : ptr(mozilla::Forward(initial)) + { + registerWithRootLists(rt); + } + + PersistentRooted(const PersistentRooted& rhs) + : mozilla::LinkedListElement>(), + ptr(rhs.ptr) + { + /* + * Copy construction takes advantage of the fact that the original + * is already inserted, and simply adds itself to whatever list the + * original was on - no JSRuntime pointer needed. + * + * This requires mutating rhs's links, but those should be 'mutable' + * anyway. C++ doesn't let us declare mutable base classes. 
+ */ + const_cast(rhs).setNext(this); + } + + bool initialized() { + return ListBase::isInList(); + } + + void init(JSContext* cx) { + init(cx, GCPolicy::initial()); + } + + template + void init(JSContext* cx, U&& initial) { + ptr = mozilla::Forward(initial); + registerWithRootLists(RootingContext::get(cx)); + } + + void reset() { + if (initialized()) { + set(GCPolicy::initial()); + ListBase::remove(); + } + } + + DECLARE_POINTER_CONSTREF_OPS(T); + DECLARE_POINTER_ASSIGN_OPS(PersistentRooted, T); + DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr); + + // These are the same as DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS, except + // they check that |this| is initialized in case the caller later stores + // something in |ptr|. + T* address() { + MOZ_ASSERT(initialized()); + return &ptr; + } + T& get() { + MOZ_ASSERT(initialized()); + return ptr; + } + + private: + template + void set(U&& value) { + MOZ_ASSERT(initialized()); + ptr = mozilla::Forward(value); + } + + detail::MaybeWrapped ptr; +} JS_HAZ_ROOTED; + +class JS_PUBLIC_API(ObjectPtr) +{ + Heap value; + + public: + using ElementType = JSObject*; + + ObjectPtr() : value(nullptr) {} + + explicit ObjectPtr(JSObject* obj) : value(obj) {} + + /* Always call finalize before the destructor. 
*/ + ~ObjectPtr() { MOZ_ASSERT(!value); } + + void finalize(JSRuntime* rt); + void finalize(JSContext* cx); + + void init(JSObject* obj) { value = obj; } + + JSObject* get() const { return value; } + JSObject* unbarrieredGet() const { return value.unbarrieredGet(); } + + void writeBarrierPre(JSContext* cx) { + IncrementalPreWriteBarrier(value); + } + + void updateWeakPointerAfterGC(); + + ObjectPtr& operator=(JSObject* obj) { + IncrementalPreWriteBarrier(value); + value = obj; + return *this; + } + + void trace(JSTracer* trc, const char* name); + + JSObject& operator*() const { return *value; } + JSObject* operator->() const { return value; } + operator JSObject*() const { return value; } + + explicit operator bool() const { return value.unbarrieredGet(); } + explicit operator bool() { return value.unbarrieredGet(); } +}; + +} /* namespace JS */ + +namespace js { + +template +class WrappedPtrOperations, Container> +{ + const UniquePtr& uniquePtr() const { return static_cast(this)->get(); } + + public: + explicit operator bool() const { return !!uniquePtr(); } + T* get() const { return uniquePtr().get(); } + T* operator->() const { return get(); } + T& operator*() const { return *uniquePtr(); } +}; + +template +class MutableWrappedPtrOperations, Container> + : public WrappedPtrOperations, Container> +{ + UniquePtr& uniquePtr() { return static_cast(this)->get(); } + + public: + MOZ_MUST_USE typename UniquePtr::Pointer release() { return uniquePtr().release(); } + void reset(T* ptr = T()) { uniquePtr().reset(ptr); } +}; + +namespace gc { + +template +void +CallTraceCallbackOnNonHeap(T* v, const TraceCallbacks& aCallbacks, const char* aName, void* aClosure) +{ + static_assert(sizeof(T) == sizeof(JS::Heap), "T and Heap must be compatible."); + MOZ_ASSERT(v); + mozilla::DebugOnly cell = BarrierMethods::asGCThingOrNull(*v); + MOZ_ASSERT(cell); + MOZ_ASSERT(!IsInsideNursery(cell)); + JS::Heap* asHeapT = reinterpret_cast*>(v); + aCallbacks.Trace(asHeapT, aName, aClosure); 
+} + +} /* namespace gc */ +} /* namespace js */ + +// mozilla::Swap uses a stack temporary, which prevents classes like Heap +// from being declared MOZ_HEAP_CLASS. +namespace mozilla { + +template +inline void +Swap(JS::Heap& aX, JS::Heap& aY) +{ + T tmp = aX; + aX = aY; + aY = tmp; +} + +template +inline void +Swap(JS::TenuredHeap& aX, JS::TenuredHeap& aY) +{ + T tmp = aX; + aX = aY; + aY = tmp; +} + +} /* namespace mozilla */ + +namespace js { +namespace detail { + +// DefineComparisonOps is a trait which selects which wrapper classes to define +// operator== and operator!= for. It supplies a getter function to extract the +// value to compare. This is used to avoid triggering the automatic read +// barriers where appropriate. +// +// If DefineComparisonOps is not specialized for a particular wrapper you may +// get errors such as 'invalid operands to binary expression' or 'no match for +// operator==' when trying to compare against instances of the wrapper. + +template +struct DefineComparisonOps : mozilla::FalseType {}; + +template +struct DefineComparisonOps> : mozilla::TrueType { + static const T& get(const JS::Heap& v) { return v.unbarrieredGet(); } +}; + +template +struct DefineComparisonOps> : mozilla::TrueType { + static const T get(const JS::TenuredHeap& v) { return v.unbarrieredGetPtr(); } +}; + +template <> +struct DefineComparisonOps : mozilla::TrueType { + static const JSObject* get(const JS::ObjectPtr& v) { return v.unbarrieredGet(); } +}; + +template +struct DefineComparisonOps> : mozilla::TrueType { + static const T& get(const JS::Rooted& v) { return v.get(); } +}; + +template +struct DefineComparisonOps> : mozilla::TrueType { + static const T& get(const JS::Handle& v) { return v.get(); } +}; + +template +struct DefineComparisonOps> : mozilla::TrueType { + static const T& get(const JS::MutableHandle& v) { return v.get(); } +}; + +template +struct DefineComparisonOps> : mozilla::TrueType { + static const T& get(const JS::PersistentRooted& v) { 
return v.get(); } +}; + +template +struct DefineComparisonOps> : mozilla::TrueType { + static const T& get(const js::FakeRooted& v) { return v.get(); } +}; + +template +struct DefineComparisonOps> : mozilla::TrueType { + static const T& get(const js::FakeMutableHandle& v) { return v.get(); } +}; + +} /* namespace detail */ +} /* namespace js */ + +// Overload operator== and operator!= for all types with the DefineComparisonOps +// trait using the supplied getter. +// +// There are four cases: + +// Case 1: comparison between two wrapper objects. + +template +typename mozilla::EnableIf::value && + js::detail::DefineComparisonOps::value, bool>::Type +operator==(const T& a, const U& b) { + return js::detail::DefineComparisonOps::get(a) == js::detail::DefineComparisonOps::get(b); +} + +template +typename mozilla::EnableIf::value && + js::detail::DefineComparisonOps::value, bool>::Type +operator!=(const T& a, const U& b) { + return !(a == b); +} + +// Case 2: comparison between a wrapper object and its unwrapped element type. + +template +typename mozilla::EnableIf::value, bool>::Type +operator==(const T& a, const typename T::ElementType& b) { + return js::detail::DefineComparisonOps::get(a) == b; +} + +template +typename mozilla::EnableIf::value, bool>::Type +operator!=(const T& a, const typename T::ElementType& b) { + return !(a == b); +} + +template +typename mozilla::EnableIf::value, bool>::Type +operator==(const typename T::ElementType& a, const T& b) { + return a == js::detail::DefineComparisonOps::get(b); +} + +template +typename mozilla::EnableIf::value, bool>::Type +operator!=(const typename T::ElementType& a, const T& b) { + return !(a == b); +} + +// Case 3: For pointer wrappers, comparison between the wrapper and a const +// element pointer. 
+ +template +typename mozilla::EnableIf::value && + mozilla::IsPointer::value, bool>::Type +operator==(const typename mozilla::RemovePointer::Type* a, const T& b) { + return a == js::detail::DefineComparisonOps::get(b); +} + +template +typename mozilla::EnableIf::value && + mozilla::IsPointer::value, bool>::Type +operator!=(const typename mozilla::RemovePointer::Type* a, const T& b) { + return !(a == b); +} + +template +typename mozilla::EnableIf::value && + mozilla::IsPointer::value, bool>::Type +operator==(const T& a, const typename mozilla::RemovePointer::Type* b) { + return js::detail::DefineComparisonOps::get(a) == b; +} + +template +typename mozilla::EnableIf::value && + mozilla::IsPointer::value, bool>::Type +operator!=(const T& a, const typename mozilla::RemovePointer::Type* b) { + return !(a == b); +} + +// Case 4: For pointer wrappers, comparison between the wrapper and nullptr. + +template +typename mozilla::EnableIf::value && + mozilla::IsPointer::value, bool>::Type +operator==(std::nullptr_t a, const T& b) { + return a == js::detail::DefineComparisonOps::get(b); +} + +template +typename mozilla::EnableIf::value && + mozilla::IsPointer::value, bool>::Type +operator!=(std::nullptr_t a, const T& b) { + return !(a == b); +} + +template +typename mozilla::EnableIf::value && + mozilla::IsPointer::value, bool>::Type +operator==(const T& a, std::nullptr_t b) { + return js::detail::DefineComparisonOps::get(a) == b; +} + +template +typename mozilla::EnableIf::value && + mozilla::IsPointer::value, bool>::Type +operator!=(const T& a, std::nullptr_t b) { + return !(a == b); +} + +#undef DELETE_ASSIGNMENT_OPS + +#endif /* js_RootingAPI_h */ diff --git a/gecko/include/js/TracingAPI.h b/gecko/include/js/TracingAPI.h new file mode 100644 index 0000000..345e423 --- /dev/null +++ b/gecko/include/js/TracingAPI.h @@ -0,0 +1,443 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form 
is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef js_TracingAPI_h +#define js_TracingAPI_h + +#include "jsalloc.h" + +#include "js/HashTable.h" +#include "js/HeapAPI.h" +#include "js/TraceKind.h" + +class JS_PUBLIC_API(JSTracer); + +namespace JS { +class JS_PUBLIC_API(CallbackTracer); +template class Heap; +template class TenuredHeap; + +/** Returns a static string equivalent of |kind|. */ +JS_FRIEND_API(const char*) +GCTraceKindToAscii(JS::TraceKind kind); + +} // namespace JS + +enum WeakMapTraceKind { + /** + * Do not trace into weak map keys or values during traversal. Users must + * handle weak maps manually. + */ + DoNotTraceWeakMaps, + + /** + * Do true ephemeron marking with a weak key lookup marking phase. This is + * the default for GCMarker. + */ + ExpandWeakMaps, + + /** + * Trace through to all values, irrespective of whether the keys are live + * or not. Used for non-marking tracers. + */ + TraceWeakMapValues, + + /** + * Trace through to all keys and values, irrespective of whether the keys + * are live or not. Used for non-marking tracers. + */ + TraceWeakMapKeysValues +}; + +class JS_PUBLIC_API(JSTracer) +{ + public: + // Return the runtime set on the tracer. + JSRuntime* runtime() const { return runtime_; } + + // Return the weak map tracing behavior currently set on this tracer. + WeakMapTraceKind weakMapAction() const { return weakMapAction_; } + + enum class TracerKindTag { + // Marking path: a tracer used only for marking liveness of cells, not + // for moving them. The kind will transition to WeakMarking after + // everything reachable by regular edges has been marked. 
+ Marking, + + // Same as Marking, except we have now moved on to the "weak marking + // phase", in which every marked obj/script is immediately looked up to + // see if it is a weak map key (and therefore might require marking its + // weak map value). + WeakMarking, + + // A tracer that traverses the graph for the purposes of moving objects + // from the nursery to the tenured area. + Tenuring, + + // General-purpose traversal that invokes a callback on each cell. + // Traversing children is the responsibility of the callback. + Callback + }; + bool isMarkingTracer() const { return tag_ == TracerKindTag::Marking || tag_ == TracerKindTag::WeakMarking; } + bool isWeakMarkingTracer() const { return tag_ == TracerKindTag::WeakMarking; } + bool isTenuringTracer() const { return tag_ == TracerKindTag::Tenuring; } + bool isCallbackTracer() const { return tag_ == TracerKindTag::Callback; } + inline JS::CallbackTracer* asCallbackTracer(); + bool traceWeakEdges() const { return traceWeakEdges_; } +#ifdef DEBUG + bool checkEdges() { return checkEdges_; } +#endif + + protected: + JSTracer(JSRuntime* rt, TracerKindTag tag, + WeakMapTraceKind weakTraceKind = TraceWeakMapValues) + : runtime_(rt) + , weakMapAction_(weakTraceKind) +#ifdef DEBUG + , checkEdges_(true) +#endif + , tag_(tag) + , traceWeakEdges_(true) + {} + +#ifdef DEBUG + // Set whether to check edges are valid in debug builds. 
+ void setCheckEdges(bool check) { + checkEdges_ = check; + } +#endif + + private: + JSRuntime* runtime_; + WeakMapTraceKind weakMapAction_; +#ifdef DEBUG + bool checkEdges_; +#endif + + protected: + TracerKindTag tag_; + bool traceWeakEdges_; +}; + +namespace JS { + +class AutoTracingName; +class AutoTracingIndex; +class AutoTracingCallback; + +class JS_PUBLIC_API(CallbackTracer) : public JSTracer +{ + public: + CallbackTracer(JSRuntime* rt, WeakMapTraceKind weakTraceKind = TraceWeakMapValues) + : JSTracer(rt, JSTracer::TracerKindTag::Callback, weakTraceKind), + contextName_(nullptr), contextIndex_(InvalidIndex), contextFunctor_(nullptr) + {} + CallbackTracer(JSContext* cx, WeakMapTraceKind weakTraceKind = TraceWeakMapValues); + + // Override these methods to receive notification when an edge is visited + // with the type contained in the callback. The default implementation + // dispatches to the fully-generic onChild implementation, so for cases that + // do not care about boxing overhead and do not need the actual edges, + // just override the generic onChild. 
+ virtual void onObjectEdge(JSObject** objp) { onChild(JS::GCCellPtr(*objp)); } + virtual void onStringEdge(JSString** strp) { onChild(JS::GCCellPtr(*strp)); } + virtual void onSymbolEdge(JS::Symbol** symp) { onChild(JS::GCCellPtr(*symp)); } + virtual void onScriptEdge(JSScript** scriptp) { onChild(JS::GCCellPtr(*scriptp)); } + virtual void onShapeEdge(js::Shape** shapep) { + onChild(JS::GCCellPtr(*shapep, JS::TraceKind::Shape)); + } + virtual void onObjectGroupEdge(js::ObjectGroup** groupp) { + onChild(JS::GCCellPtr(*groupp, JS::TraceKind::ObjectGroup)); + } + virtual void onBaseShapeEdge(js::BaseShape** basep) { + onChild(JS::GCCellPtr(*basep, JS::TraceKind::BaseShape)); + } + virtual void onJitCodeEdge(js::jit::JitCode** codep) { + onChild(JS::GCCellPtr(*codep, JS::TraceKind::JitCode)); + } + virtual void onLazyScriptEdge(js::LazyScript** lazyp) { + onChild(JS::GCCellPtr(*lazyp, JS::TraceKind::LazyScript)); + } + virtual void onScopeEdge(js::Scope** scopep) { + onChild(JS::GCCellPtr(*scopep, JS::TraceKind::Scope)); + } + virtual void onRegExpSharedEdge(js::RegExpShared** sharedp) { + onChild(JS::GCCellPtr(*sharedp, JS::TraceKind::RegExpShared)); + } + + // Override this method to receive notification when a node in the GC + // heap graph is visited. + virtual void onChild(const JS::GCCellPtr& thing) = 0; + + // Access to the tracing context: + // When tracing with a JS::CallbackTracer, we invoke the callback with the + // edge location and the type of target. This is useful for operating on + // the edge in the abstract or on the target thing, satisfying most common + // use cases. However, some tracers need additional detail about the + // specific edge that is being traced in order to be useful. Unfortunately, + // the raw pointer to the edge that we provide is not enough information to + // infer much of anything useful about that edge. 
+ // + // In order to better support use cases that care in particular about edges + // -- as opposed to the target thing -- tracing implementations are + // responsible for providing extra context information about each edge they + // trace, as it is traced. This contains, at a minimum, an edge name and, + // when tracing an array, the index. Further specialization can be achived + // (with some complexity), by associating a functor with the tracer so + // that, when requested, the user can generate totally custom edge + // descriptions. + + // Returns the current edge's name. It is only valid to call this when + // inside the trace callback, however, the edge name will always be set. + const char* contextName() const { MOZ_ASSERT(contextName_); return contextName_; } + + // Returns the current edge's index, if marked as part of an array of edges. + // This must be called only inside the trace callback. When not tracing an + // array, the value will be InvalidIndex. + const static size_t InvalidIndex = size_t(-1); + size_t contextIndex() const { return contextIndex_; } + + // Build a description of this edge in the heap graph. This call may invoke + // the context functor, if set, which may inspect arbitrary areas of the + // heap. On the other hand, the description provided by this method may be + // substantially more accurate and useful than those provided by only the + // contextName and contextIndex. + void getTracingEdgeName(char* buffer, size_t bufferSize); + + // The trace implementation may associate a callback with one or more edges + // using AutoTracingDetails. This functor is called by getTracingEdgeName + // and is responsible for providing a textual representation of the + // currently being traced edge. The callback has access to the full heap, + // including the currently set tracing context. 
+ class ContextFunctor { + public: + virtual void operator()(CallbackTracer* trc, char* buf, size_t bufsize) = 0; + }; + +#ifdef DEBUG + enum class TracerKind { + DoNotCare, + Moving, + GrayBuffering, + VerifyTraceProtoAndIface, + ClearEdges, + UnmarkGray + }; + virtual TracerKind getTracerKind() const { return TracerKind::DoNotCare; } +#endif + + // In C++, overriding a method hides all methods in the base class with + // that name, not just methods with that signature. Thus, the typed edge + // methods have to have distinct names to allow us to override them + // individually, which is freqently useful if, for example, we only want to + // process only one type of edge. + void dispatchToOnEdge(JSObject** objp) { onObjectEdge(objp); } + void dispatchToOnEdge(JSString** strp) { onStringEdge(strp); } + void dispatchToOnEdge(JS::Symbol** symp) { onSymbolEdge(symp); } + void dispatchToOnEdge(JSScript** scriptp) { onScriptEdge(scriptp); } + void dispatchToOnEdge(js::Shape** shapep) { onShapeEdge(shapep); } + void dispatchToOnEdge(js::ObjectGroup** groupp) { onObjectGroupEdge(groupp); } + void dispatchToOnEdge(js::BaseShape** basep) { onBaseShapeEdge(basep); } + void dispatchToOnEdge(js::jit::JitCode** codep) { onJitCodeEdge(codep); } + void dispatchToOnEdge(js::LazyScript** lazyp) { onLazyScriptEdge(lazyp); } + void dispatchToOnEdge(js::Scope** scopep) { onScopeEdge(scopep); } + void dispatchToOnEdge(js::RegExpShared** sharedp) { onRegExpSharedEdge(sharedp); } + + protected: + void setTraceWeakEdges(bool value) { + traceWeakEdges_ = value; + } + + private: + friend class AutoTracingName; + const char* contextName_; + + friend class AutoTracingIndex; + size_t contextIndex_; + + friend class AutoTracingDetails; + ContextFunctor* contextFunctor_; +}; + +// Set the name portion of the tracer's context for the current edge. 
+class MOZ_RAII AutoTracingName +{ + CallbackTracer* trc_; + const char* prior_; + + public: + AutoTracingName(CallbackTracer* trc, const char* name) : trc_(trc), prior_(trc->contextName_) { + MOZ_ASSERT(name); + trc->contextName_ = name; + } + ~AutoTracingName() { + MOZ_ASSERT(trc_->contextName_); + trc_->contextName_ = prior_; + } +}; + +// Set the index portion of the tracer's context for the current range. +class MOZ_RAII AutoTracingIndex +{ + CallbackTracer* trc_; + + public: + explicit AutoTracingIndex(JSTracer* trc, size_t initial = 0) : trc_(nullptr) { + if (trc->isCallbackTracer()) { + trc_ = trc->asCallbackTracer(); + MOZ_ASSERT(trc_->contextIndex_ == CallbackTracer::InvalidIndex); + trc_->contextIndex_ = initial; + } + } + ~AutoTracingIndex() { + if (trc_) { + MOZ_ASSERT(trc_->contextIndex_ != CallbackTracer::InvalidIndex); + trc_->contextIndex_ = CallbackTracer::InvalidIndex; + } + } + + void operator++() { + if (trc_) { + MOZ_ASSERT(trc_->contextIndex_ != CallbackTracer::InvalidIndex); + ++trc_->contextIndex_; + } + } +}; + +// Set a context callback for the trace callback to use, if it needs a detailed +// edge description. 
+class MOZ_RAII AutoTracingDetails +{ + CallbackTracer* trc_; + + public: + AutoTracingDetails(JSTracer* trc, CallbackTracer::ContextFunctor& func) : trc_(nullptr) { + if (trc->isCallbackTracer()) { + trc_ = trc->asCallbackTracer(); + MOZ_ASSERT(trc_->contextFunctor_ == nullptr); + trc_->contextFunctor_ = &func; + } + } + ~AutoTracingDetails() { + if (trc_) { + MOZ_ASSERT(trc_->contextFunctor_); + trc_->contextFunctor_ = nullptr; + } + } +}; + +} // namespace JS + +JS::CallbackTracer* +JSTracer::asCallbackTracer() +{ + MOZ_ASSERT(isCallbackTracer()); + return static_cast(this); +} + +namespace js { +namespace gc { +template +JS_PUBLIC_API(void) TraceExternalEdge(JSTracer* trc, T* thingp, const char* name); +} // namespace gc +} // namespace js + +namespace JS { + +// The JS::TraceEdge family of functions traces the given GC thing reference. +// This performs the tracing action configured on the given JSTracer: typically +// calling the JSTracer::callback or marking the thing as live. +// +// The argument to JS::TraceEdge is an in-out param: when the function returns, +// the garbage collector might have moved the GC thing. In this case, the +// reference passed to JS::TraceEdge will be updated to the thing's new +// location. Callers of this method are responsible for updating any state that +// is dependent on the object's address. For example, if the object's address +// is used as a key in a hashtable, then the object must be removed and +// re-inserted with the correct hash. +// +// Note that while |edgep| must never be null, it is fine for |*edgep| to be +// nullptr. 
+ +template +inline void +TraceEdge(JSTracer* trc, JS::Heap* thingp, const char* name) +{ + MOZ_ASSERT(thingp); + if (*thingp) + js::gc::TraceExternalEdge(trc, thingp->unsafeGet(), name); +} + +template +inline void +TraceEdge(JSTracer* trc, JS::TenuredHeap* thingp, const char* name) +{ + MOZ_ASSERT(thingp); + if (T ptr = thingp->unbarrieredGetPtr()) { + js::gc::TraceExternalEdge(trc, &ptr, name); + thingp->setPtr(ptr); + } +} + +// Edges that are always traced as part of root marking do not require +// incremental barriers. This function allows for marking non-barriered +// pointers, but asserts that this happens during root marking. +// +// Note that while |edgep| must never be null, it is fine for |*edgep| to be +// nullptr. +template +extern JS_PUBLIC_API(void) +UnsafeTraceRoot(JSTracer* trc, T* edgep, const char* name); + +extern JS_PUBLIC_API(void) +TraceChildren(JSTracer* trc, GCCellPtr thing); + +using ZoneSet = js::HashSet, js::SystemAllocPolicy>; +using CompartmentSet = js::HashSet, + js::SystemAllocPolicy>; + +/** + * Trace every value within |compartments| that is wrapped by a + * cross-compartment wrapper from a compartment that is not an element of + * |compartments|. + */ +extern JS_PUBLIC_API(void) +TraceIncomingCCWs(JSTracer* trc, const JS::CompartmentSet& compartments); + +} // namespace JS + +extern JS_PUBLIC_API(void) +JS_GetTraceThingInfo(char* buf, size_t bufsize, JSTracer* trc, + void* thing, JS::TraceKind kind, bool includeDetails); + +namespace js { + +// Trace an edge that is not a GC root and is not wrapped in a barriered +// wrapper for some reason. +// +// This method does not check if |*edgep| is non-null before tracing through +// it, so callers must check any nullable pointer before calling this method. +template +extern JS_PUBLIC_API(void) +UnsafeTraceManuallyBarrieredEdge(JSTracer* trc, T* edgep, const char* name); + +namespace gc { + +// Return true if the given edge is not live and is about to be swept. 
+template +extern JS_PUBLIC_API(bool) +EdgeNeedsSweep(JS::Heap* edgep); + +// Not part of the public API, but declared here so we can use it in GCPolicy +// which is. +template +bool +IsAboutToBeFinalizedUnbarriered(T* thingp); + +} // namespace gc +} // namespace js + +#endif /* js_TracingAPI_h */ diff --git a/gecko/include/js/UniquePtr.h b/gecko/include/js/UniquePtr.h new file mode 100644 index 0000000..37e14bc --- /dev/null +++ b/gecko/include/js/UniquePtr.h @@ -0,0 +1,61 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef js_UniquePtr_h +#define js_UniquePtr_h + +#include "mozilla/UniquePtr.h" + +#include "js/Utility.h" + +namespace js { + +// Replacement for mozilla::UniquePtr that defaults to JS::DeletePolicy. +template > +using UniquePtr = mozilla::UniquePtr; + +namespace detail { + +template +struct UniqueSelector +{ + typedef UniquePtr SingleObject; +}; + +template +struct UniqueSelector +{ + typedef UniquePtr UnknownBound; +}; + +template +struct UniqueSelector +{ + typedef UniquePtr KnownBound; +}; + +} // namespace detail + +// Replacement for mozilla::MakeUnique that correctly calls js_new and produces +// a js::UniquePtr. +template +typename detail::UniqueSelector::SingleObject +MakeUnique(Args&&... aArgs) +{ + return UniquePtr(js_new(mozilla::Forward(aArgs)...)); +} + +template +typename detail::UniqueSelector::UnknownBound +MakeUnique(decltype(sizeof(int)) aN) = delete; + +template +typename detail::UniqueSelector::KnownBound +MakeUnique(Args&&... 
aArgs) = delete; + +} // namespace js + +#endif /* js_UniquePtr_h */ diff --git a/gecko/include/js/Value.h b/gecko/include/js/Value.h new file mode 100644 index 0000000..92ab005 --- /dev/null +++ b/gecko/include/js/Value.h @@ -0,0 +1,1486 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* JS::Value implementation. */ + +#ifndef js_Value_h +#define js_Value_h + +#include "mozilla/Attributes.h" +#include "mozilla/Casting.h" +#include "mozilla/FloatingPoint.h" +#include "mozilla/Likely.h" + +#include /* for std::numeric_limits */ + +#include "js-config.h" +#include "jstypes.h" + +#include "js/GCAPI.h" +#include "js/RootingAPI.h" +#include "js/Utility.h" + +namespace JS { class Value; } + +/* JS::Value can store a full int32_t. */ +#define JSVAL_INT_BITS 32 +#define JSVAL_INT_MIN ((int32_t)0x80000000) +#define JSVAL_INT_MAX ((int32_t)0x7fffffff) + +#if defined(JS_PUNBOX64) +# define JSVAL_TAG_SHIFT 47 +#endif + +// Use enums so that printing a JS::Value in the debugger shows nice +// symbolic type tags. + +#if defined(_MSC_VER) +# define JS_ENUM_HEADER(id, type) enum id : type +# define JS_ENUM_FOOTER(id) +#else +# define JS_ENUM_HEADER(id, type) enum id +# define JS_ENUM_FOOTER(id) __attribute__((packed)) +#endif + +JS_ENUM_HEADER(JSValueType, uint8_t) +{ + JSVAL_TYPE_DOUBLE = 0x00, + JSVAL_TYPE_INT32 = 0x01, + JSVAL_TYPE_UNDEFINED = 0x02, + JSVAL_TYPE_NULL = 0x03, + JSVAL_TYPE_BOOLEAN = 0x04, + JSVAL_TYPE_MAGIC = 0x05, + JSVAL_TYPE_STRING = 0x06, + JSVAL_TYPE_SYMBOL = 0x07, + JSVAL_TYPE_PRIVATE_GCTHING = 0x08, + JSVAL_TYPE_OBJECT = 0x0c, + + /* These never appear in a jsval; they are only provided as an out-of-band value. 
*/ + JSVAL_TYPE_UNKNOWN = 0x20, + JSVAL_TYPE_MISSING = 0x21 +} JS_ENUM_FOOTER(JSValueType); + +static_assert(sizeof(JSValueType) == 1, + "compiler typed enum support is apparently buggy"); + +#if defined(JS_NUNBOX32) + +JS_ENUM_HEADER(JSValueTag, uint32_t) +{ + JSVAL_TAG_CLEAR = 0xFFFFFF80, + JSVAL_TAG_INT32 = JSVAL_TAG_CLEAR | JSVAL_TYPE_INT32, + JSVAL_TAG_UNDEFINED = JSVAL_TAG_CLEAR | JSVAL_TYPE_UNDEFINED, + JSVAL_TAG_NULL = JSVAL_TAG_CLEAR | JSVAL_TYPE_NULL, + JSVAL_TAG_BOOLEAN = JSVAL_TAG_CLEAR | JSVAL_TYPE_BOOLEAN, + JSVAL_TAG_MAGIC = JSVAL_TAG_CLEAR | JSVAL_TYPE_MAGIC, + JSVAL_TAG_STRING = JSVAL_TAG_CLEAR | JSVAL_TYPE_STRING, + JSVAL_TAG_SYMBOL = JSVAL_TAG_CLEAR | JSVAL_TYPE_SYMBOL, + JSVAL_TAG_PRIVATE_GCTHING = JSVAL_TAG_CLEAR | JSVAL_TYPE_PRIVATE_GCTHING, + JSVAL_TAG_OBJECT = JSVAL_TAG_CLEAR | JSVAL_TYPE_OBJECT +} JS_ENUM_FOOTER(JSValueTag); + +static_assert(sizeof(JSValueTag) == sizeof(uint32_t), + "compiler typed enum support is apparently buggy"); + +#elif defined(JS_PUNBOX64) + +JS_ENUM_HEADER(JSValueTag, uint32_t) +{ + JSVAL_TAG_MAX_DOUBLE = 0x1FFF0, + JSVAL_TAG_INT32 = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_INT32, + JSVAL_TAG_UNDEFINED = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_UNDEFINED, + JSVAL_TAG_NULL = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_NULL, + JSVAL_TAG_BOOLEAN = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_BOOLEAN, + JSVAL_TAG_MAGIC = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_MAGIC, + JSVAL_TAG_STRING = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_STRING, + JSVAL_TAG_SYMBOL = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_SYMBOL, + JSVAL_TAG_PRIVATE_GCTHING = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_PRIVATE_GCTHING, + JSVAL_TAG_OBJECT = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_OBJECT +} JS_ENUM_FOOTER(JSValueTag); + +static_assert(sizeof(JSValueTag) == sizeof(uint32_t), + "compiler typed enum support is apparently buggy"); + +JS_ENUM_HEADER(JSValueShiftedTag, uint64_t) +{ + JSVAL_SHIFTED_TAG_MAX_DOUBLE = ((((uint64_t)JSVAL_TAG_MAX_DOUBLE) << JSVAL_TAG_SHIFT) | 0xFFFFFFFF), + JSVAL_SHIFTED_TAG_INT32 = 
(((uint64_t)JSVAL_TAG_INT32) << JSVAL_TAG_SHIFT), + JSVAL_SHIFTED_TAG_UNDEFINED = (((uint64_t)JSVAL_TAG_UNDEFINED) << JSVAL_TAG_SHIFT), + JSVAL_SHIFTED_TAG_NULL = (((uint64_t)JSVAL_TAG_NULL) << JSVAL_TAG_SHIFT), + JSVAL_SHIFTED_TAG_BOOLEAN = (((uint64_t)JSVAL_TAG_BOOLEAN) << JSVAL_TAG_SHIFT), + JSVAL_SHIFTED_TAG_MAGIC = (((uint64_t)JSVAL_TAG_MAGIC) << JSVAL_TAG_SHIFT), + JSVAL_SHIFTED_TAG_STRING = (((uint64_t)JSVAL_TAG_STRING) << JSVAL_TAG_SHIFT), + JSVAL_SHIFTED_TAG_SYMBOL = (((uint64_t)JSVAL_TAG_SYMBOL) << JSVAL_TAG_SHIFT), + JSVAL_SHIFTED_TAG_PRIVATE_GCTHING = (((uint64_t)JSVAL_TAG_PRIVATE_GCTHING) << JSVAL_TAG_SHIFT), + JSVAL_SHIFTED_TAG_OBJECT = (((uint64_t)JSVAL_TAG_OBJECT) << JSVAL_TAG_SHIFT) +} JS_ENUM_FOOTER(JSValueShiftedTag); + +static_assert(sizeof(JSValueShiftedTag) == sizeof(uint64_t), + "compiler typed enum support is apparently buggy"); + +#endif + +/* + * All our supported compilers implement C++11 |enum Foo : T| syntax, so don't + * expose these macros. (This macro exists *only* because gcc bug 51242 + * makes bit-fields of + * typed enums trigger a warning that can't be turned off. Don't expose it + * beyond this file!) 
+ */ +#undef JS_ENUM_HEADER +#undef JS_ENUM_FOOTER + +#if defined(JS_NUNBOX32) + +#define JSVAL_TYPE_TO_TAG(type) ((JSValueTag)(JSVAL_TAG_CLEAR | (type))) + +#define JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET JSVAL_TAG_OBJECT +#define JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET JSVAL_TAG_INT32 +#define JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET JSVAL_TAG_STRING + +#elif defined(JS_PUNBOX64) + +#define JSVAL_PAYLOAD_MASK 0x00007FFFFFFFFFFFLL +#define JSVAL_TAG_MASK 0xFFFF800000000000LL +#define JSVAL_TYPE_TO_TAG(type) ((JSValueTag)(JSVAL_TAG_MAX_DOUBLE | (type))) +#define JSVAL_TYPE_TO_SHIFTED_TAG(type) (((uint64_t)JSVAL_TYPE_TO_TAG(type)) << JSVAL_TAG_SHIFT) + +#define JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET JSVAL_TAG_OBJECT +#define JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET JSVAL_TAG_INT32 +#define JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET JSVAL_TAG_STRING + +#define JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_PRIMITIVE_SET JSVAL_SHIFTED_TAG_OBJECT +#define JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_NUMBER_SET JSVAL_SHIFTED_TAG_UNDEFINED +#define JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET JSVAL_SHIFTED_TAG_STRING + +#endif /* JS_PUNBOX64 */ + +typedef enum JSWhyMagic +{ + /** a hole in a native object's elements */ + JS_ELEMENTS_HOLE, + + /** there is not a pending iterator value */ + JS_NO_ITER_VALUE, + + /** exception value thrown when closing a generator */ + JS_GENERATOR_CLOSING, + + /** compiler sentinel value */ + JS_NO_CONSTANT, + + /** used in debug builds to catch tracing errors */ + JS_THIS_POISON, + + /** used in debug builds to catch tracing errors */ + JS_ARG_POISON, + + /** an empty subnode in the AST serializer */ + JS_SERIALIZE_NO_NODE, + + /** lazy arguments value on the stack */ + JS_LAZY_ARGUMENTS, + + /** optimized-away 'arguments' value */ + JS_OPTIMIZED_ARGUMENTS, + + /** magic value passed to natives to indicate construction */ + JS_IS_CONSTRUCTING, + + /** value of static block object slot */ + JS_BLOCK_NEEDS_CLONE, + + /** see class js::HashableValue */ + JS_HASH_KEY_EMPTY, + + /** error while 
running Ion code */ + JS_ION_ERROR, + + /** missing recover instruction result */ + JS_ION_BAILOUT, + + /** optimized out slot */ + JS_OPTIMIZED_OUT, + + /** uninitialized lexical bindings that produce ReferenceError on touch. */ + JS_UNINITIALIZED_LEXICAL, + + /** for local use */ + JS_GENERIC_MAGIC, + + JS_WHY_MAGIC_COUNT +} JSWhyMagic; + +namespace js { +static inline JS::Value PoisonedObjectValue(uintptr_t poison); +} // namespace js + +namespace JS { + +static inline constexpr JS::Value UndefinedValue(); + +namespace detail { + +constexpr int CanonicalizedNaNSignBit = 0; +constexpr uint64_t CanonicalizedNaNSignificand = 0x8000000000000ULL; + +constexpr uint64_t CanonicalizedNaNBits = + mozilla::SpecificNaNBits::value; + +} // namespace detail + +/** + * Returns a generic quiet NaN value, with all payload bits set to zero. + * + * Among other properties, this NaN's bit pattern conforms to JS::Value's + * bit pattern restrictions. + */ +static MOZ_ALWAYS_INLINE double +GenericNaN() +{ + return mozilla::SpecificNaN(detail::CanonicalizedNaNSignBit, + detail::CanonicalizedNaNSignificand); +} + +/* MSVC with PGO miscompiles this function. */ +#if defined(_MSC_VER) +# pragma optimize("g", off) +#endif +static inline double +CanonicalizeNaN(double d) +{ + if (MOZ_UNLIKELY(mozilla::IsNaN(d))) + return GenericNaN(); + return d; +} +#if defined(_MSC_VER) +# pragma optimize("", on) +#endif + +/** + * JS::Value is the interface for a single JavaScript Engine value. A few + * general notes on JS::Value: + * + * - JS::Value has setX() and isX() members for X in + * + * { Int32, Double, String, Symbol, Boolean, Undefined, Null, Object, Magic } + * + * JS::Value also contains toX() for each of the non-singleton types. + * + * - Magic is a singleton type whose payload contains either a JSWhyMagic "reason" for + * the magic value or a uint32_t value. 
By providing JSWhyMagic values when + * creating and checking for magic values, it is possible to assert, at + * runtime, that only magic values with the expected reason flow through a + * particular value. For example, if cx->exception has a magic value, the + * reason must be JS_GENERATOR_CLOSING. + * + * - The JS::Value operations are preferred. The JSVAL_* operations remain for + * compatibility; they may be removed at some point. These operations mostly + * provide similar functionality. But there are a few key differences. One + * is that JS::Value gives null a separate type. + * Also, to help prevent mistakenly boxing a nullable JSObject* as an object, + * Value::setObject takes a JSObject&. (Conversely, Value::toObject returns a + * JSObject&.) A convenience member Value::setObjectOrNull is provided. + * + * - JSVAL_VOID is the same as the singleton value of the Undefined type. + * + * - Note that JS::Value is 8 bytes on 32 and 64-bit architectures. Thus, on + * 32-bit user code should avoid copying jsval/JS::Value as much as possible, + * preferring to pass by const Value&. + */ +class MOZ_NON_PARAM alignas(8) Value +{ + public: +#if defined(JS_NUNBOX32) + using PayloadType = uint32_t; +#elif defined(JS_PUNBOX64) + using PayloadType = uint64_t; +#endif + + /* + * N.B. the default constructor leaves Value unitialized. Adding a default + * constructor prevents Value from being stored in a union. + */ + Value() = default; + Value(const Value& v) = default; + + /** + * Returns false if creating a NumberValue containing the given type would + * be lossy, true otherwise. 
+ */ + template + static bool isNumberRepresentable(const T t) { + return T(double(t)) == t; + } + + /*** Mutators ***/ + + void setNull() { + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_NULL, 0); + } + + void setUndefined() { + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_UNDEFINED, 0); + } + + void setInt32(int32_t i) { + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_INT32, uint32_t(i)); + } + + int32_t& getInt32Ref() { + MOZ_ASSERT(isInt32()); + return data.s.payload.i32; + } + + void setDouble(double d) { + // Don't assign to data.asDouble to fix a miscompilation with + // GCC 5.2.1 and 5.3.1. See bug 1312488. + data = layout(d); + MOZ_ASSERT(isDouble()); + } + + void setNaN() { + setDouble(GenericNaN()); + } + + double& getDoubleRef() { + MOZ_ASSERT(isDouble()); + return data.asDouble; + } + + void setString(JSString* str) { + MOZ_ASSERT(uintptr_t(str) > 0x1000); + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_STRING, PayloadType(str)); + } + + void setSymbol(JS::Symbol* sym) { + MOZ_ASSERT(uintptr_t(sym) > 0x1000); + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_SYMBOL, PayloadType(sym)); + } + + void setObject(JSObject& obj) { + MOZ_ASSERT(uintptr_t(&obj) >= 0x1000); +#if defined(JS_PUNBOX64) + // VisualStudio cannot contain parenthesized C++ style cast and shift + // inside decltype in template parameter: + // AssertionConditionType> 1))> + // It throws syntax error. 
+ MOZ_ASSERT((((uintptr_t)&obj) >> JSVAL_TAG_SHIFT) == 0); +#endif + setObjectNoCheck(&obj); + } + + private: + void setObjectNoCheck(JSObject* obj) { + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_OBJECT, PayloadType(obj)); + } + + friend inline Value js::PoisonedObjectValue(uintptr_t poison); + + public: + void setBoolean(bool b) { + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_BOOLEAN, uint32_t(b)); + } + + void setMagic(JSWhyMagic why) { + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_MAGIC, uint32_t(why)); + } + + void setMagicUint32(uint32_t payload) { + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_MAGIC, payload); + } + + bool setNumber(uint32_t ui) { + if (ui > JSVAL_INT_MAX) { + setDouble((double)ui); + return false; + } else { + setInt32((int32_t)ui); + return true; + } + } + + bool setNumber(double d) { + int32_t i; + if (mozilla::NumberIsInt32(d, &i)) { + setInt32(i); + return true; + } + + setDouble(d); + return false; + } + + void setObjectOrNull(JSObject* arg) { + if (arg) + setObject(*arg); + else + setNull(); + } + + void swap(Value& rhs) { + uint64_t tmp = rhs.data.asBits; + rhs.data.asBits = data.asBits; + data.asBits = tmp; + } + + private: + JSValueTag toTag() const { +#if defined(JS_NUNBOX32) + return data.s.tag; +#elif defined(JS_PUNBOX64) + return JSValueTag(data.asBits >> JSVAL_TAG_SHIFT); +#endif + } + + public: + /*** JIT-only interfaces to interact with and create raw Values ***/ +#if defined(JS_NUNBOX32) + PayloadType toNunboxPayload() const { + return static_cast(data.s.payload.i32); + } + + JSValueTag toNunboxTag() const { + return data.s.tag; + } +#elif defined(JS_PUNBOX64) + const void* bitsAsPunboxPointer() const { + return reinterpret_cast(data.asBits); + } +#endif + + /*** Value type queries ***/ + + /* + * N.B. GCC, in some but not all cases, chooses to emit signed comparison + * of JSValueTag even though its underlying type has been forced to be + * uint32_t. 
Thus, all comparisons should explicitly cast operands to + * uint32_t. + */ + + bool isUndefined() const { +#if defined(JS_NUNBOX32) + return toTag() == JSVAL_TAG_UNDEFINED; +#elif defined(JS_PUNBOX64) + return data.asBits == JSVAL_SHIFTED_TAG_UNDEFINED; +#endif + } + + bool isNull() const { +#if defined(JS_NUNBOX32) + return toTag() == JSVAL_TAG_NULL; +#elif defined(JS_PUNBOX64) + return data.asBits == JSVAL_SHIFTED_TAG_NULL; +#endif + } + + bool isNullOrUndefined() const { + return isNull() || isUndefined(); + } + + bool isInt32() const { + return toTag() == JSVAL_TAG_INT32; + } + + bool isInt32(int32_t i32) const { + return data.asBits == bitsFromTagAndPayload(JSVAL_TAG_INT32, uint32_t(i32)); + } + + bool isDouble() const { +#if defined(JS_NUNBOX32) + return uint32_t(toTag()) <= uint32_t(JSVAL_TAG_CLEAR); +#elif defined(JS_PUNBOX64) + return (data.asBits | mozilla::DoubleTypeTraits::kSignBit) <= JSVAL_SHIFTED_TAG_MAX_DOUBLE; +#endif + } + + bool isNumber() const { +#if defined(JS_NUNBOX32) + MOZ_ASSERT(toTag() != JSVAL_TAG_CLEAR); + return uint32_t(toTag()) <= uint32_t(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET); +#elif defined(JS_PUNBOX64) + return data.asBits < JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_NUMBER_SET; +#endif + } + + bool isString() const { + return toTag() == JSVAL_TAG_STRING; + } + + bool isSymbol() const { + return toTag() == JSVAL_TAG_SYMBOL; + } + + bool isObject() const { +#if defined(JS_NUNBOX32) + return toTag() == JSVAL_TAG_OBJECT; +#elif defined(JS_PUNBOX64) + MOZ_ASSERT((data.asBits >> JSVAL_TAG_SHIFT) <= JSVAL_TAG_OBJECT); + return data.asBits >= JSVAL_SHIFTED_TAG_OBJECT; +#endif + } + + bool isPrimitive() const { +#if defined(JS_NUNBOX32) + return uint32_t(toTag()) < uint32_t(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET); +#elif defined(JS_PUNBOX64) + return data.asBits < JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_PRIMITIVE_SET; +#endif + } + + bool isObjectOrNull() const { + return isObject() || isNull(); + } + + bool isGCThing() const { +#if defined(JS_NUNBOX32) + /* 
gcc sometimes generates signed < without explicit casts. */ + return uint32_t(toTag()) >= uint32_t(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET); +#elif defined(JS_PUNBOX64) + return data.asBits >= JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET; +#endif + } + + bool isBoolean() const { + return toTag() == JSVAL_TAG_BOOLEAN; + } + + bool isTrue() const { + return data.asBits == bitsFromTagAndPayload(JSVAL_TAG_BOOLEAN, uint32_t(true)); + } + + bool isFalse() const { + return data.asBits == bitsFromTagAndPayload(JSVAL_TAG_BOOLEAN, uint32_t(false)); + } + + bool isMagic() const { + return toTag() == JSVAL_TAG_MAGIC; + } + + bool isMagic(JSWhyMagic why) const { + MOZ_ASSERT_IF(isMagic(), data.s.payload.why == why); + return isMagic(); + } + + JS::TraceKind traceKind() const { + MOZ_ASSERT(isGCThing()); + static_assert((JSVAL_TAG_STRING & 0x03) == size_t(JS::TraceKind::String), + "Value type tags must correspond with JS::TraceKinds."); + static_assert((JSVAL_TAG_SYMBOL & 0x03) == size_t(JS::TraceKind::Symbol), + "Value type tags must correspond with JS::TraceKinds."); + static_assert((JSVAL_TAG_OBJECT & 0x03) == size_t(JS::TraceKind::Object), + "Value type tags must correspond with JS::TraceKinds."); + if (MOZ_UNLIKELY(isPrivateGCThing())) + return JS::GCThingTraceKind(toGCThing()); + return JS::TraceKind(toTag() & 0x03); + } + + JSWhyMagic whyMagic() const { + MOZ_ASSERT(isMagic()); + return data.s.payload.why; + } + + uint32_t magicUint32() const { + MOZ_ASSERT(isMagic()); + return data.s.payload.u32; + } + + /*** Comparison ***/ + + bool operator==(const Value& rhs) const { + return data.asBits == rhs.data.asBits; + } + + bool operator!=(const Value& rhs) const { + return data.asBits != rhs.data.asBits; + } + + friend inline bool SameType(const Value& lhs, const Value& rhs); + + /*** Extract the value's typed payload ***/ + + int32_t toInt32() const { + MOZ_ASSERT(isInt32()); +#if defined(JS_NUNBOX32) + return data.s.payload.i32; +#elif defined(JS_PUNBOX64) + return 
int32_t(data.asBits); +#endif + } + + double toDouble() const { + MOZ_ASSERT(isDouble()); + return data.asDouble; + } + + double toNumber() const { + MOZ_ASSERT(isNumber()); + return isDouble() ? toDouble() : double(toInt32()); + } + + JSString* toString() const { + MOZ_ASSERT(isString()); +#if defined(JS_NUNBOX32) + return data.s.payload.str; +#elif defined(JS_PUNBOX64) + return reinterpret_cast(data.asBits & JSVAL_PAYLOAD_MASK); +#endif + } + + JS::Symbol* toSymbol() const { + MOZ_ASSERT(isSymbol()); +#if defined(JS_NUNBOX32) + return data.s.payload.sym; +#elif defined(JS_PUNBOX64) + return reinterpret_cast(data.asBits & JSVAL_PAYLOAD_MASK); +#endif + } + + JSObject& toObject() const { + MOZ_ASSERT(isObject()); +#if defined(JS_NUNBOX32) + return *data.s.payload.obj; +#elif defined(JS_PUNBOX64) + return *toObjectOrNull(); +#endif + } + + JSObject* toObjectOrNull() const { + MOZ_ASSERT(isObjectOrNull()); +#if defined(JS_NUNBOX32) + return data.s.payload.obj; +#elif defined(JS_PUNBOX64) + uint64_t ptrBits = data.asBits & JSVAL_PAYLOAD_MASK; + MOZ_ASSERT((ptrBits & 0x7) == 0); + return reinterpret_cast(ptrBits); +#endif + } + + js::gc::Cell* toGCThing() const { + MOZ_ASSERT(isGCThing()); +#if defined(JS_NUNBOX32) + return data.s.payload.cell; +#elif defined(JS_PUNBOX64) + uint64_t ptrBits = data.asBits & JSVAL_PAYLOAD_MASK; + MOZ_ASSERT((ptrBits & 0x7) == 0); + return reinterpret_cast(ptrBits); +#endif + } + + GCCellPtr toGCCellPtr() const { + return GCCellPtr(toGCThing(), traceKind()); + } + + bool toBoolean() const { + MOZ_ASSERT(isBoolean()); +#if defined(JS_NUNBOX32) + return bool(data.s.payload.boo); +#elif defined(JS_PUNBOX64) + return bool(data.asBits & JSVAL_PAYLOAD_MASK); +#endif + } + + uint32_t payloadAsRawUint32() const { + MOZ_ASSERT(!isDouble()); + return data.s.payload.u32; + } + + uint64_t asRawBits() const { + return data.asBits; + } + + JSValueType extractNonDoubleType() const { + uint32_t type = toTag() & 0xF; + MOZ_ASSERT(type > 
JSVAL_TYPE_DOUBLE); + return JSValueType(type); + } + + /* + * Private API + * + * Private setters/getters allow the caller to read/write arbitrary types + * that fit in the 64-bit payload. It is the caller's responsibility, after + * storing to a value with setPrivateX to read only using getPrivateX. + * Privates values are given a type which ensures they are not marked. + */ + + void setPrivate(void* ptr) { + MOZ_ASSERT((uintptr_t(ptr) & 1) == 0); +#if defined(JS_NUNBOX32) + data.s.tag = JSValueTag(0); + data.s.payload.ptr = ptr; +#elif defined(JS_PUNBOX64) + data.asBits = uintptr_t(ptr) >> 1; +#endif + MOZ_ASSERT(isDouble()); + } + + void* toPrivate() const { + MOZ_ASSERT(isDouble()); +#if defined(JS_NUNBOX32) + return data.s.payload.ptr; +#elif defined(JS_PUNBOX64) + MOZ_ASSERT((data.asBits & 0x8000000000000000ULL) == 0); + return reinterpret_cast(data.asBits << 1); +#endif + } + + void setPrivateUint32(uint32_t ui) { + MOZ_ASSERT(uint32_t(int32_t(ui)) == ui); + setInt32(int32_t(ui)); + } + + uint32_t toPrivateUint32() const { + return uint32_t(toInt32()); + } + + /* + * Private GC Thing API + * + * Non-JSObject, JSString, and JS::Symbol cells may be put into the 64-bit + * payload as private GC things. Such Values are considered isGCThing(), and + * as such, automatically marked. Their traceKind() is gotten via their + * cells. + */ + + void setPrivateGCThing(js::gc::Cell* cell) { + MOZ_ASSERT(JS::GCThingTraceKind(cell) != JS::TraceKind::String, + "Private GC thing Values must not be strings. Make a StringValue instead."); + MOZ_ASSERT(JS::GCThingTraceKind(cell) != JS::TraceKind::Symbol, + "Private GC thing Values must not be symbols. Make a SymbolValue instead."); + MOZ_ASSERT(JS::GCThingTraceKind(cell) != JS::TraceKind::Object, + "Private GC thing Values must not be objects. 
Make an ObjectValue instead."); + + MOZ_ASSERT(uintptr_t(cell) > 0x1000); +#if defined(JS_PUNBOX64) + // VisualStudio cannot contain parenthesized C++ style cast and shift + // inside decltype in template parameter: + // AssertionConditionType> 1))> + // It throws syntax error. + MOZ_ASSERT((((uintptr_t)cell) >> JSVAL_TAG_SHIFT) == 0); +#endif + data.asBits = bitsFromTagAndPayload(JSVAL_TAG_PRIVATE_GCTHING, PayloadType(cell)); + } + + bool isPrivateGCThing() const { + return toTag() == JSVAL_TAG_PRIVATE_GCTHING; + } + + const size_t* payloadWord() const { +#if defined(JS_NUNBOX32) + return &data.s.payload.word; +#elif defined(JS_PUNBOX64) + return &data.asWord; +#endif + } + + const uintptr_t* payloadUIntPtr() const { +#if defined(JS_NUNBOX32) + return &data.s.payload.uintptr; +#elif defined(JS_PUNBOX64) + return &data.asUIntPtr; +#endif + } + +#if !defined(_MSC_VER) && !defined(__sparc) + // Value must be POD so that MSVC will pass it by value and not in memory + // (bug 689101); the same is true for SPARC as well (bug 737344). More + // precisely, we don't want Value return values compiled as out params. + private: +#endif + +#if MOZ_LITTLE_ENDIAN +# if defined(JS_NUNBOX32) + union layout { + uint64_t asBits; + struct { + union { + int32_t i32; + uint32_t u32; + uint32_t boo; // Don't use |bool| -- it must be four bytes. 
+ JSString* str; + JS::Symbol* sym; + JSObject* obj; + js::gc::Cell* cell; + void* ptr; + JSWhyMagic why; + size_t word; + uintptr_t uintptr; + } payload; + JSValueTag tag; + } s; + double asDouble; + void* asPtr; + + layout() = default; + explicit constexpr layout(uint64_t bits) : asBits(bits) {} + explicit constexpr layout(double d) : asDouble(d) {} + } data; +# elif defined(JS_PUNBOX64) + union layout { + uint64_t asBits; +#if !defined(_WIN64) + /* MSVC does not pack these correctly :-( */ + struct { + uint64_t payload47 : 47; + JSValueTag tag : 17; + } debugView; +#endif + struct { + union { + int32_t i32; + uint32_t u32; + JSWhyMagic why; + } payload; + } s; + double asDouble; + void* asPtr; + size_t asWord; + uintptr_t asUIntPtr; + + layout() = default; + explicit constexpr layout(uint64_t bits) : asBits(bits) {} + explicit constexpr layout(double d) : asDouble(d) {} + } data; +# endif /* JS_PUNBOX64 */ +#else /* MOZ_LITTLE_ENDIAN */ +# if defined(JS_NUNBOX32) + union layout { + uint64_t asBits; + struct { + JSValueTag tag; + union { + int32_t i32; + uint32_t u32; + uint32_t boo; // Don't use |bool| -- it must be four bytes. 
+ JSString* str; + JS::Symbol* sym; + JSObject* obj; + js::gc::Cell* cell; + void* ptr; + JSWhyMagic why; + size_t word; + uintptr_t uintptr; + } payload; + } s; + double asDouble; + void* asPtr; + + layout() = default; + explicit constexpr layout(uint64_t bits) : asBits(bits) {} + explicit constexpr layout(double d) : asDouble(d) {} + } data; +# elif defined(JS_PUNBOX64) + union layout { + uint64_t asBits; + struct { + JSValueTag tag : 17; + uint64_t payload47 : 47; + } debugView; + struct { + uint32_t padding; + union { + int32_t i32; + uint32_t u32; + JSWhyMagic why; + } payload; + } s; + double asDouble; + void* asPtr; + size_t asWord; + uintptr_t asUIntPtr; + + layout() = default; + explicit constexpr layout(uint64_t bits) : asBits(bits) {} + explicit constexpr layout(double d) : asDouble(d) {} + } data; +# endif /* JS_PUNBOX64 */ +#endif /* MOZ_LITTLE_ENDIAN */ + + private: + explicit constexpr Value(uint64_t asBits) : data(asBits) {} + explicit constexpr Value(double d) : data(d) {} + + void staticAssertions() { + JS_STATIC_ASSERT(sizeof(JSValueType) == 1); + JS_STATIC_ASSERT(sizeof(JSValueTag) == 4); + JS_STATIC_ASSERT(sizeof(JSWhyMagic) <= 4); + JS_STATIC_ASSERT(sizeof(Value) == 8); + } + + friend constexpr Value JS::UndefinedValue(); + + public: + static constexpr uint64_t + bitsFromTagAndPayload(JSValueTag tag, PayloadType payload) + { +#if defined(JS_NUNBOX32) + return (uint64_t(uint32_t(tag)) << 32) | payload; +#elif defined(JS_PUNBOX64) + return (uint64_t(uint32_t(tag)) << JSVAL_TAG_SHIFT) | payload; +#endif + } + + static constexpr Value + fromTagAndPayload(JSValueTag tag, PayloadType payload) + { + return fromRawBits(bitsFromTagAndPayload(tag, payload)); + } + + static constexpr Value + fromRawBits(uint64_t asBits) { + return Value(asBits); + } + + static constexpr Value + fromInt32(int32_t i) { + return fromTagAndPayload(JSVAL_TAG_INT32, uint32_t(i)); + } + + static constexpr Value + fromDouble(double d) { + return Value(d); + } +} 
JS_HAZ_GC_POINTER; + +static_assert(sizeof(Value) == 8, "Value size must leave three tag bits, be a binary power, and is ubiquitously depended upon everywhere"); + +inline bool +IsOptimizedPlaceholderMagicValue(const Value& v) +{ + if (v.isMagic()) { + MOZ_ASSERT(v.whyMagic() == JS_OPTIMIZED_ARGUMENTS || v.whyMagic() == JS_OPTIMIZED_OUT); + return true; + } + return false; +} + +static MOZ_ALWAYS_INLINE void +ExposeValueToActiveJS(const Value& v) +{ +#ifdef DEBUG + Value tmp = v; + MOZ_ASSERT(!js::gc::EdgeNeedsSweepUnbarrieredSlow(&tmp)); +#endif + if (v.isGCThing()) + js::gc::ExposeGCThingToActiveJS(GCCellPtr(v)); +} + +/************************************************************************/ + +static inline MOZ_MAY_CALL_AFTER_MUST_RETURN Value +NullValue() +{ + Value v; + v.setNull(); + return v; +} + +static inline constexpr Value +UndefinedValue() +{ + return Value::fromTagAndPayload(JSVAL_TAG_UNDEFINED, 0); +} + +static inline constexpr Value +Int32Value(int32_t i32) +{ + return Value::fromInt32(i32); +} + +static inline Value +DoubleValue(double dbl) +{ + Value v; + v.setDouble(dbl); + return v; +} + +static inline Value +CanonicalizedDoubleValue(double d) +{ + return MOZ_UNLIKELY(mozilla::IsNaN(d)) + ? 
Value::fromRawBits(detail::CanonicalizedNaNBits) + : Value::fromDouble(d); +} + +static inline bool +IsCanonicalized(double d) +{ + if (mozilla::IsInfinite(d) || mozilla::IsFinite(d)) + return true; + + uint64_t bits; + mozilla::BitwiseCast(d, &bits); + return (bits & ~mozilla::DoubleTypeTraits::kSignBit) == detail::CanonicalizedNaNBits; +} + +static inline Value +DoubleNaNValue() +{ + Value v; + v.setNaN(); + return v; +} + +static inline Value +Float32Value(float f) +{ + Value v; + v.setDouble(f); + return v; +} + +static inline Value +StringValue(JSString* str) +{ + Value v; + v.setString(str); + return v; +} + +static inline Value +SymbolValue(JS::Symbol* sym) +{ + Value v; + v.setSymbol(sym); + return v; +} + +static inline Value +BooleanValue(bool boo) +{ + Value v; + v.setBoolean(boo); + return v; +} + +static inline Value +TrueValue() +{ + Value v; + v.setBoolean(true); + return v; +} + +static inline Value +FalseValue() +{ + Value v; + v.setBoolean(false); + return v; +} + +static inline Value +ObjectValue(JSObject& obj) +{ + Value v; + v.setObject(obj); + return v; +} + +static inline Value +MagicValue(JSWhyMagic why) +{ + Value v; + v.setMagic(why); + return v; +} + +static inline Value +MagicValueUint32(uint32_t payload) +{ + Value v; + v.setMagicUint32(payload); + return v; +} + +static inline Value +NumberValue(float f) +{ + Value v; + v.setNumber(f); + return v; +} + +static inline Value +NumberValue(double dbl) +{ + Value v; + v.setNumber(dbl); + return v; +} + +static inline Value +NumberValue(int8_t i) +{ + return Int32Value(i); +} + +static inline Value +NumberValue(uint8_t i) +{ + return Int32Value(i); +} + +static inline Value +NumberValue(int16_t i) +{ + return Int32Value(i); +} + +static inline Value +NumberValue(uint16_t i) +{ + return Int32Value(i); +} + +static inline Value +NumberValue(int32_t i) +{ + return Int32Value(i); +} + +static inline constexpr Value +NumberValue(uint32_t i) +{ + return i <= JSVAL_INT_MAX + ? 
Int32Value(int32_t(i)) + : Value::fromDouble(double(i)); +} + +namespace detail { + +template +class MakeNumberValue +{ + public: + template + static inline Value create(const T t) + { + Value v; + if (JSVAL_INT_MIN <= t && t <= JSVAL_INT_MAX) + v.setInt32(int32_t(t)); + else + v.setDouble(double(t)); + return v; + } +}; + +template <> +class MakeNumberValue +{ + public: + template + static inline Value create(const T t) + { + Value v; + if (t <= JSVAL_INT_MAX) + v.setInt32(int32_t(t)); + else + v.setDouble(double(t)); + return v; + } +}; + +} // namespace detail + +template +static inline Value +NumberValue(const T t) +{ + MOZ_ASSERT(Value::isNumberRepresentable(t), "value creation would be lossy"); + return detail::MakeNumberValue::is_signed>::create(t); +} + +static inline Value +ObjectOrNullValue(JSObject* obj) +{ + Value v; + v.setObjectOrNull(obj); + return v; +} + +static inline Value +PrivateValue(void* ptr) +{ + Value v; + v.setPrivate(ptr); + return v; +} + +static inline Value +PrivateUint32Value(uint32_t ui) +{ + Value v; + v.setPrivateUint32(ui); + return v; +} + +static inline Value +PrivateGCThingValue(js::gc::Cell* cell) +{ + Value v; + v.setPrivateGCThing(cell); + return v; +} + +inline bool +SameType(const Value& lhs, const Value& rhs) +{ +#if defined(JS_NUNBOX32) + JSValueTag ltag = lhs.toTag(), rtag = rhs.toTag(); + return ltag == rtag || (ltag < JSVAL_TAG_CLEAR && rtag < JSVAL_TAG_CLEAR); +#elif defined(JS_PUNBOX64) + return (lhs.isDouble() && rhs.isDouble()) || + (((lhs.data.asBits ^ rhs.data.asBits) & 0xFFFF800000000000ULL) == 0); +#endif +} + +} // namespace JS + +/************************************************************************/ + +namespace JS { +JS_PUBLIC_API(void) HeapValuePostBarrier(Value* valuep, const Value& prev, const Value& next); + +template <> +struct GCPolicy +{ + static Value initial() { return UndefinedValue(); } + static void trace(JSTracer* trc, Value* v, const char* name) { + 
js::UnsafeTraceManuallyBarrieredEdge(trc, v, name); + } + static bool isTenured(const Value& thing) { + return !thing.isGCThing() || !IsInsideNursery(thing.toGCThing()); + } +}; + +} // namespace JS + +namespace js { + +template <> +struct BarrierMethods +{ + static gc::Cell* asGCThingOrNull(const JS::Value& v) { + return v.isGCThing() ? v.toGCThing() : nullptr; + } + static void postBarrier(JS::Value* v, const JS::Value& prev, const JS::Value& next) { + JS::HeapValuePostBarrier(v, prev, next); + } + static void exposeToJS(const JS::Value& v) { + JS::ExposeValueToActiveJS(v); + } +}; + +template class MutableValueOperations; + +/** + * A class designed for CRTP use in implementing the non-mutating parts of the + * Value interface in Value-like classes. Wrapper must be a class inheriting + * ValueOperations with a visible get() method returning a const + * reference to the Value abstracted by Wrapper. + */ +template +class WrappedPtrOperations +{ + const JS::Value& value() const { return static_cast(this)->get(); } + + public: + bool isUndefined() const { return value().isUndefined(); } + bool isNull() const { return value().isNull(); } + bool isBoolean() const { return value().isBoolean(); } + bool isTrue() const { return value().isTrue(); } + bool isFalse() const { return value().isFalse(); } + bool isNumber() const { return value().isNumber(); } + bool isInt32() const { return value().isInt32(); } + bool isInt32(int32_t i32) const { return value().isInt32(i32); } + bool isDouble() const { return value().isDouble(); } + bool isString() const { return value().isString(); } + bool isSymbol() const { return value().isSymbol(); } + bool isObject() const { return value().isObject(); } + bool isMagic() const { return value().isMagic(); } + bool isMagic(JSWhyMagic why) const { return value().isMagic(why); } + bool isGCThing() const { return value().isGCThing(); } + bool isPrimitive() const { return value().isPrimitive(); } + + bool isNullOrUndefined() const { return 
value().isNullOrUndefined(); } + bool isObjectOrNull() const { return value().isObjectOrNull(); } + + bool toBoolean() const { return value().toBoolean(); } + double toNumber() const { return value().toNumber(); } + int32_t toInt32() const { return value().toInt32(); } + double toDouble() const { return value().toDouble(); } + JSString* toString() const { return value().toString(); } + JS::Symbol* toSymbol() const { return value().toSymbol(); } + JSObject& toObject() const { return value().toObject(); } + JSObject* toObjectOrNull() const { return value().toObjectOrNull(); } + gc::Cell* toGCThing() const { return value().toGCThing(); } + JS::TraceKind traceKind() const { return value().traceKind(); } + void* toPrivate() const { return value().toPrivate(); } + uint32_t toPrivateUint32() const { return value().toPrivateUint32(); } + + uint64_t asRawBits() const { return value().asRawBits(); } + JSValueType extractNonDoubleType() const { return value().extractNonDoubleType(); } + + JSWhyMagic whyMagic() const { return value().whyMagic(); } + uint32_t magicUint32() const { return value().magicUint32(); } +}; + +/** + * A class designed for CRTP use in implementing all the mutating parts of the + * Value interface in Value-like classes. Wrapper must be a class inheriting + * MutableWrappedPtrOperations with visible get() methods returning const and + * non-const references to the Value abstracted by Wrapper. 
+ */ +template +class MutableWrappedPtrOperations : public WrappedPtrOperations +{ + JS::Value& value() { return static_cast(this)->get(); } + + public: + void setNull() { value().setNull(); } + void setUndefined() { value().setUndefined(); } + void setInt32(int32_t i) { value().setInt32(i); } + void setDouble(double d) { value().setDouble(d); } + void setNaN() { setDouble(JS::GenericNaN()); } + void setBoolean(bool b) { value().setBoolean(b); } + void setMagic(JSWhyMagic why) { value().setMagic(why); } + bool setNumber(uint32_t ui) { return value().setNumber(ui); } + bool setNumber(double d) { return value().setNumber(d); } + void setString(JSString* str) { this->value().setString(str); } + void setSymbol(JS::Symbol* sym) { this->value().setSymbol(sym); } + void setObject(JSObject& obj) { this->value().setObject(obj); } + void setObjectOrNull(JSObject* arg) { this->value().setObjectOrNull(arg); } + void setPrivate(void* ptr) { this->value().setPrivate(ptr); } + void setPrivateUint32(uint32_t ui) { this->value().setPrivateUint32(ui); } + void setPrivateGCThing(js::gc::Cell* cell) { this->value().setPrivateGCThing(cell); } +}; + +/* + * Augment the generic Heap interface when T = Value with + * type-querying, value-extracting, and mutating operations. 
+ */ +template +class HeapBase : public WrappedPtrOperations +{ + void setBarriered(const JS::Value& v) { + *static_cast*>(this) = v; + } + + public: + void setNull() { setBarriered(JS::NullValue()); } + void setUndefined() { setBarriered(JS::UndefinedValue()); } + void setInt32(int32_t i) { setBarriered(JS::Int32Value(i)); } + void setDouble(double d) { setBarriered(JS::DoubleValue(d)); } + void setNaN() { setDouble(JS::GenericNaN()); } + void setBoolean(bool b) { setBarriered(JS::BooleanValue(b)); } + void setMagic(JSWhyMagic why) { setBarriered(JS::MagicValue(why)); } + void setString(JSString* str) { setBarriered(JS::StringValue(str)); } + void setSymbol(JS::Symbol* sym) { setBarriered(JS::SymbolValue(sym)); } + void setObject(JSObject& obj) { setBarriered(JS::ObjectValue(obj)); } + void setPrivateGCThing(js::gc::Cell* cell) { setBarriered(JS::PrivateGCThingValue(cell)); } + + bool setNumber(uint32_t ui) { + if (ui > JSVAL_INT_MAX) { + setDouble((double)ui); + return false; + } else { + setInt32((int32_t)ui); + return true; + } + } + + bool setNumber(double d) { + int32_t i; + if (mozilla::NumberIsInt32(d, &i)) { + setInt32(i); + return true; + } + + setDouble(d); + return false; + } + + void setObjectOrNull(JSObject* arg) { + if (arg) + setObject(*arg); + else + setNull(); + } +}; + +/* + * If the Value is a GC pointer type, convert to that type and call |f| with + * the pointer. If the Value is not a GC type, calls F::defaultValue. + */ +template +auto +DispatchTyped(F f, const JS::Value& val, Args&&... 
args) + -> decltype(f(static_cast(nullptr), mozilla::Forward(args)...)) +{ + if (val.isString()) + return f(val.toString(), mozilla::Forward(args)...); + if (val.isObject()) + return f(&val.toObject(), mozilla::Forward(args)...); + if (val.isSymbol()) + return f(val.toSymbol(), mozilla::Forward(args)...); + if (MOZ_UNLIKELY(val.isPrivateGCThing())) + return DispatchTyped(f, val.toGCCellPtr(), mozilla::Forward(args)...); + MOZ_ASSERT(!val.isGCThing()); + return F::defaultValue(val); +} + +template struct VoidDefaultAdaptor { static void defaultValue(const S&) {} }; +template struct IdentityDefaultAdaptor { static S defaultValue(const S& v) {return v;} }; +template struct BoolDefaultAdaptor { static bool defaultValue(const S&) { return v; } }; + +static inline JS::Value +PoisonedObjectValue(uintptr_t poison) +{ + JS::Value v; + v.setObjectNoCheck(reinterpret_cast(poison)); + return v; +} + +} // namespace js + +#ifdef DEBUG +namespace JS { + +MOZ_ALWAYS_INLINE bool +ValueIsNotGray(const Value& value) +{ + if (!value.isGCThing()) + return true; + + return CellIsNotGray(value.toGCThing()); +} + +MOZ_ALWAYS_INLINE bool +ValueIsNotGray(const Heap& value) +{ + return ValueIsNotGray(value.unbarrieredGet()); +} + +} // namespace JS +#endif + +/************************************************************************/ + +namespace JS { + +extern JS_PUBLIC_DATA(const HandleValue) NullHandleValue; +extern JS_PUBLIC_DATA(const HandleValue) UndefinedHandleValue; +extern JS_PUBLIC_DATA(const HandleValue) TrueHandleValue; +extern JS_PUBLIC_DATA(const HandleValue) FalseHandleValue; + +} // namespace JS + +#endif /* js_Value_h */ diff --git a/gecko/include/jsalloc.h b/gecko/include/jsalloc.h new file mode 100644 index 0000000..d81ff5c --- /dev/null +++ b/gecko/include/jsalloc.h @@ -0,0 +1,140 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + 
* License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * JS allocation policies. + * + * The allocators here are for system memory with lifetimes which are not + * managed by the GC. See the comment at the top of vm/MallocProvider.h. + */ + +#ifndef jsalloc_h +#define jsalloc_h + +#include "js/TypeDecls.h" +#include "js/Utility.h" + +extern JS_PUBLIC_API(void) JS_ReportOutOfMemory(JSContext* cx); + +namespace js { + +enum class AllocFunction { + Malloc, + Calloc, + Realloc +}; +/* Policy for using system memory functions and doing no error reporting. */ +class SystemAllocPolicy +{ + public: + template T* maybe_pod_malloc(size_t numElems) { return js_pod_malloc(numElems); } + template T* maybe_pod_calloc(size_t numElems) { return js_pod_calloc(numElems); } + template T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) { + return js_pod_realloc(p, oldSize, newSize); + } + template T* pod_malloc(size_t numElems) { return maybe_pod_malloc(numElems); } + template T* pod_calloc(size_t numElems) { return maybe_pod_calloc(numElems); } + template T* pod_realloc(T* p, size_t oldSize, size_t newSize) { + return maybe_pod_realloc(p, oldSize, newSize); + } + void free_(void* p) { js_free(p); } + void reportAllocOverflow() const {} + bool checkSimulatedOOM() const { + return !js::oom::ShouldFailWithOOM(); + } +}; + +void ReportOutOfMemory(JSContext* cx); + +/* + * Allocation policy that calls the system memory functions and reports errors + * to the context. Since the JSContext given on construction is stored for + * the lifetime of the container, this policy may only be used for containers + * whose lifetime is a shorter than the given JSContext. + * + * FIXME bug 647103 - rewrite this in terms of temporary allocation functions, + * not the system ones. 
+ */ +class TempAllocPolicy +{ + JSContext* const cx_; + + /* + * Non-inline helper to call JSRuntime::onOutOfMemory with minimal + * code bloat. + */ + JS_FRIEND_API(void*) onOutOfMemory(AllocFunction allocFunc, size_t nbytes, + void* reallocPtr = nullptr); + + template + T* onOutOfMemoryTyped(AllocFunction allocFunc, size_t numElems, void* reallocPtr = nullptr) { + size_t bytes; + if (MOZ_UNLIKELY(!CalculateAllocSize(numElems, &bytes))) + return nullptr; + return static_cast(onOutOfMemory(allocFunc, bytes, reallocPtr)); + } + + public: + MOZ_IMPLICIT TempAllocPolicy(JSContext* cx) : cx_(cx) {} + + template + T* maybe_pod_malloc(size_t numElems) { + return js_pod_malloc(numElems); + } + + template + T* maybe_pod_calloc(size_t numElems) { + return js_pod_calloc(numElems); + } + + template + T* maybe_pod_realloc(T* prior, size_t oldSize, size_t newSize) { + return js_pod_realloc(prior, oldSize, newSize); + } + + template + T* pod_malloc(size_t numElems) { + T* p = maybe_pod_malloc(numElems); + if (MOZ_UNLIKELY(!p)) + p = onOutOfMemoryTyped(AllocFunction::Malloc, numElems); + return p; + } + + template + T* pod_calloc(size_t numElems) { + T* p = maybe_pod_calloc(numElems); + if (MOZ_UNLIKELY(!p)) + p = onOutOfMemoryTyped(AllocFunction::Calloc, numElems); + return p; + } + + template + T* pod_realloc(T* prior, size_t oldSize, size_t newSize) { + T* p2 = maybe_pod_realloc(prior, oldSize, newSize); + if (MOZ_UNLIKELY(!p2)) + p2 = onOutOfMemoryTyped(AllocFunction::Realloc, newSize, prior); + return p2; + } + + void free_(void* p) { + js_free(p); + } + + JS_FRIEND_API(void) reportAllocOverflow() const; + + bool checkSimulatedOOM() const { + if (js::oom::ShouldFailWithOOM()) { + ReportOutOfMemory(cx_); + return false; + } + + return true; + } +}; + +} /* namespace js */ + +#endif /* jsalloc_h */ diff --git a/gecko/include/malloc_decls.h b/gecko/include/malloc_decls.h index 14dc340..5a1e361 100644 --- a/gecko/include/malloc_decls.h +++ b/gecko/include/malloc_decls.h @@ 
-53,9 +53,50 @@ MALLOC_DECL(malloc_good_size, size_t, size_t) # endif # if MALLOC_FUNCS & MALLOC_FUNCS_JEMALLOC MALLOC_DECL_VOID(jemalloc_stats, jemalloc_stats_t *) +/* + * On some operating systems (Mac), we use madvise(MADV_FREE) to hand pages + * back to the operating system. On Mac, the operating system doesn't take + * this memory back immediately; instead, the OS takes it back only when the + * machine is running out of physical memory. + * + * This is great from the standpoint of efficiency, but it makes measuring our + * actual RSS difficult, because pages which we've MADV_FREE'd shouldn't count + * against our RSS. + * + * This function explicitly purges any MADV_FREE'd pages from physical memory, + * causing our reported RSS match the amount of memory we're actually using. + * + * Note that this call is expensive in two ways. First, it may be slow to + * execute, because it may make a number of slow syscalls to free memory. This + * function holds the big jemalloc locks, so basically all threads are blocked + * while this function runs. + * + * This function is also expensive in that the next time we go to access a page + * which we've just explicitly decommitted, the operating system has to attach + * to it a physical page! If we hadn't run this function, the OS would have + * less work to do. + * + * If MALLOC_DOUBLE_PURGE is not defined, this function does nothing. + */ MALLOC_DECL_VOID(jemalloc_purge_freed_pages) + +/* + * Free all unused dirty pages in all arenas. Calling this function will slow + * down subsequent allocations so it is recommended to use it only when + * memory needs to be reclaimed at all costs (see bug 805855). This function + * provides functionality similar to mallctl("arenas.purge") in jemalloc 3. + */ MALLOC_DECL_VOID(jemalloc_free_dirty_pages) + +/* + * Opt in or out of a thread local arena (bool argument is whether to opt-in + * (true) or out (false)). 
+ */ MALLOC_DECL_VOID(jemalloc_thread_local_arena, bool) + +/* + * Provide information about any allocation enclosing the given address. + */ MALLOC_DECL_VOID(jemalloc_ptr_info, const void*, jemalloc_ptr_info_t*) # endif diff --git a/gecko/include/mozilla/Opaque.h b/gecko/include/mozilla/Opaque.h new file mode 100644 index 0000000..d7239ee --- /dev/null +++ b/gecko/include/mozilla/Opaque.h @@ -0,0 +1,44 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* An opaque integral type supporting only comparison operators. */ + +#ifndef mozilla_Opaque_h +#define mozilla_Opaque_h + +#include "mozilla/TypeTraits.h" + +namespace mozilla { + +/** + * Opaque is a replacement for integral T in cases where only comparisons + * must be supported, and it's desirable to prevent accidental dependency on + * exact values. 
+ */ +template +class Opaque final +{ + static_assert(mozilla::IsIntegral::value, + "mozilla::Opaque only supports integral types"); + + T mValue; + +public: + Opaque() {} + explicit Opaque(T aValue) : mValue(aValue) {} + + bool operator==(const Opaque& aOther) const { + return mValue == aOther.mValue; + } + + bool operator!=(const Opaque& aOther) const { + return !(*this == aOther); + } +}; + +} // namespace mozilla + +#endif /* mozilla_Opaque_h */ diff --git a/gecko/include/mozilla/Telemetry.h b/gecko/include/mozilla/Telemetry.h index 3b31cbc..5e5d1b3 100644 --- a/gecko/include/mozilla/Telemetry.h +++ b/gecko/include/mozilla/Telemetry.h @@ -34,8 +34,8 @@ namespace HangMonitor { } // namespace HangMonitor namespace Telemetry { -struct Accumulation; -struct KeyedAccumulation; +struct HistogramAccumulation; +struct KeyedHistogramAccumulation; struct ScalarAction; struct KeyedScalarAction; struct ChildEventData; diff --git a/gecko/include/mozilla/TelemetryHistogramEnums.h b/gecko/include/mozilla/TelemetryHistogramEnums.h index abf9ead..79b26c0 100644 --- a/gecko/include/mozilla/TelemetryHistogramEnums.h +++ b/gecko/include/mozilla/TelemetryHistogramEnums.h @@ -1819,8 +1819,6 @@ enum HistogramID : uint32_t { USE_COUNTER2_DEPRECATED_ChromeUseOfDOM3LoadMethod_PAGE, USE_COUNTER2_DEPRECATED_ShowModalDialog_DOCUMENT, USE_COUNTER2_DEPRECATED_ShowModalDialog_PAGE, - USE_COUNTER2_DEPRECATED_Window_Content_DOCUMENT, - USE_COUNTER2_DEPRECATED_Window_Content_PAGE, USE_COUNTER2_DEPRECATED_SyncXMLHttpRequest_DOCUMENT, USE_COUNTER2_DEPRECATED_SyncXMLHttpRequest_PAGE, USE_COUNTER2_DEPRECATED_Window_Cc_ontrollers_DOCUMENT, diff --git a/gecko/include/mozilla/ThreadLocal.h b/gecko/include/mozilla/ThreadLocal.h index 0ec059c..7af386a 100644 --- a/gecko/include/mozilla/ThreadLocal.h +++ b/gecko/include/mozilla/ThreadLocal.h @@ -29,10 +29,6 @@ namespace detail { # endif #endif -#if defined(HAVE_THREAD_TLS_KEYWORD) || defined(XP_WIN) || defined(MACOSX_HAS_THREAD_LOCAL) -#define 
MOZ_HAS_THREAD_LOCAL -#endif - /* * Thread Local Storage helpers. * @@ -72,47 +68,120 @@ namespace detail { * // Get the TLS value * int value = tlsKey.get(); */ + +// Integral types narrower than void* must be extended to avoid +// warnings from valgrind on some platforms. This helper type +// achieves that without penalizing the common case of ThreadLocals +// instantiated using a pointer type. +template +struct Helper +{ + typedef uintptr_t Type; +}; + +template +struct Helper +{ + typedef S *Type; +}; + +#ifdef XP_WIN +/* Despite not being used for MOZ_THREAD_LOCAL, we expose an implementation for + * Windows for cases where it's not desirable to use thread_local */ template -class ThreadLocal +class ThreadLocalKeyStorage { -#ifndef MOZ_HAS_THREAD_LOCAL - typedef pthread_key_t key_t; - - // Integral types narrower than void* must be extended to avoid - // warnings from valgrind on some platforms. This helper type - // achieves that without penalizing the common case of ThreadLocals - // instantiated using a pointer type. - template - struct Helper - { - typedef uintptr_t Type; - }; - - template - struct Helper - { - typedef S *Type; - }; -#endif +public: + ThreadLocalKeyStorage() + : mKey(TLS_OUT_OF_INDEXES) + {} + + inline bool initialized() const { + return mKey != TLS_OUT_OF_INDEXES; + } + + inline void init() { + mKey = TlsAlloc(); + } + inline T get() const { + void* h = TlsGetValue(mKey); + return static_cast(reinterpret_cast::Type>(h)); + } + + inline bool set(const T aValue) { + void* h = reinterpret_cast(static_cast::Type>(aValue)); + return TlsSetValue(mKey, h); + } + +private: + unsigned long mKey; +}; +#else +template +class ThreadLocalKeyStorage +{ public: - // __thread does not allow non-trivial constructors, but we can - // instead rely on zero-initialization. 
-#ifndef MOZ_HAS_THREAD_LOCAL - ThreadLocal() + ThreadLocalKeyStorage() : mKey(0), mInited(false) {} -#endif - bool initialized() const { -#ifdef MOZ_HAS_THREAD_LOCAL - return true; -#else + inline bool initialized() const { return mInited; + } + + inline void init() { + mInited = !pthread_key_create(&mKey, nullptr); + } + + inline T get() const { + void* h = pthread_getspecific(mKey); + return static_cast(reinterpret_cast::Type>(h)); + } + + inline bool set(const T aValue) { + void* h = reinterpret_cast(static_cast::Type>(aValue)); + return !pthread_setspecific(mKey, h); + } + +private: + pthread_key_t mKey; + bool mInited; +}; #endif + +template +class ThreadLocalNativeStorage +{ +public: + // __thread does not allow non-trivial constructors, but we can + // instead rely on zero-initialization. + inline bool initialized() const { + return true; + } + + inline void init() { } + inline T get() const { + return mValue; + } + + inline bool set(const T aValue) { + mValue = aValue; + return true; + } + +private: + T mValue; +}; + +template class Storage> +class ThreadLocal: public Storage +{ +public: MOZ_MUST_USE inline bool init(); + void infallibleInit() { MOZ_RELEASE_ASSERT(init(), "Infallible TLS initialization failed"); } @@ -120,19 +189,11 @@ class ThreadLocal inline T get() const; inline void set(const T aValue); - -private: -#ifdef MOZ_HAS_THREAD_LOCAL - T mValue; -#else - key_t mKey; - bool mInited; -#endif }; -template +template class Storage> inline bool -ThreadLocal::init() +ThreadLocal::init() { static_assert(mozilla::IsPointer::value || mozilla::IsIntegral::value, "mozilla::ThreadLocal must be used with a pointer or " @@ -141,54 +202,37 @@ ThreadLocal::init() "mozilla::ThreadLocal can't be used for types larger than " "a pointer"); -#ifdef MOZ_HAS_THREAD_LOCAL - return true; -#else - if (!initialized()) { - mInited = !pthread_key_create(&mKey, nullptr); + if (!Storage::initialized()) { + Storage::init(); } - return mInited; -#endif + return 
Storage::initialized(); } -template +template class Storage> inline T -ThreadLocal::get() const +ThreadLocal::get() const { -#ifdef MOZ_HAS_THREAD_LOCAL - return mValue; -#else - MOZ_ASSERT(initialized()); - void* h; - h = pthread_getspecific(mKey); - return static_cast(reinterpret_cast::Type>(h)); -#endif + MOZ_ASSERT(Storage::initialized()); + return Storage::get(); } -template +template class Storage> inline void -ThreadLocal::set(const T aValue) +ThreadLocal::set(const T aValue) { -#ifdef MOZ_HAS_THREAD_LOCAL - mValue = aValue; -#else - MOZ_ASSERT(initialized()); - void* h = reinterpret_cast(static_cast::Type>(aValue)); - bool succeeded = !pthread_setspecific(mKey, h); + MOZ_ASSERT(Storage::initialized()); + bool succeeded = Storage::set(aValue); if (!succeeded) { MOZ_CRASH(); } -#endif } -#ifdef MOZ_HAS_THREAD_LOCAL #if defined(XP_WIN) || defined(MACOSX_HAS_THREAD_LOCAL) -#define MOZ_THREAD_LOCAL(TYPE) thread_local mozilla::detail::ThreadLocal -#else -#define MOZ_THREAD_LOCAL(TYPE) __thread mozilla::detail::ThreadLocal -#endif +#define MOZ_THREAD_LOCAL(TYPE) thread_local mozilla::detail::ThreadLocal +#elif defined(HAVE_THREAD_TLS_KEYWORD) +#define MOZ_THREAD_LOCAL(TYPE) __thread mozilla::detail::ThreadLocal #else -#define MOZ_THREAD_LOCAL(TYPE) mozilla::detail::ThreadLocal +#define MOZ_THREAD_LOCAL(TYPE) mozilla::detail::ThreadLocal #endif } // namespace detail diff --git a/gecko/include/mozilla/gfx/BasePoint3D.h b/gecko/include/mozilla/gfx/BasePoint3D.h index 41e3875..298e06c 100644 --- a/gecko/include/mozilla/gfx/BasePoint3D.h +++ b/gecko/include/mozilla/gfx/BasePoint3D.h @@ -114,6 +114,10 @@ struct BasePoint3D { void Normalize() { *this /= Length(); } + + friend std::ostream& operator<<(std::ostream& stream, const BasePoint3D& aPoint) { + return stream << '(' << aPoint.x << ',' << aPoint.y << ',' << aPoint.z << ')'; + } }; } // namespace gfx diff --git a/gecko/include/nsIPrincipal.h b/gecko/include/nsIPrincipal.h new file mode 100644 index 0000000..72f4d3e 
--- /dev/null +++ b/gecko/include/nsIPrincipal.h @@ -0,0 +1,425 @@ +/* + * DO NOT EDIT. THIS FILE IS GENERATED FROM ../../../dist/idl/nsIPrincipal.idl + */ + +#ifndef __gen_nsIPrincipal_h__ +#define __gen_nsIPrincipal_h__ + + +#ifndef __gen_nsISerializable_h__ +#include "nsISerializable.h" +#endif + +#include "js/Value.h" + +#include "mozilla/Assertions.h" +#include "mozilla/DebugOnly.h" + +/* For IDL files that don't want to include root IDL files. */ +#ifndef NS_NO_VTABLE +#define NS_NO_VTABLE +#endif +struct JSPrincipals; +#include "nsCOMPtr.h" +#include "nsTArray.h" +#include "mozilla/DebugOnly.h" +namespace mozilla { +class OriginAttributes; +} +/** + * Some methods have a fast path for the case when we're comparing a principal + * to itself. The situation may happen for example with about:blank documents. + */ +#define DECL_FAST_INLINE_HELPER(method_) \ + inline bool method_(nsIPrincipal* aOther) \ + { \ + mozilla::DebugOnly val = false; \ + MOZ_ASSERT_IF(this == aOther, \ + NS_SUCCEEDED(method_(aOther, &val)) && val); \ + \ + bool retVal = false; \ + return \ + this == aOther || \ + (NS_SUCCEEDED(method_(aOther, &retVal)) && retVal); \ + } +class nsIURI; /* forward declaration */ + +class nsIContentSecurityPolicy; /* forward declaration */ + +class nsIDOMDocument; /* forward declaration */ + + +/* starting interface: nsIPrincipal */ +#define NS_IPRINCIPAL_IID_STR "f75f502d-79fd-48be-a079-e5a7b8f80c8b" + +#define NS_IPRINCIPAL_IID \ + {0xf75f502d, 0x79fd, 0x48be, \ + { 0xa0, 0x79, 0xe5, 0xa7, 0xb8, 0xf8, 0x0c, 0x8b }} + +class nsIPrincipal : public nsISerializable { + public: + + NS_DECLARE_STATIC_IID_ACCESSOR(NS_IPRINCIPAL_IID) + + /* boolean equals (in nsIPrincipal other); */ + NS_IMETHOD Equals(nsIPrincipal *other, bool *_retval) = 0; + + /* boolean equalsConsideringDomain (in nsIPrincipal other); */ + NS_IMETHOD EqualsConsideringDomain(nsIPrincipal *other, bool *_retval) = 0; + + DECL_FAST_INLINE_HELPER(Equals) + 
DECL_FAST_INLINE_HELPER(EqualsConsideringDomain) + /* [noscript] readonly attribute unsigned long hashValue; */ + NS_IMETHOD GetHashValue(uint32_t *aHashValue) = 0; + + /* readonly attribute nsIURI URI; */ + NS_IMETHOD GetURI(nsIURI * *aURI) = 0; + + /* [noscript] attribute nsIURI domain; */ + NS_IMETHOD GetDomain(nsIURI * *aDomain) = 0; + NS_IMETHOD SetDomain(nsIURI *aDomain) = 0; + + /* boolean subsumes (in nsIPrincipal other); */ + NS_IMETHOD Subsumes(nsIPrincipal *other, bool *_retval) = 0; + + /* boolean subsumesConsideringDomain (in nsIPrincipal other); */ + NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal *other, bool *_retval) = 0; + + /* boolean subsumesConsideringDomainIgnoringFPD (in nsIPrincipal other); */ + NS_IMETHOD SubsumesConsideringDomainIgnoringFPD(nsIPrincipal *other, bool *_retval) = 0; + + DECL_FAST_INLINE_HELPER(Subsumes) + DECL_FAST_INLINE_HELPER(SubsumesConsideringDomain) + DECL_FAST_INLINE_HELPER(SubsumesConsideringDomainIgnoringFPD) +#undef DECL_FAST_INLINE_HELPER + /* void checkMayLoad (in nsIURI uri, in boolean report, in boolean allowIfInheritsPrincipal); */ + NS_IMETHOD CheckMayLoad(nsIURI *uri, bool report, bool allowIfInheritsPrincipal) = 0; + + /* [noscript] attribute nsIContentSecurityPolicy csp; */ + NS_IMETHOD GetCsp(nsIContentSecurityPolicy * *aCsp) = 0; + NS_IMETHOD SetCsp(nsIContentSecurityPolicy *aCsp) = 0; + + /* [noscript] nsIContentSecurityPolicy ensureCSP (in nsIDOMDocument aDocument); */ + NS_IMETHOD EnsureCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval) = 0; + + /* [noscript] readonly attribute nsIContentSecurityPolicy preloadCsp; */ + NS_IMETHOD GetPreloadCsp(nsIContentSecurityPolicy * *aPreloadCsp) = 0; + + /* [noscript] nsIContentSecurityPolicy ensurePreloadCSP (in nsIDOMDocument aDocument); */ + NS_IMETHOD EnsurePreloadCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval) = 0; + + /* readonly attribute AString cspJSON; */ + NS_IMETHOD GetCspJSON(nsAString & aCspJSON) = 0; + + /* 
[implicit_jscontext] readonly attribute jsval originAttributes; */ + NS_IMETHOD GetOriginAttributes(JSContext* cx, JS::MutableHandleValue aOriginAttributes) = 0; + + /* [binaryname(OriginAttributesRef),noscript,nostdcall,notxpcom] const_OriginAttributes OriginAttributesRef (); */ + virtual const mozilla::OriginAttributes & OriginAttributesRef(void) = 0; + + /* readonly attribute ACString origin; */ + NS_IMETHOD GetOrigin(nsACString & aOrigin) = 0; + + /* readonly attribute ACString originNoSuffix; */ + NS_IMETHOD GetOriginNoSuffix(nsACString & aOriginNoSuffix) = 0; + + /* readonly attribute AUTF8String originSuffix; */ + NS_IMETHOD GetOriginSuffix(nsACString & aOriginSuffix) = 0; + + /* readonly attribute ACString baseDomain; */ + NS_IMETHOD GetBaseDomain(nsACString & aBaseDomain) = 0; + + /* [infallible] readonly attribute unsigned long appId; */ + NS_IMETHOD GetAppId(uint32_t *aAppId) = 0; + inline uint32_t GetAppId() + { + uint32_t result; + mozilla::DebugOnly rv = GetAppId(&result); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + return result; + } + + /* readonly attribute AString addonId; */ + NS_IMETHOD GetAddonId(nsAString & aAddonId) = 0; + + /* readonly attribute nsISupports addonPolicy; */ + NS_IMETHOD GetAddonPolicy(nsISupports * *aAddonPolicy) = 0; + + /* [infallible] readonly attribute unsigned long userContextId; */ + NS_IMETHOD GetUserContextId(uint32_t *aUserContextId) = 0; + inline uint32_t GetUserContextId() + { + uint32_t result; + mozilla::DebugOnly rv = GetUserContextId(&result); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + return result; + } + + /* [infallible] readonly attribute unsigned long privateBrowsingId; */ + NS_IMETHOD GetPrivateBrowsingId(uint32_t *aPrivateBrowsingId) = 0; + inline uint32_t GetPrivateBrowsingId() + { + uint32_t result; + mozilla::DebugOnly rv = GetPrivateBrowsingId(&result); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + return result; + } + + /* [infallible] readonly attribute boolean isInIsolatedMozBrowserElement; */ + NS_IMETHOD 
GetIsInIsolatedMozBrowserElement(bool *aIsInIsolatedMozBrowserElement) = 0; + inline bool GetIsInIsolatedMozBrowserElement() + { + bool result; + mozilla::DebugOnly rv = GetIsInIsolatedMozBrowserElement(&result); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + return result; + } + + /* [infallible] readonly attribute boolean isNullPrincipal; */ + NS_IMETHOD GetIsNullPrincipal(bool *aIsNullPrincipal) = 0; + inline bool GetIsNullPrincipal() + { + bool result; + mozilla::DebugOnly rv = GetIsNullPrincipal(&result); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + return result; + } + + /* [infallible] readonly attribute boolean isCodebasePrincipal; */ + NS_IMETHOD GetIsCodebasePrincipal(bool *aIsCodebasePrincipal) = 0; + inline bool GetIsCodebasePrincipal() + { + bool result; + mozilla::DebugOnly rv = GetIsCodebasePrincipal(&result); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + return result; + } + + /* [infallible] readonly attribute boolean isExpandedPrincipal; */ + NS_IMETHOD GetIsExpandedPrincipal(bool *aIsExpandedPrincipal) = 0; + inline bool GetIsExpandedPrincipal() + { + bool result; + mozilla::DebugOnly rv = GetIsExpandedPrincipal(&result); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + return result; + } + + /* [infallible] readonly attribute boolean isSystemPrincipal; */ + NS_IMETHOD GetIsSystemPrincipal(bool *aIsSystemPrincipal) = 0; + inline bool GetIsSystemPrincipal() + { + bool result; + mozilla::DebugOnly rv = GetIsSystemPrincipal(&result); + MOZ_ASSERT(NS_SUCCEEDED(rv)); + return result; + } + +}; + + NS_DEFINE_STATIC_IID_ACCESSOR(nsIPrincipal, NS_IPRINCIPAL_IID) + +/* Use this macro when declaring classes that implement this interface. 
*/ +#define NS_DECL_NSIPRINCIPAL \ + NS_IMETHOD Equals(nsIPrincipal *other, bool *_retval) override; \ + NS_IMETHOD EqualsConsideringDomain(nsIPrincipal *other, bool *_retval) override; \ + NS_IMETHOD GetHashValue(uint32_t *aHashValue) override; \ + NS_IMETHOD GetURI(nsIURI * *aURI) override; \ + NS_IMETHOD GetDomain(nsIURI * *aDomain) override; \ + NS_IMETHOD SetDomain(nsIURI *aDomain) override; \ + NS_IMETHOD Subsumes(nsIPrincipal *other, bool *_retval) override; \ + NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal *other, bool *_retval) override; \ + NS_IMETHOD SubsumesConsideringDomainIgnoringFPD(nsIPrincipal *other, bool *_retval) override; \ + NS_IMETHOD CheckMayLoad(nsIURI *uri, bool report, bool allowIfInheritsPrincipal) override; \ + NS_IMETHOD GetCsp(nsIContentSecurityPolicy * *aCsp) override; \ + NS_IMETHOD SetCsp(nsIContentSecurityPolicy *aCsp) override; \ + NS_IMETHOD EnsureCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval) override; \ + NS_IMETHOD GetPreloadCsp(nsIContentSecurityPolicy * *aPreloadCsp) override; \ + NS_IMETHOD EnsurePreloadCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval) override; \ + NS_IMETHOD GetCspJSON(nsAString & aCspJSON) override; \ + NS_IMETHOD GetOriginAttributes(JSContext* cx, JS::MutableHandleValue aOriginAttributes) override; \ + virtual const mozilla::OriginAttributes & OriginAttributesRef(void) override; \ + NS_IMETHOD GetOrigin(nsACString & aOrigin) override; \ + NS_IMETHOD GetOriginNoSuffix(nsACString & aOriginNoSuffix) override; \ + NS_IMETHOD GetOriginSuffix(nsACString & aOriginSuffix) override; \ + NS_IMETHOD GetBaseDomain(nsACString & aBaseDomain) override; \ + using nsIPrincipal::GetAppId; \ + NS_IMETHOD GetAppId(uint32_t *aAppId) override; \ + NS_IMETHOD GetAddonId(nsAString & aAddonId) override; \ + NS_IMETHOD GetAddonPolicy(nsISupports * *aAddonPolicy) override; \ + using nsIPrincipal::GetUserContextId; \ + NS_IMETHOD GetUserContextId(uint32_t *aUserContextId) override; \ + 
using nsIPrincipal::GetPrivateBrowsingId; \ + NS_IMETHOD GetPrivateBrowsingId(uint32_t *aPrivateBrowsingId) override; \ + using nsIPrincipal::GetIsInIsolatedMozBrowserElement; \ + NS_IMETHOD GetIsInIsolatedMozBrowserElement(bool *aIsInIsolatedMozBrowserElement) override; \ + using nsIPrincipal::GetIsNullPrincipal; \ + NS_IMETHOD GetIsNullPrincipal(bool *aIsNullPrincipal) override; \ + using nsIPrincipal::GetIsCodebasePrincipal; \ + NS_IMETHOD GetIsCodebasePrincipal(bool *aIsCodebasePrincipal) override; \ + using nsIPrincipal::GetIsExpandedPrincipal; \ + NS_IMETHOD GetIsExpandedPrincipal(bool *aIsExpandedPrincipal) override; \ + using nsIPrincipal::GetIsSystemPrincipal; \ + NS_IMETHOD GetIsSystemPrincipal(bool *aIsSystemPrincipal) override; + +/* Use this macro when declaring the members of this interface when the + class doesn't implement the interface. This is useful for forwarding. */ +#define NS_DECL_NON_VIRTUAL_NSIPRINCIPAL \ + nsresult Equals(nsIPrincipal *other, bool *_retval); \ + nsresult EqualsConsideringDomain(nsIPrincipal *other, bool *_retval); \ + nsresult GetHashValue(uint32_t *aHashValue); \ + nsresult GetURI(nsIURI * *aURI); \ + nsresult GetDomain(nsIURI * *aDomain); \ + nsresult SetDomain(nsIURI *aDomain); \ + nsresult Subsumes(nsIPrincipal *other, bool *_retval); \ + nsresult SubsumesConsideringDomain(nsIPrincipal *other, bool *_retval); \ + nsresult SubsumesConsideringDomainIgnoringFPD(nsIPrincipal *other, bool *_retval); \ + nsresult CheckMayLoad(nsIURI *uri, bool report, bool allowIfInheritsPrincipal); \ + nsresult GetCsp(nsIContentSecurityPolicy * *aCsp); \ + nsresult SetCsp(nsIContentSecurityPolicy *aCsp); \ + nsresult EnsureCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval); \ + nsresult GetPreloadCsp(nsIContentSecurityPolicy * *aPreloadCsp); \ + nsresult EnsurePreloadCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval); \ + nsresult GetCspJSON(nsAString & aCspJSON); \ + nsresult GetOriginAttributes(JSContext* 
cx, JS::MutableHandleValue aOriginAttributes); \ + const mozilla::OriginAttributes & OriginAttributesRef(void); \ + nsresult GetOrigin(nsACString & aOrigin); \ + nsresult GetOriginNoSuffix(nsACString & aOriginNoSuffix); \ + nsresult GetOriginSuffix(nsACString & aOriginSuffix); \ + nsresult GetBaseDomain(nsACString & aBaseDomain); \ + using nsIPrincipal::GetAppId; \ + nsresult GetAppId(uint32_t *aAppId); \ + nsresult GetAddonId(nsAString & aAddonId); \ + nsresult GetAddonPolicy(nsISupports * *aAddonPolicy); \ + using nsIPrincipal::GetUserContextId; \ + nsresult GetUserContextId(uint32_t *aUserContextId); \ + using nsIPrincipal::GetPrivateBrowsingId; \ + nsresult GetPrivateBrowsingId(uint32_t *aPrivateBrowsingId); \ + using nsIPrincipal::GetIsInIsolatedMozBrowserElement; \ + nsresult GetIsInIsolatedMozBrowserElement(bool *aIsInIsolatedMozBrowserElement); \ + using nsIPrincipal::GetIsNullPrincipal; \ + nsresult GetIsNullPrincipal(bool *aIsNullPrincipal); \ + using nsIPrincipal::GetIsCodebasePrincipal; \ + nsresult GetIsCodebasePrincipal(bool *aIsCodebasePrincipal); \ + using nsIPrincipal::GetIsExpandedPrincipal; \ + nsresult GetIsExpandedPrincipal(bool *aIsExpandedPrincipal); \ + using nsIPrincipal::GetIsSystemPrincipal; \ + nsresult GetIsSystemPrincipal(bool *aIsSystemPrincipal); + +/* Use this macro to declare functions that forward the behavior of this interface to another object. 
*/ +#define NS_FORWARD_NSIPRINCIPAL(_to) \ + NS_IMETHOD Equals(nsIPrincipal *other, bool *_retval) override { return _to Equals(other, _retval); } \ + NS_IMETHOD EqualsConsideringDomain(nsIPrincipal *other, bool *_retval) override { return _to EqualsConsideringDomain(other, _retval); } \ + NS_IMETHOD GetHashValue(uint32_t *aHashValue) override { return _to GetHashValue(aHashValue); } \ + NS_IMETHOD GetURI(nsIURI * *aURI) override { return _to GetURI(aURI); } \ + NS_IMETHOD GetDomain(nsIURI * *aDomain) override { return _to GetDomain(aDomain); } \ + NS_IMETHOD SetDomain(nsIURI *aDomain) override { return _to SetDomain(aDomain); } \ + NS_IMETHOD Subsumes(nsIPrincipal *other, bool *_retval) override { return _to Subsumes(other, _retval); } \ + NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal *other, bool *_retval) override { return _to SubsumesConsideringDomain(other, _retval); } \ + NS_IMETHOD SubsumesConsideringDomainIgnoringFPD(nsIPrincipal *other, bool *_retval) override { return _to SubsumesConsideringDomainIgnoringFPD(other, _retval); } \ + NS_IMETHOD CheckMayLoad(nsIURI *uri, bool report, bool allowIfInheritsPrincipal) override { return _to CheckMayLoad(uri, report, allowIfInheritsPrincipal); } \ + NS_IMETHOD GetCsp(nsIContentSecurityPolicy * *aCsp) override { return _to GetCsp(aCsp); } \ + NS_IMETHOD SetCsp(nsIContentSecurityPolicy *aCsp) override { return _to SetCsp(aCsp); } \ + NS_IMETHOD EnsureCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval) override { return _to EnsureCSP(aDocument, _retval); } \ + NS_IMETHOD GetPreloadCsp(nsIContentSecurityPolicy * *aPreloadCsp) override { return _to GetPreloadCsp(aPreloadCsp); } \ + NS_IMETHOD EnsurePreloadCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval) override { return _to EnsurePreloadCSP(aDocument, _retval); } \ + NS_IMETHOD GetCspJSON(nsAString & aCspJSON) override { return _to GetCspJSON(aCspJSON); } \ + NS_IMETHOD GetOriginAttributes(JSContext* cx, JS::MutableHandleValue 
aOriginAttributes) override { return _to GetOriginAttributes(cx, aOriginAttributes); } \ + virtual const mozilla::OriginAttributes & OriginAttributesRef(void) override { return _to OriginAttributesRef(); } \ + NS_IMETHOD GetOrigin(nsACString & aOrigin) override { return _to GetOrigin(aOrigin); } \ + NS_IMETHOD GetOriginNoSuffix(nsACString & aOriginNoSuffix) override { return _to GetOriginNoSuffix(aOriginNoSuffix); } \ + NS_IMETHOD GetOriginSuffix(nsACString & aOriginSuffix) override { return _to GetOriginSuffix(aOriginSuffix); } \ + NS_IMETHOD GetBaseDomain(nsACString & aBaseDomain) override { return _to GetBaseDomain(aBaseDomain); } \ + using nsIPrincipal::GetAppId; \ + NS_IMETHOD GetAppId(uint32_t *aAppId) override { return _to GetAppId(aAppId); } \ + NS_IMETHOD GetAddonId(nsAString & aAddonId) override { return _to GetAddonId(aAddonId); } \ + NS_IMETHOD GetAddonPolicy(nsISupports * *aAddonPolicy) override { return _to GetAddonPolicy(aAddonPolicy); } \ + using nsIPrincipal::GetUserContextId; \ + NS_IMETHOD GetUserContextId(uint32_t *aUserContextId) override { return _to GetUserContextId(aUserContextId); } \ + using nsIPrincipal::GetPrivateBrowsingId; \ + NS_IMETHOD GetPrivateBrowsingId(uint32_t *aPrivateBrowsingId) override { return _to GetPrivateBrowsingId(aPrivateBrowsingId); } \ + using nsIPrincipal::GetIsInIsolatedMozBrowserElement; \ + NS_IMETHOD GetIsInIsolatedMozBrowserElement(bool *aIsInIsolatedMozBrowserElement) override { return _to GetIsInIsolatedMozBrowserElement(aIsInIsolatedMozBrowserElement); } \ + using nsIPrincipal::GetIsNullPrincipal; \ + NS_IMETHOD GetIsNullPrincipal(bool *aIsNullPrincipal) override { return _to GetIsNullPrincipal(aIsNullPrincipal); } \ + using nsIPrincipal::GetIsCodebasePrincipal; \ + NS_IMETHOD GetIsCodebasePrincipal(bool *aIsCodebasePrincipal) override { return _to GetIsCodebasePrincipal(aIsCodebasePrincipal); } \ + using nsIPrincipal::GetIsExpandedPrincipal; \ + NS_IMETHOD GetIsExpandedPrincipal(bool *aIsExpandedPrincipal) 
override { return _to GetIsExpandedPrincipal(aIsExpandedPrincipal); } \ + using nsIPrincipal::GetIsSystemPrincipal; \ + NS_IMETHOD GetIsSystemPrincipal(bool *aIsSystemPrincipal) override { return _to GetIsSystemPrincipal(aIsSystemPrincipal); } + +/* Use this macro to declare functions that forward the behavior of this interface to another object in a safe way. */ +#define NS_FORWARD_SAFE_NSIPRINCIPAL(_to) \ + NS_IMETHOD Equals(nsIPrincipal *other, bool *_retval) override { return !_to ? NS_ERROR_NULL_POINTER : _to->Equals(other, _retval); } \ + NS_IMETHOD EqualsConsideringDomain(nsIPrincipal *other, bool *_retval) override { return !_to ? NS_ERROR_NULL_POINTER : _to->EqualsConsideringDomain(other, _retval); } \ + NS_IMETHOD GetHashValue(uint32_t *aHashValue) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetHashValue(aHashValue); } \ + NS_IMETHOD GetURI(nsIURI * *aURI) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetURI(aURI); } \ + NS_IMETHOD GetDomain(nsIURI * *aDomain) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetDomain(aDomain); } \ + NS_IMETHOD SetDomain(nsIURI *aDomain) override { return !_to ? NS_ERROR_NULL_POINTER : _to->SetDomain(aDomain); } \ + NS_IMETHOD Subsumes(nsIPrincipal *other, bool *_retval) override { return !_to ? NS_ERROR_NULL_POINTER : _to->Subsumes(other, _retval); } \ + NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal *other, bool *_retval) override { return !_to ? NS_ERROR_NULL_POINTER : _to->SubsumesConsideringDomain(other, _retval); } \ + NS_IMETHOD SubsumesConsideringDomainIgnoringFPD(nsIPrincipal *other, bool *_retval) override { return !_to ? NS_ERROR_NULL_POINTER : _to->SubsumesConsideringDomainIgnoringFPD(other, _retval); } \ + NS_IMETHOD CheckMayLoad(nsIURI *uri, bool report, bool allowIfInheritsPrincipal) override { return !_to ? NS_ERROR_NULL_POINTER : _to->CheckMayLoad(uri, report, allowIfInheritsPrincipal); } \ + NS_IMETHOD GetCsp(nsIContentSecurityPolicy * *aCsp) override { return !_to ? 
NS_ERROR_NULL_POINTER : _to->GetCsp(aCsp); } \ + NS_IMETHOD SetCsp(nsIContentSecurityPolicy *aCsp) override { return !_to ? NS_ERROR_NULL_POINTER : _to->SetCsp(aCsp); } \ + NS_IMETHOD EnsureCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval) override { return !_to ? NS_ERROR_NULL_POINTER : _to->EnsureCSP(aDocument, _retval); } \ + NS_IMETHOD GetPreloadCsp(nsIContentSecurityPolicy * *aPreloadCsp) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetPreloadCsp(aPreloadCsp); } \ + NS_IMETHOD EnsurePreloadCSP(nsIDOMDocument *aDocument, nsIContentSecurityPolicy * *_retval) override { return !_to ? NS_ERROR_NULL_POINTER : _to->EnsurePreloadCSP(aDocument, _retval); } \ + NS_IMETHOD GetCspJSON(nsAString & aCspJSON) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetCspJSON(aCspJSON); } \ + NS_IMETHOD GetOriginAttributes(JSContext* cx, JS::MutableHandleValue aOriginAttributes) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetOriginAttributes(cx, aOriginAttributes); } \ + virtual const mozilla::OriginAttributes & OriginAttributesRef(void) override; \ + NS_IMETHOD GetOrigin(nsACString & aOrigin) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetOrigin(aOrigin); } \ + NS_IMETHOD GetOriginNoSuffix(nsACString & aOriginNoSuffix) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetOriginNoSuffix(aOriginNoSuffix); } \ + NS_IMETHOD GetOriginSuffix(nsACString & aOriginSuffix) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetOriginSuffix(aOriginSuffix); } \ + NS_IMETHOD GetBaseDomain(nsACString & aBaseDomain) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetBaseDomain(aBaseDomain); } \ + NS_IMETHOD GetAppId(uint32_t *aAppId) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetAppId(aAppId); } \ + NS_IMETHOD GetAddonId(nsAString & aAddonId) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetAddonId(aAddonId); } \ + NS_IMETHOD GetAddonPolicy(nsISupports * *aAddonPolicy) override { return !_to ? 
NS_ERROR_NULL_POINTER : _to->GetAddonPolicy(aAddonPolicy); } \ + NS_IMETHOD GetUserContextId(uint32_t *aUserContextId) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetUserContextId(aUserContextId); } \ + NS_IMETHOD GetPrivateBrowsingId(uint32_t *aPrivateBrowsingId) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetPrivateBrowsingId(aPrivateBrowsingId); } \ + NS_IMETHOD GetIsInIsolatedMozBrowserElement(bool *aIsInIsolatedMozBrowserElement) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetIsInIsolatedMozBrowserElement(aIsInIsolatedMozBrowserElement); } \ + NS_IMETHOD GetIsNullPrincipal(bool *aIsNullPrincipal) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetIsNullPrincipal(aIsNullPrincipal); } \ + NS_IMETHOD GetIsCodebasePrincipal(bool *aIsCodebasePrincipal) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetIsCodebasePrincipal(aIsCodebasePrincipal); } \ + NS_IMETHOD GetIsExpandedPrincipal(bool *aIsExpandedPrincipal) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetIsExpandedPrincipal(aIsExpandedPrincipal); } \ + NS_IMETHOD GetIsSystemPrincipal(bool *aIsSystemPrincipal) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetIsSystemPrincipal(aIsSystemPrincipal); } + + +/* starting interface: nsIExpandedPrincipal */ +#define NS_IEXPANDEDPRINCIPAL_IID_STR "f3e177df-6a5e-489f-80a7-2dd1481471d8" + +#define NS_IEXPANDEDPRINCIPAL_IID \ + {0xf3e177df, 0x6a5e, 0x489f, \ + { 0x80, 0xa7, 0x2d, 0xd1, 0x48, 0x14, 0x71, 0xd8 }} + +class NS_NO_VTABLE nsIExpandedPrincipal : public nsISupports { + public: + + NS_DECLARE_STATIC_IID_ACCESSOR(NS_IEXPANDEDPRINCIPAL_IID) + + /* [noscript] readonly attribute PrincipalArray whiteList; */ + NS_IMETHOD GetWhiteList(nsTArray > **aWhiteList) = 0; + +}; + + NS_DEFINE_STATIC_IID_ACCESSOR(nsIExpandedPrincipal, NS_IEXPANDEDPRINCIPAL_IID) + +/* Use this macro when declaring classes that implement this interface. 
*/ +#define NS_DECL_NSIEXPANDEDPRINCIPAL \ + NS_IMETHOD GetWhiteList(nsTArray > **aWhiteList) override; + +/* Use this macro when declaring the members of this interface when the + class doesn't implement the interface. This is useful for forwarding. */ +#define NS_DECL_NON_VIRTUAL_NSIEXPANDEDPRINCIPAL \ + nsresult GetWhiteList(nsTArray > **aWhiteList); + +/* Use this macro to declare functions that forward the behavior of this interface to another object. */ +#define NS_FORWARD_NSIEXPANDEDPRINCIPAL(_to) \ + NS_IMETHOD GetWhiteList(nsTArray > **aWhiteList) override { return _to GetWhiteList(aWhiteList); } + +/* Use this macro to declare functions that forward the behavior of this interface to another object in a safe way. */ +#define NS_FORWARD_SAFE_NSIEXPANDEDPRINCIPAL(_to) \ + NS_IMETHOD GetWhiteList(nsTArray > **aWhiteList) override { return !_to ? NS_ERROR_NULL_POINTER : _to->GetWhiteList(aWhiteList); } + + +#endif /* __gen_nsIPrincipal_h__ */ diff --git a/gecko/include/nsISerializable.h b/gecko/include/nsISerializable.h new file mode 100644 index 0000000..21a17ac --- /dev/null +++ b/gecko/include/nsISerializable.h @@ -0,0 +1,66 @@ +/* + * DO NOT EDIT. THIS FILE IS GENERATED FROM ../../../dist/idl/nsISerializable.idl + */ + +#ifndef __gen_nsISerializable_h__ +#define __gen_nsISerializable_h__ + + +#ifndef __gen_nsISupports_h__ +#include "nsISupports.h" +#endif + +/* For IDL files that don't want to include root IDL files. 
*/ +#ifndef NS_NO_VTABLE +#define NS_NO_VTABLE +#endif +class nsIObjectInputStream; /* forward declaration */ + +class nsIObjectOutputStream; /* forward declaration */ + + +/* starting interface: nsISerializable */ +#define NS_ISERIALIZABLE_IID_STR "91cca981-c26d-44a8-bebe-d9ed4891503a" + +#define NS_ISERIALIZABLE_IID \ + {0x91cca981, 0xc26d, 0x44a8, \ + { 0xbe, 0xbe, 0xd9, 0xed, 0x48, 0x91, 0x50, 0x3a }} + +class NS_NO_VTABLE nsISerializable : public nsISupports { + public: + + NS_DECLARE_STATIC_IID_ACCESSOR(NS_ISERIALIZABLE_IID) + + /* void read (in nsIObjectInputStream aInputStream); */ + NS_IMETHOD Read(nsIObjectInputStream *aInputStream) = 0; + + /* void write (in nsIObjectOutputStream aOutputStream); */ + NS_IMETHOD Write(nsIObjectOutputStream *aOutputStream) = 0; + +}; + + NS_DEFINE_STATIC_IID_ACCESSOR(nsISerializable, NS_ISERIALIZABLE_IID) + +/* Use this macro when declaring classes that implement this interface. */ +#define NS_DECL_NSISERIALIZABLE \ + NS_IMETHOD Read(nsIObjectInputStream *aInputStream) override; \ + NS_IMETHOD Write(nsIObjectOutputStream *aOutputStream) override; + +/* Use this macro when declaring the members of this interface when the + class doesn't implement the interface. This is useful for forwarding. */ +#define NS_DECL_NON_VIRTUAL_NSISERIALIZABLE \ + nsresult Read(nsIObjectInputStream *aInputStream); \ + nsresult Write(nsIObjectOutputStream *aOutputStream); + +/* Use this macro to declare functions that forward the behavior of this interface to another object. */ +#define NS_FORWARD_NSISERIALIZABLE(_to) \ + NS_IMETHOD Read(nsIObjectInputStream *aInputStream) override { return _to Read(aInputStream); } \ + NS_IMETHOD Write(nsIObjectOutputStream *aOutputStream) override { return _to Write(aOutputStream); } + +/* Use this macro to declare functions that forward the behavior of this interface to another object in a safe way. 
*/ +#define NS_FORWARD_SAFE_NSISERIALIZABLE(_to) \ + NS_IMETHOD Read(nsIObjectInputStream *aInputStream) override { return !_to ? NS_ERROR_NULL_POINTER : _to->Read(aInputStream); } \ + NS_IMETHOD Write(nsIObjectOutputStream *aOutputStream) override { return !_to ? NS_ERROR_NULL_POINTER : _to->Write(aOutputStream); } + + +#endif /* __gen_nsISerializable_h__ */ diff --git a/gecko/include/nsProxyRelease.h b/gecko/include/nsProxyRelease.h new file mode 100644 index 0000000..bae15cf --- /dev/null +++ b/gecko/include/nsProxyRelease.h @@ -0,0 +1,415 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef nsProxyRelease_h__ +#define nsProxyRelease_h__ + +#include "nsIEventTarget.h" +#include "nsIThread.h" +#include "nsCOMPtr.h" +#include "nsAutoPtr.h" +#include "MainThreadUtils.h" +#include "nsPrintfCString.h" +#include "nsThreadUtils.h" +#include "mozilla/Likely.h" +#include "mozilla/Move.h" +#include "mozilla/SystemGroup.h" +#include "mozilla/TypeTraits.h" +#include "mozilla/Unused.h" + +#ifdef XPCOM_GLUE_AVOID_NSPR +#error NS_ProxyRelease implementation depends on NSPR. 
+#endif + +namespace detail { + +template +class ProxyReleaseEvent : public mozilla::Runnable +{ +public: + ProxyReleaseEvent(const char* aName, already_AddRefed aDoomed) + : Runnable(aName), mDoomed(aDoomed.take()) {} + + NS_IMETHOD Run() override + { + NS_IF_RELEASE(mDoomed); + return NS_OK; + } + + NS_IMETHOD GetName(nsACString& aName) override + { +#ifdef RELEASE_OR_BETA + aName.Truncate(); +#else + if (mName) { + aName.Append(nsPrintfCString("ProxyReleaseEvent for %s", mName)); + } else { + aName.AssignLiteral("ProxyReleaseEvent"); + } +#endif + return NS_OK; + } + +private: + T* MOZ_OWNING_REF mDoomed; +}; + +template +void +ProxyRelease(const char* aName, nsIEventTarget* aTarget, + already_AddRefed aDoomed, bool aAlwaysProxy) +{ + // Auto-managing release of the pointer. + RefPtr doomed = aDoomed; + nsresult rv; + + if (!doomed || !aTarget) { + return; + } + + if (!aAlwaysProxy) { + bool onCurrentThread = false; + rv = aTarget->IsOnCurrentThread(&onCurrentThread); + if (NS_SUCCEEDED(rv) && onCurrentThread) { + return; + } + } + + nsCOMPtr ev = new ProxyReleaseEvent(aName, doomed.forget()); + + rv = aTarget->Dispatch(ev, NS_DISPATCH_NORMAL); + if (NS_FAILED(rv)) { + NS_WARNING("failed to post proxy release event, leaking!"); + // It is better to leak the aDoomed object than risk crashing as + // a result of deleting it on the wrong thread. + } +} + +template +struct ProxyReleaseChooser +{ + template + static void ProxyRelease(const char* aName, + nsIEventTarget* aTarget, + already_AddRefed aDoomed, + bool aAlwaysProxy) + { + ::detail::ProxyRelease(aName, aTarget, mozilla::Move(aDoomed), aAlwaysProxy); + } +}; + +template<> +struct ProxyReleaseChooser +{ + // We need an intermediate step for handling classes with ambiguous + // inheritance to nsISupports. 
+ template + static void ProxyRelease(const char* aName, + nsIEventTarget* aTarget, + already_AddRefed aDoomed, + bool aAlwaysProxy) + { + ProxyReleaseISupports(aName, aTarget, ToSupports(aDoomed.take()), aAlwaysProxy); + } + + static void ProxyReleaseISupports(const char* aName, + nsIEventTarget* aTarget, + nsISupports* aDoomed, + bool aAlwaysProxy); +}; + +} // namespace detail + +/** + * Ensures that the delete of a smart pointer occurs on the target thread. + * + * @param aName + * the labelling name of the runnable involved in the releasing + * @param aTarget + * the target thread where the doomed object should be released. + * @param aDoomed + * the doomed object; the object to be released on the target thread. + * @param aAlwaysProxy + * normally, if NS_ProxyRelease is called on the target thread, then the + * doomed object will be released directly. However, if this parameter is + * true, then an event will always be posted to the target thread for + * asynchronous release. + */ +template +inline NS_HIDDEN_(void) +NS_ProxyRelease(const char* aName, nsIEventTarget* aTarget, + already_AddRefed aDoomed, bool aAlwaysProxy = false) +{ + ::detail::ProxyReleaseChooser::value> + ::ProxyRelease(aName, aTarget, mozilla::Move(aDoomed), aAlwaysProxy); +} + +/** + * Ensures that the delete of a smart pointer occurs on the main thread. + * + * @param aName + * the labelling name of the runnable involved in the releasing + * @param aDoomed + * the doomed object; the object to be released on the main thread. + * @param aAlwaysProxy + * normally, if NS_ReleaseOnMainThreadSystemGroup is called on the main + * thread, then the doomed object will be released directly. However, if + * this parameter is true, then an event will always be posted to the + * main thread for asynchronous release. 
+ */ +template +inline NS_HIDDEN_(void) +NS_ReleaseOnMainThreadSystemGroup(const char* aName, + already_AddRefed aDoomed, + bool aAlwaysProxy = false) +{ + // NS_ProxyRelease treats a null event target as "the current thread". So a + // handle on the main thread is only necessary when we're not already on the + // main thread or the release must happen asynchronously. + nsCOMPtr systemGroupEventTarget; + if (!NS_IsMainThread() || aAlwaysProxy) { + systemGroupEventTarget = mozilla::SystemGroup::EventTargetFor(mozilla::TaskCategory::Other); + + if (!systemGroupEventTarget) { + MOZ_ASSERT_UNREACHABLE("Could not get main thread; leaking an object!"); + mozilla::Unused << aDoomed.take(); + return; + } + } + + NS_ProxyRelease(aName, systemGroupEventTarget, mozilla::Move(aDoomed), + aAlwaysProxy); +} + +template +inline NS_HIDDEN_(void) +NS_ReleaseOnMainThreadSystemGroup(already_AddRefed aDoomed, + bool aAlwaysProxy = false) +{ + NS_ReleaseOnMainThreadSystemGroup("NS_ReleaseOnMainThreadSystemGroup", + mozilla::Move(aDoomed), aAlwaysProxy); +} + +/** + * Class to safely handle main-thread-only pointers off the main thread. + * + * Classes like XPCWrappedJS are main-thread-only, which means that it is + * forbidden to call methods on instances of these classes off the main thread. + * For various reasons (see bug 771074), this restriction recently began to + * apply to AddRef/Release as well. + * + * This presents a problem for consumers that wish to hold a callback alive + * on non-main-thread code. A common example of this is the proxy callback + * pattern, where non-main-thread code holds a strong-reference to the callback + * object, and dispatches new Runnables (also with a strong reference) to the + * main thread in order to execute the callback. This involves several AddRef + * and Release calls on the other thread, which is (now) verboten. + * + * The basic idea of this class is to introduce a layer of indirection. 
+ * nsMainThreadPtrHolder is a threadsafe reference-counted class that internally + * maintains one strong reference to the main-thread-only object. It must be + * instantiated on the main thread (so that the AddRef of the underlying object + * happens on the main thread), but consumers may subsequently pass references + * to the holder anywhere they please. These references are meant to be opaque + * when accessed off-main-thread (assertions enforce this). + * + * The semantics of RefPtr > would be cumbersome, so + * we also introduce nsMainThreadPtrHandle, which is conceptually identical + * to the above (though it includes various convenience methods). The basic + * pattern is as follows. + * + * // On the main thread: + * nsCOMPtr callback = ...; + * nsMainThreadPtrHandle callbackHandle = + * new nsMainThreadPtrHolder(callback); + * // Pass callbackHandle to structs/classes that might be accessed on other + * // threads. + * + * All structs and classes that might be accessed on other threads should store + * an nsMainThreadPtrHandle rather than an nsCOMPtr. + */ +template +class nsMainThreadPtrHolder final +{ +public: + // We can only acquire a pointer on the main thread. We to fail fast for + // threading bugs, so by default we assert if our pointer is used or acquired + // off-main-thread. But some consumers need to use the same pointer for + // multiple classes, some of which are main-thread-only and some of which + // aren't. So we allow them to explicitly disable this strict checking. + nsMainThreadPtrHolder(const char* aName, T* aPtr, bool aStrict = true, + nsIEventTarget* aMainThreadEventTarget = nullptr) + : mRawPtr(nullptr) + , mStrict(aStrict) + , mMainThreadEventTarget(aMainThreadEventTarget) +#ifndef RELEASE_OR_BETA + , mName(aName) +#endif + { + // We can only AddRef our pointer on the main thread, which means that the + // holder must be constructed on the main thread. 
+ MOZ_ASSERT(!mStrict || NS_IsMainThread()); + NS_IF_ADDREF(mRawPtr = aPtr); + } + nsMainThreadPtrHolder(const char* aName, already_AddRefed aPtr, + bool aStrict = true, + nsIEventTarget* aMainThreadEventTarget = nullptr) + : mRawPtr(aPtr.take()) + , mStrict(aStrict) + , mMainThreadEventTarget(aMainThreadEventTarget) +#ifndef RELEASE_OR_BETA + , mName(aName) +#endif + { + // Since we don't need to AddRef the pointer, this constructor is safe to + // call on any thread. + } + +private: + // We can be released on any thread. + ~nsMainThreadPtrHolder() + { + if (NS_IsMainThread()) { + NS_IF_RELEASE(mRawPtr); + } else if (mRawPtr) { + if (!mMainThreadEventTarget) { + mMainThreadEventTarget = do_GetMainThread(); + } + MOZ_ASSERT(mMainThreadEventTarget); + NS_ProxyRelease( +#ifdef RELEASE_OR_BETA + nullptr, +#else + mName, +#endif + mMainThreadEventTarget, dont_AddRef(mRawPtr)); + } + } + +public: + T* get() + { + // Nobody should be touching the raw pointer off-main-thread. + if (mStrict && MOZ_UNLIKELY(!NS_IsMainThread())) { + NS_ERROR("Can't dereference nsMainThreadPtrHolder off main thread"); + MOZ_CRASH(); + } + return mRawPtr; + } + + bool operator==(const nsMainThreadPtrHolder& aOther) const + { + return mRawPtr == aOther.mRawPtr; + } + bool operator!() const + { + return !mRawPtr; + } + + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(nsMainThreadPtrHolder) + +private: + // Our wrapped pointer. + T* mRawPtr; + + // Whether to strictly enforce thread invariants in this class. + bool mStrict; + + nsCOMPtr mMainThreadEventTarget; + +#ifndef RELEASE_OR_BETA + const char* mName = nullptr; +#endif + + // Copy constructor and operator= not implemented. Once constructed, the + // holder is immutable. 
+ T& operator=(nsMainThreadPtrHolder& aOther); + nsMainThreadPtrHolder(const nsMainThreadPtrHolder& aOther); +}; + +template +class nsMainThreadPtrHandle +{ + RefPtr> mPtr; + +public: + nsMainThreadPtrHandle() : mPtr(nullptr) {} + MOZ_IMPLICIT nsMainThreadPtrHandle(decltype(nullptr)) : mPtr(nullptr) {} + explicit nsMainThreadPtrHandle(nsMainThreadPtrHolder* aHolder) + : mPtr(aHolder) + { + } + explicit nsMainThreadPtrHandle( + already_AddRefed> aHolder) + : mPtr(aHolder) + { + } + nsMainThreadPtrHandle(const nsMainThreadPtrHandle& aOther) + : mPtr(aOther.mPtr) + { + } + nsMainThreadPtrHandle& operator=(const nsMainThreadPtrHandle& aOther) + { + mPtr = aOther.mPtr; + return *this; + } + nsMainThreadPtrHandle& operator=(nsMainThreadPtrHolder* aHolder) + { + mPtr = aHolder; + return *this; + } + + // These all call through to nsMainThreadPtrHolder, and thus implicitly + // assert that we're on the main thread. Off-main-thread consumers must treat + // these handles as opaque. + T* get() + { + if (mPtr) { + return mPtr.get()->get(); + } + return nullptr; + } + const T* get() const + { + if (mPtr) { + return mPtr.get()->get(); + } + return nullptr; + } + + operator T*() { return get(); } + T* operator->() MOZ_NO_ADDREF_RELEASE_ON_RETURN { return get(); } + + // These are safe to call on other threads with appropriate external locking. 
+ bool operator==(const nsMainThreadPtrHandle& aOther) const + { + if (!mPtr || !aOther.mPtr) { + return mPtr == aOther.mPtr; + } + return *mPtr == *aOther.mPtr; + } + bool operator!=(const nsMainThreadPtrHandle& aOther) const + { + return !operator==(aOther); + } + bool operator==(decltype(nullptr)) const { return mPtr == nullptr; } + bool operator!=(decltype(nullptr)) const { return mPtr != nullptr; } + bool operator!() const { + return !mPtr || !*mPtr; + } +}; + +namespace mozilla { + +template +using PtrHolder = nsMainThreadPtrHolder; + +template +using PtrHandle = nsMainThreadPtrHandle; + +} // namespace mozilla + +#endif diff --git a/gecko/include/webaudio/AlignmentUtils.h b/gecko/include/webaudio/AlignmentUtils.h new file mode 100644 index 0000000..6b145a8 --- /dev/null +++ b/gecko/include/webaudio/AlignmentUtils.h @@ -0,0 +1,29 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef AlignmentUtils_h__ +#define AlignmentUtils_h__ + +#define IS_ALIGNED16(ptr) ((((uintptr_t)ptr + 15) & ~0x0F) == (uintptr_t)ptr) + +#ifdef DEBUG + #define ASSERT_ALIGNED16(ptr) \ + MOZ_ASSERT(IS_ALIGNED16(ptr), \ + #ptr " has to be aligned to a 16 byte boundary"); +#else + #define ASSERT_ALIGNED16(ptr) +#endif + +#ifdef DEBUG + #define ASSERT_MULTIPLE16(v) \ + MOZ_ASSERT(v % 16 == 0, #v " has to be a a multiple of 16"); +#else + #define ASSERT_MULTIPLE16(v) +#endif + +#define ALIGNED16(ptr) (float*)(((uintptr_t)ptr + 15) & ~0x0F); + +#endif diff --git a/gecko/include/webaudio/AudioBlock.h b/gecko/include/webaudio/AudioBlock.h new file mode 100644 index 0000000..9fb1610 --- /dev/null +++ b/gecko/include/webaudio/AudioBlock.h @@ -0,0 +1,138 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef MOZILLA_AUDIOBLOCK_H_ +#define MOZILLA_AUDIOBLOCK_H_ + +#include "AudioSegment.h" + +namespace mozilla { + +/** + * An AudioChunk whose buffer contents need to be valid only for one + * processing block iteration, after which contents can be overwritten if the + * buffer has not been passed to longer term storage or to another thread, + * which may happen though AsAudioChunk() or AsMutableChunk(). + * + * Use on graph thread only. + */ +class AudioBlock : private AudioChunk +{ +public: + AudioBlock() { + mDuration = WEBAUDIO_BLOCK_SIZE; + mBufferFormat = AUDIO_FORMAT_SILENCE; + } + // No effort is made in constructors to ensure that mBufferIsDownstreamRef + // is set because the block is expected to be a temporary and so the + // reference will be released before the next iteration. 
+ // The custom copy constructor is required so as not to set + // mBufferIsDownstreamRef without notifying AudioBlockBuffer. + AudioBlock(const AudioBlock& aBlock) : AudioChunk(aBlock.AsAudioChunk()) {} + explicit AudioBlock(const AudioChunk& aChunk) + : AudioChunk(aChunk) + { + MOZ_ASSERT(aChunk.mDuration == WEBAUDIO_BLOCK_SIZE); + } + ~AudioBlock(); + + using AudioChunk::GetDuration; + using AudioChunk::IsNull; + using AudioChunk::ChannelCount; + using AudioChunk::ChannelData; + using AudioChunk::SizeOfExcludingThisIfUnshared; + using AudioChunk::SizeOfExcludingThis; + // mDuration is not exposed. Use GetDuration(). + // mBuffer is not exposed. Use SetBuffer(). + using AudioChunk::mChannelData; + using AudioChunk::mVolume; + using AudioChunk::mBufferFormat; + + const AudioChunk& AsAudioChunk() const { return *this; } + AudioChunk* AsMutableChunk() { + ClearDownstreamMark(); + return this; + } + + /** + * Allocates, if necessary, aChannelCount buffers of WEBAUDIO_BLOCK_SIZE float + * samples for writing. + */ + void AllocateChannels(uint32_t aChannelCount); + + /** + * ChannelFloatsForWrite() should only be used when the buffers have been + * created with AllocateChannels(). + */ + float* ChannelFloatsForWrite(size_t aChannel) + { + MOZ_ASSERT(mBufferFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(CanWrite()); + return static_cast(const_cast(mChannelData[aChannel])); + } + + void SetBuffer(ThreadSharedObject* aNewBuffer); + void SetNull(StreamTime aDuration) { + MOZ_ASSERT(aDuration == WEBAUDIO_BLOCK_SIZE); + SetBuffer(nullptr); + mChannelData.Clear(); + mVolume = 1.0f; + mBufferFormat = AUDIO_FORMAT_SILENCE; + } + + AudioBlock& operator=(const AudioBlock& aBlock) { + // Instead of just copying, mBufferIsDownstreamRef must be first cleared + // if set. It is set again for the new mBuffer if possible. This happens + // in SetBuffer(). 
+ return *this = aBlock.AsAudioChunk(); + } + AudioBlock& operator=(const AudioChunk& aChunk) { + MOZ_ASSERT(aChunk.mDuration == WEBAUDIO_BLOCK_SIZE); + SetBuffer(aChunk.mBuffer); + mChannelData = aChunk.mChannelData; + mVolume = aChunk.mVolume; + mBufferFormat = aChunk.mBufferFormat; + return *this; + } + + bool IsMuted() const { return mVolume == 0.0f; } + + bool IsSilentOrSubnormal() const + { + if (!mBuffer) { + return true; + } + + for (uint32_t i = 0, length = mChannelData.Length(); i < length; ++i) { + const float* channel = static_cast(mChannelData[i]); + for (StreamTime frame = 0; frame < mDuration; ++frame) { + if (fabs(channel[frame]) >= FLT_MIN) { + return false; + } + } + } + + return true; + } + +private: + void ClearDownstreamMark(); + bool CanWrite(); + + // mBufferIsDownstreamRef is set only when mBuffer references an + // AudioBlockBuffer created in a different AudioBlock. That can happen when + // this AudioBlock is on a node downstream from the node which created the + // buffer. When this is set, the AudioBlockBuffer is notified that this + // reference does not prevent the upstream node from re-using the buffer next + // iteration and modifying its contents. The AudioBlockBuffer is also + // notified when mBuffer releases this reference. + bool mBufferIsDownstreamRef = false; +}; + +} // namespace mozilla + +DECLARE_USE_COPY_CONSTRUCTORS(mozilla::AudioBlock) + +#endif // MOZILLA_AUDIOBLOCK_H_ diff --git a/gecko/include/webaudio/WebAudioUtils.h b/gecko/include/webaudio/WebAudioUtils.h new file mode 100644 index 0000000..3754a15 --- /dev/null +++ b/gecko/include/webaudio/WebAudioUtils.h @@ -0,0 +1,240 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef WebAudioUtils_h_ +#define WebAudioUtils_h_ + +#include +#include +#include "mozilla/TypeTraits.h" +#include "mozilla/FloatingPoint.h" +#include "MediaSegment.h" + +// Forward declaration +typedef struct SpeexResamplerState_ SpeexResamplerState; + +namespace mozilla { + +class AudioNodeStream; + +extern LazyLogModule gWebAudioAPILog; +#define WEB_AUDIO_API_LOG(...) \ + MOZ_LOG(gWebAudioAPILog, LogLevel::Debug, (__VA_ARGS__)) + +namespace dom { + +struct AudioTimelineEvent; + +namespace WebAudioUtils { + // 32 is the minimum required by the spec for createBuffer() and + // createScriptProcessor() and matches what is used by Blink. The limit + // protects against large memory allocations. + const size_t MaxChannelCount = 32; + // AudioContext::CreateBuffer() "must support sample-rates in at least the + // range 22050 to 96000." + const uint32_t MinSampleRate = 8000; + const uint32_t MaxSampleRate = 192000; + + inline bool FuzzyEqual(float v1, float v2) + { + using namespace std; + return fabsf(v1 - v2) < 1e-7f; + } + inline bool FuzzyEqual(double v1, double v2) + { + using namespace std; + return fabs(v1 - v2) < 1e-7; + } + + /** + * Computes an exponential smoothing rate for a time based variable + * over aDuration seconds. + */ + inline double ComputeSmoothingRate(double aDuration, double aSampleRate) + { + return 1.0 - std::exp(-1.0 / (aDuration * aSampleRate)); + } + + /** + * Converts an AudioTimelineEvent's floating point time values to tick values + * with respect to a destination AudioNodeStream. + * + * This needs to be called for each AudioTimelineEvent that gets sent to an + * AudioNodeEngine, on the engine side where the AudioTimlineEvent is + * received. This means that such engines need to be aware of their + * destination streams as well. + */ + void ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent, + AudioNodeStream* aDest); + + /** + * Converts a linear value to decibels. Returns aMinDecibels if the linear + * value is 0. 
+ */ + inline float ConvertLinearToDecibels(float aLinearValue, float aMinDecibels) + { + return aLinearValue ? 20.0f * std::log10(aLinearValue) : aMinDecibels; + } + + /** + * Converts a decibel value to a linear value. + */ + inline float ConvertDecibelsToLinear(float aDecibels) + { + return std::pow(10.0f, 0.05f * aDecibels); + } + + /** + * Converts a decibel to a linear value. + */ + inline float ConvertDecibelToLinear(float aDecibel) + { + return std::pow(10.0f, 0.05f * aDecibel); + } + + inline void FixNaN(double& aDouble) + { + if (IsNaN(aDouble) || IsInfinite(aDouble)) { + aDouble = 0.0; + } + } + + inline double DiscreteTimeConstantForSampleRate(double timeConstant, double sampleRate) + { + return 1.0 - std::exp(-1.0 / (sampleRate * timeConstant)); + } + + inline bool IsTimeValid(double aTime) + { + return aTime >= 0 && aTime <= (MEDIA_TIME_MAX >> TRACK_RATE_MAX_BITS); + } + + /** + * Converts a floating point value to an integral type in a safe and + * platform agnostic way. The following program demonstrates the kinds + * of ways things can go wrong depending on the CPU architecture you're + * compiling for: + * + * #include + * volatile float r; + * int main() + * { + * unsigned int q; + * r = 1e100; + * q = r; + * printf("%f %d\n", r, q); + * r = -1e100; + * q = r; + * printf("%f %d\n", r, q); + * r = 1e15; + * q = r; + * printf("%f %x\n", r, q); + * r = 0/0.; + * q = r; + * printf("%f %d\n", r, q); + * } + * + * This program, when compiled for unsigned int, generates the following + * results depending on the architecture: + * + * x86 and x86-64 + * --- + * inf 0 + * -inf 0 + * 999999995904.000000 -727384064 d4a50000 + * nan 0 + * + * ARM + * --- + * inf -1 + * -inf 0 + * 999999995904.000000 -1 + * nan 0 + * + * When compiled for int, this program generates the following results: + * + * x86 and x86-64 + * --- + * inf -2147483648 + * -inf -2147483648 + * 999999995904.000000 -2147483648 + * nan -2147483648 + * + * ARM + * --- + * inf 2147483647 + * 
-inf -2147483648 + * 999999995904.000000 2147483647 + * nan 0 + * + * Note that the caller is responsible to make sure that the value + * passed to this function is not a NaN. This function will abort if + * it sees a NaN. + */ + template + IntType TruncateFloatToInt(FloatType f) + { + using namespace std; + + static_assert(mozilla::IsIntegral::value == true, + "IntType must be an integral type"); + static_assert(mozilla::IsFloatingPoint::value == true, + "FloatType must be a floating point type"); + + if (mozilla::IsNaN(f)) { + // It is the responsibility of the caller to deal with NaN values. + // If we ever get to this point, we have a serious bug to fix. + MOZ_CRASH("We should never see a NaN here"); + } + + // If the floating point value is outside of the range of maximum + // integral value for this type, just clamp to the maximum value. + // The equality case must also return max() due to loss of precision when + // converting max() to float. + if (f >= FloatType(numeric_limits::max())) { + return numeric_limits::max(); + } + + if (f <= FloatType(numeric_limits::min())) { + // If the floating point value is outside of the range of minimum + // integral value for this type, just clamp to the minimum value. + return numeric_limits::min(); + } + + // Otherwise, this conversion must be well defined. 
+ return IntType(f); + } + + void Shutdown(); + + int + SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const float* aIn, uint32_t* aInLen, + float* aOut, uint32_t* aOutLen); + + int + SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const int16_t* aIn, uint32_t* aInLen, + float* aOut, uint32_t* aOutLen); + + int + SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const int16_t* aIn, uint32_t* aInLen, + int16_t* aOut, uint32_t* aOutLen); + + void + LogToDeveloperConsole(uint64_t aWindowID, const char* aKey); + + } // namespace WebAudioUtils + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/gecko/src/dom/media/AudioChannelFormat.cpp b/gecko/src/dom/media/AudioChannelFormat.cpp new file mode 100644 index 0000000..2fd4dce --- /dev/null +++ b/gecko/src/dom/media/AudioChannelFormat.cpp @@ -0,0 +1,18 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioChannelFormat.h" + +#include + +namespace mozilla { + +uint32_t +GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2) +{ + return std::max(aChannels1, aChannels2); +} + +} // namespace mozilla diff --git a/gecko/src/dom/media/AudioSegment.cpp b/gecko/src/dom/media/AudioSegment.cpp new file mode 100644 index 0000000..b906f34 --- /dev/null +++ b/gecko/src/dom/media/AudioSegment.cpp @@ -0,0 +1,215 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AudioSegment.h" + +#include "AudioMixer.h" +#include "AudioChannelFormat.h" +#include "Latency.h" +#include + +namespace mozilla { + +const uint8_t SilentChannel::gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*SilentChannel::AUDIO_PROCESSING_FRAMES] = {0}; + +template<> +const float* SilentChannel::ZeroChannel() +{ + return reinterpret_cast(SilentChannel::gZeroChannel); +} + +template<> +const int16_t* SilentChannel::ZeroChannel() +{ + return reinterpret_cast(SilentChannel::gZeroChannel); +} + +void +AudioSegment::ApplyVolume(float aVolume) +{ + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + ci->mVolume *= aVolume; + } +} + +void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler, uint32_t aInRate, uint32_t aOutRate) +{ + if (mChunks.IsEmpty()) { + return; + } + + MOZ_ASSERT(aResampler || IsNull(), "We can only be here without a resampler if this segment is null."); + + AudioSampleFormat format = AUDIO_FORMAT_SILENCE; + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + if (ci->mBufferFormat != AUDIO_FORMAT_SILENCE) { + format = ci->mBufferFormat; + } + } + + switch (format) { + // If the format is silence at this point, all the chunks are silent. The + // actual function we use does not matter, it's just a matter of changing + // the chunks duration. + case AUDIO_FORMAT_SILENCE: + case AUDIO_FORMAT_FLOAT32: + Resample(aResampler, aInRate, aOutRate); + break; + case AUDIO_FORMAT_S16: + Resample(aResampler, aInRate, aOutRate); + break; + default: + MOZ_ASSERT(false); + break; + } +} + +// This helps to to safely get a pointer to the position we want to start +// writing a planar audio buffer, depending on the channel and the offset in the +// buffer. 
+static AudioDataValue* +PointerForOffsetInChannel(AudioDataValue* aData, size_t aLengthSamples, + uint32_t aChannelCount, uint32_t aChannel, + uint32_t aOffsetSamples) +{ + size_t samplesPerChannel = aLengthSamples / aChannelCount; + size_t beginningOfChannel = samplesPerChannel * aChannel; + MOZ_ASSERT(aChannel * samplesPerChannel + aOffsetSamples < aLengthSamples, + "Offset request out of bounds."); + return aData + beginningOfChannel + aOffsetSamples; +} + +void +AudioSegment::Mix(AudioMixer& aMixer, uint32_t aOutputChannels, + uint32_t aSampleRate) +{ + AutoTArray + buf; + AutoTArray channelData; + uint32_t offsetSamples = 0; + uint32_t duration = GetDuration(); + + if (duration <= 0) { + MOZ_ASSERT(duration == 0); + return; + } + + uint32_t outBufferLength = duration * aOutputChannels; + buf.SetLength(outBufferLength); + + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + AudioChunk& c = *ci; + uint32_t frames = c.mDuration; + + // If the chunk is silent, simply write the right number of silence in the + // buffers. + if (c.mBufferFormat == AUDIO_FORMAT_SILENCE) { + for (uint32_t channel = 0; channel < aOutputChannels; channel++) { + AudioDataValue* ptr = + PointerForOffsetInChannel(buf.Elements(), outBufferLength, + aOutputChannels, channel, offsetSamples); + PodZero(ptr, frames); + } + } else { + // Othewise, we need to upmix or downmix appropriately, depending on the + // desired input and output channels. + channelData.SetLength(c.mChannelData.Length()); + for (uint32_t i = 0; i < channelData.Length(); ++i) { + channelData[i] = static_cast(c.mChannelData[i]); + } + if (channelData.Length() < aOutputChannels) { + // Up-mix. 
+ AudioChannelsUpMix(&channelData, aOutputChannels, SilentChannel::ZeroChannel()); + for (uint32_t channel = 0; channel < aOutputChannels; channel++) { + AudioDataValue* ptr = + PointerForOffsetInChannel(buf.Elements(), outBufferLength, + aOutputChannels, channel, offsetSamples); + PodCopy(ptr, reinterpret_cast(channelData[channel]), + frames); + } + MOZ_ASSERT(channelData.Length() == aOutputChannels); + } else if (channelData.Length() > aOutputChannels) { + // Down mix. + AutoTArray outChannelPtrs; + outChannelPtrs.SetLength(aOutputChannels); + uint32_t offsetSamples = 0; + for (uint32_t channel = 0; channel < aOutputChannels; channel++) { + outChannelPtrs[channel] = + PointerForOffsetInChannel(buf.Elements(), outBufferLength, + aOutputChannels, channel, offsetSamples); + } + AudioChannelsDownMix(channelData, outChannelPtrs.Elements(), + aOutputChannels, frames); + } else { + // The channel count is already what we want, just copy it over. + for (uint32_t channel = 0; channel < aOutputChannels; channel++) { + AudioDataValue* ptr = + PointerForOffsetInChannel(buf.Elements(), outBufferLength, + aOutputChannels, channel, offsetSamples); + PodCopy(ptr, reinterpret_cast(channelData[channel]), + frames); + } + } + } + offsetSamples += frames; + } + + if (offsetSamples) { + MOZ_ASSERT(offsetSamples == outBufferLength / aOutputChannels, + "We forgot to write some samples?"); + aMixer.Mix(buf.Elements(), aOutputChannels, offsetSamples, aSampleRate); + } +} + +void +AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels, uint32_t aSampleRate) +{ + AutoTArray buf; + // Offset in the buffer that will be written to the mixer, in samples. 
+ uint32_t offset = 0; + + if (GetDuration() <= 0) { + MOZ_ASSERT(GetDuration() == 0); + return; + } + + uint32_t outBufferLength = GetDuration() * aOutputChannels; + buf.SetLength(outBufferLength); + + + for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { + AudioChunk& c = *ci; + + switch (c.mBufferFormat) { + case AUDIO_FORMAT_S16: + WriteChunk(c, aOutputChannels, buf.Elements() + offset); + break; + case AUDIO_FORMAT_FLOAT32: + WriteChunk(c, aOutputChannels, buf.Elements() + offset); + break; + case AUDIO_FORMAT_SILENCE: + // The mixer is expecting interleaved data, so this is ok. + PodZero(buf.Elements() + offset, c.mDuration * aOutputChannels); + break; + default: + MOZ_ASSERT(false, "Not handled"); + } + + offset += c.mDuration * aOutputChannels; + + if (!c.mTimeStamp.IsNull()) { + TimeStamp now = TimeStamp::Now(); + // would be more efficient to c.mTimeStamp to ms on create time then pass here + LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID, + (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp); + } + } + + if (offset) { + aMixer.Mix(buf.Elements(), aOutputChannels, offset / aOutputChannels, aSampleRate); + } +} + +} // namespace mozilla diff --git a/gecko/src/dom/media/AudioStream.cpp b/gecko/src/dom/media/AudioStream.cpp index 77481cd..a52a1af 100644 --- a/gecko/src/dom/media/AudioStream.cpp +++ b/gecko/src/dom/media/AudioStream.cpp @@ -19,6 +19,9 @@ #include "nsPrintfCString.h" #include "gfxPrefs.h" #include "AudioConverter.h" +#if defined(XP_WIN) +#include "nsXULAppAPI.h" +#endif namespace mozilla { diff --git a/gecko/src/dom/media/CubebUtils.cpp b/gecko/src/dom/media/CubebUtils.cpp index 1295e1e..8645628 100644 --- a/gecko/src/dom/media/CubebUtils.cpp +++ b/gecko/src/dom/media/CubebUtils.cpp @@ -106,9 +106,9 @@ enum class CubebState { Shutdown } sCubebState = CubebState::Uninitialized; cubeb* sCubebContext; -double sVolumeScale = 1.0; -uint32_t sCubebPlaybackLatencyInMilliseconds = 1; -uint32_t sCubebMSGLatencyInFrames = 128; +double 
sVolumeScale; +uint32_t sCubebPlaybackLatencyInMilliseconds; +uint32_t sCubebMSGLatencyInFrames; bool sCubebPlaybackLatencyPrefSet; bool sCubebMSGLatencyPrefSet; bool sAudioStreamInitEverSucceeded = false; diff --git a/gecko/src/dom/media/webaudio/AudioBlock.cpp b/gecko/src/dom/media/webaudio/AudioBlock.cpp new file mode 100644 index 0000000..a362b85 --- /dev/null +++ b/gecko/src/dom/media/webaudio/AudioBlock.cpp @@ -0,0 +1,166 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioBlock.h" +#include "AlignmentUtils.h" + +namespace mozilla { + +/** + * Heap-allocated buffer of channels of 128-sample float arrays, with + * threadsafe refcounting. Typically you would allocate one of these, fill it + * in, and then treat it as immutable while it's shared. + * + * Downstream references are accounted specially so that the creator of the + * buffer can reuse and modify its contents next iteration if other references + * are all downstream temporary references held by AudioBlock. + * + * We guarantee 16 byte alignment of the channel data. 
+ */ +class AudioBlockBuffer final : public ThreadSharedObject { +public: + + virtual AudioBlockBuffer* AsAudioBlockBuffer() override { return this; }; + + float* ChannelData(uint32_t aChannel) + { + float* base = reinterpret_cast(((uintptr_t)(this + 1) + 15) & ~0x0F); + ASSERT_ALIGNED16(base); + return base + aChannel * WEBAUDIO_BLOCK_SIZE; + } + + static already_AddRefed Create(uint32_t aChannelCount) + { + CheckedInt size = WEBAUDIO_BLOCK_SIZE; + size *= aChannelCount; + size *= sizeof(float); + size += sizeof(AudioBlockBuffer); + size += 15; //padding for alignment + if (!size.isValid()) { + MOZ_CRASH(); + } + + void* m = operator new(size.value()); + RefPtr p = new (m) AudioBlockBuffer(); + NS_ASSERTION((reinterpret_cast(p.get() + 1) - reinterpret_cast(p.get())) % 4 == 0, + "AudioBlockBuffers should be at least 4-byte aligned"); + return p.forget(); + } + + // Graph thread only. + void DownstreamRefAdded() { ++mDownstreamRefCount; } + void DownstreamRefRemoved() { + MOZ_ASSERT(mDownstreamRefCount > 0); + --mDownstreamRefCount; + } + // Whether this is shared by any owners that are not downstream. + // Called only from owners with a reference that is not a downstream + // reference. Graph thread only. + bool HasLastingShares() + { + // mRefCnt is atomic and so reading its value is defined even when + // modifications may happen on other threads. mDownstreamRefCount is + // not modified on any other thread. + // + // If all other references are downstream references (managed on this, the + // graph thread), then other threads are not using this buffer and cannot + // add further references. This method can safely return false. The + // buffer contents can be modified. + // + // If there are other references that are not downstream references, then + // this method will return true. The buffer will be assumed to be still + // in use and so will not be reused. 
+ nsrefcnt count = mRefCnt; + // This test is strictly less than because the caller has a reference + // that is not a downstream reference. + MOZ_ASSERT(mDownstreamRefCount < count); + return count != mDownstreamRefCount + 1; + } + + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + AudioBlockBuffer() {} + ~AudioBlockBuffer() override { MOZ_ASSERT(mDownstreamRefCount == 0); } + + nsAutoRefCnt mDownstreamRefCount; +}; + +AudioBlock::~AudioBlock() +{ + ClearDownstreamMark(); +} + +void +AudioBlock::SetBuffer(ThreadSharedObject* aNewBuffer) +{ + if (aNewBuffer == mBuffer) { + return; + } + + ClearDownstreamMark(); + + mBuffer = aNewBuffer; + + if (!aNewBuffer) { + return; + } + + AudioBlockBuffer* buffer = aNewBuffer->AsAudioBlockBuffer(); + if (buffer) { + buffer->DownstreamRefAdded(); + mBufferIsDownstreamRef = true; + } +} + +void +AudioBlock::ClearDownstreamMark() { + if (mBufferIsDownstreamRef) { + mBuffer->AsAudioBlockBuffer()->DownstreamRefRemoved(); + mBufferIsDownstreamRef = false; + } +} + +bool +AudioBlock::CanWrite() { + // If mBufferIsDownstreamRef is set then the buffer is not ours to use. + // It may be in use by another node which is not downstream. + return !mBufferIsDownstreamRef && + !mBuffer->AsAudioBlockBuffer()->HasLastingShares(); +} + +void +AudioBlock::AllocateChannels(uint32_t aChannelCount) +{ + MOZ_ASSERT(mDuration == WEBAUDIO_BLOCK_SIZE); + + if (mBufferIsDownstreamRef) { + // This is not our buffer to re-use. + ClearDownstreamMark(); + } else if (mBuffer && ChannelCount() == aChannelCount) { + AudioBlockBuffer* buffer = mBuffer->AsAudioBlockBuffer(); + if (buffer && !buffer->HasLastingShares()) { + MOZ_ASSERT(mBufferFormat == AUDIO_FORMAT_FLOAT32); + // No need to allocate again. 
+ mVolume = 1.0f; + return; + } + } + + RefPtr buffer = AudioBlockBuffer::Create(aChannelCount); + mChannelData.SetLength(aChannelCount); + for (uint32_t i = 0; i < aChannelCount; ++i) { + mChannelData[i] = buffer->ChannelData(i); + } + mBuffer = buffer.forget(); + mVolume = 1.0f; + mBufferFormat = AUDIO_FORMAT_FLOAT32; +} + +} // namespace mozilla diff --git a/gecko/src/media/libcubeb/src/cubeb_audiounit.cpp b/gecko/src/media/libcubeb/src/cubeb_audiounit.cpp index de966dd..1ac4306 100644 --- a/gecko/src/media/libcubeb/src/cubeb_audiounit.cpp +++ b/gecko/src/media/libcubeb/src/cubeb_audiounit.cpp @@ -673,12 +673,33 @@ audiounit_reinit_stream(cubeb_stream * stm, device_flags_value flags) return CUBEB_OK; } +static char const * +event_addr_to_string(AudioObjectPropertySelector selector) +{ + switch(selector) { + case kAudioHardwarePropertyDefaultOutputDevice: + return "kAudioHardwarePropertyDefaultOutputDevice"; + case kAudioHardwarePropertyDefaultInputDevice: + return "kAudioHardwarePropertyDefaultInputDevice"; + case kAudioDevicePropertyDeviceIsAlive: + return "kAudioDevicePropertyDeviceIsAlive"; + case kAudioDevicePropertyDataSource: + return "kAudioDevicePropertyDataSource"; + default: + return "Unknown"; + } +} + static OSStatus -audiounit_property_listener_callback(AudioObjectID /* id */, UInt32 address_count, +audiounit_property_listener_callback(AudioObjectID id, UInt32 address_count, const AudioObjectPropertyAddress * addresses, void * user) { cubeb_stream * stm = (cubeb_stream*) user; + if (stm->switching_device) { + LOG("Switching is already taking place. 
Skip Event %s for id=%d", event_addr_to_string(addresses[0].mSelector), id); + return noErr; + } stm->switching_device = true; device_flags_value switch_side = DEV_UKNOWN; @@ -686,23 +707,24 @@ audiounit_property_listener_callback(AudioObjectID /* id */, UInt32 address_coun for (UInt32 i = 0; i < address_count; i++) { switch(addresses[i].mSelector) { case kAudioHardwarePropertyDefaultOutputDevice: { - LOG("Event[%u] - mSelector == kAudioHardwarePropertyDefaultOutputDevice", (unsigned int) i); + LOG("Event[%u] - mSelector == kAudioHardwarePropertyDefaultOutputDevice for id=%d", (unsigned int) i, id); // Allow restart to choose the new default switch_side |= DEV_OUTPUT; } break; case kAudioHardwarePropertyDefaultInputDevice: { - LOG("Event[%u] - mSelector == kAudioHardwarePropertyDefaultInputDevice", (unsigned int) i); + LOG("Event[%u] - mSelector == kAudioHardwarePropertyDefaultInputDevice for id=%d", (unsigned int) i, id); // Allow restart to choose the new default switch_side |= DEV_INPUT; } break; case kAudioDevicePropertyDeviceIsAlive: { - LOG("Event[%u] - mSelector == kAudioDevicePropertyDeviceIsAlive", (unsigned int) i); + LOG("Event[%u] - mSelector == kAudioDevicePropertyDeviceIsAlive for id=%d", (unsigned int) i, id); // If this is the default input device ignore the event, // kAudioHardwarePropertyDefaultInputDevice will take care of the switch if (stm->input_device.flags & DEV_SYSTEM_DEFAULT) { LOG("It's the default input device, ignore the event"); + stm->switching_device = false; return noErr; } // Allow restart to choose the new default. Event register only for input. 
@@ -710,11 +732,13 @@ audiounit_property_listener_callback(AudioObjectID /* id */, UInt32 address_coun } break; case kAudioDevicePropertyDataSource: { - LOG("Event[%u] - mSelector == kAudioHardwarePropertyDataSource", (unsigned int) i); - return noErr; + LOG("Event[%u] - mSelector == kAudioHardwarePropertyDataSource for id=%d", (unsigned int) i, id); + switch_side |= DEV_INPUT; } + break; default: LOG("Event[%u] - mSelector == Unexpected Event id %d, return", (unsigned int) i, addresses[i].mSelector); + stm->switching_device = false; return noErr; } } diff --git a/import.py b/import.py index 0e402e6..b4895b3 100755 --- a/import.py +++ b/import.py @@ -13,20 +13,28 @@ # Imports Gecko code for building in Servo. header_files = [ + ("dom/media/AudioChannelFormat.h", "AudioChannelFormat.h"), ("dom/media/AudioConverter.h", "AudioConverter.h"), ("dom/media/AudioDeviceInfo.h", "mozilla/dom/AudioDeviceInfo.h"), + ("dom/media/AudioMixer.h", "AudioMixer.h"), ("dom/media/AudioSampleFormat.h", "AudioSampleFormat.h"), + ("dom/media/AudioSegment.h", "AudioSegment.h"), ("dom/media/AudioStream.h", "AudioStream.h"), ("dom/media/CubebUtils.h", "CubebUtils.h"), ("dom/media/Intervals.h", "Intervals.h"), + ("dom/media/Latency.h", "Latency.h"), ("dom/media/MediaData.h", "MediaData.h"), ("dom/media/MediaInfo.h", "MediaInfo.h"), + ("dom/media/MediaSegment.h", "MediaSegment.h"), ("dom/media/SharedBuffer.h", "SharedBuffer.h"), ("dom/media/StreamTracks.h", "StreamTracks.h"), ("dom/media/TimeUnits.h", "TimeUnits.h"), ("dom/media/TrackID.h", "TrackID.h"), ("dom/media/VideoLimits.h", "VideoLimits.h"), ("dom/media/VideoUtils.h", "VideoUtils.h"), + ("dom/media/webaudio/AlignmentUtils.h", "webaudio/AlignmentUtils.h"), + ("dom/media/webaudio/AudioBlock.h", "webaudio/AudioBlock.h"), + ("dom/media/webaudio/WebAudioUtils.h", "webaudio/WebAudioUtils.h"), ("gfx/2d/BaseCoord.h", "mozilla/gfx/BaseCoord.h"), ("gfx/2d/BaseMargin.h", "mozilla/gfx/BaseMargin.h"), ("gfx/2d/BasePoint.h", 
"mozilla/gfx/BasePoint.h"), @@ -43,7 +51,7 @@ ("gfx/2d/Types.h", "mozilla/gfx/Types.h"), ("gfx/layers/ImageTypes.h", "ImageTypes.h"), ("gfx/thebes/gfxPrefs.h", "gfxPrefs.h"), - ("intl/unicharutil/util/nsUnicharUtils.h", "nsUnicharUtils.h"), + ("intl/unicharutil/util/nsUnicharUtils.h", "nsUnicharUtils.h"), ("media/libcubeb/include/cubeb-stdint.h", "mozilla/media/libcubeb/include/cubeb-stdint.h"), ("media/libcubeb/include/cubeb.h", "mozilla/media/libcubeb/include/cubeb.h"), ("media/libcubeb/include/cubeb_export.h", "mozilla/media/libcubeb/include/cubeb_export.h"), @@ -113,6 +121,7 @@ ("mfbt/MemoryReporting.h", "mozilla/MemoryReporting.h"), ("mfbt/Move.h", "mozilla/Move.h"), ("mfbt/NotNull.h", "mozilla/NotNull.h"), + ("mfbt/Opaque.h", "mozilla/Opaque.h"), ("mfbt/OperatorNewExtensions.h", "mozilla/OperatorNewExtensions.h"), ("mfbt/Pair.h", "mozilla/Pair.h"), ("mfbt/PodOperations.h", "mozilla/PodOperations.h"), @@ -303,6 +312,7 @@ ("xpcom/threads/Mutex.h", "mozilla/Mutex.h"), ("xpcom/threads/nsICancelableRunnable.h", "nsICancelableRunnable.h"), ("xpcom/threads/nsILabelableRunnable.h", "nsILabelableRunnable.h"), + ("xpcom/threads/nsProxyRelease.h", "nsProxyRelease.h"), ("xpcom/threads/nsThread.h", "nsThread.h"), ("xpcom/threads/nsThreadUtils.h", "nsThreadUtils.h"), ("xpcom/threads/nsThreadManager.h", "nsThreadManager.h"), @@ -320,9 +330,12 @@ ] src_files = [ + "dom/media/AudioChannelFormat.cpp", + "dom/media/AudioSegment.cpp", "dom/media/AudioStream.cpp", "dom/media/CubebUtils.cpp", "dom/media/MediaInfo.cpp", + "dom/media/webaudio/AudioBlock.cpp", "media/libcubeb/src/cubeb.c", "media/libcubeb/src/cubeb_alsa.c", "media/libcubeb/src/cubeb_audiotrack.c", @@ -434,12 +447,21 @@ "cubeb/cubeb_export.h", "ErrorList.h", "js-config.h", + "js/GCAnnotations.h", + "js/GCAPI.h", + "js/GCPolicyAPI.h", "js/HeapAPI.h", + "js/HashTable.h", "js/ProfilingStack.h", "js/Result.h", + "js/RootingAPI.h", "js/TraceKind.h", + "js/TracingAPI.h", "js/TypeDecls.h", "js/Utility.h", + "js/UniquePtr.h", 
+ "js/Value.h", + "jsalloc.h", "jsbytecode.h", "jscpucfg.h", "jsprototypes.h", @@ -463,9 +485,11 @@ "nsIObserver.h", "nsIPrefBranch.h", "nsIPrefService.h", + "nsIPrincipal.h", "nsIProperties.h", "nsIRunnable.h", "nsISerialEventTarget.h", + "nsISerializable.h", "nsIServiceManager.h", "nsISimpleEnumerator.h", "nsIStringBundle.h",