Apply extra patches to M118 #32

Merged: 3 commits, Apr 10, 2024
5 changes: 5 additions & 0 deletions audio/audio_send_stream.cc
@@ -419,6 +419,11 @@ void AudioSendStream::SetMuted(bool muted) {
channel_send_->SetInputMute(muted);
}

bool AudioSendStream::GetMuted() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
return channel_send_->InputMute();
}

webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
return GetStats(true);
}
1 change: 1 addition & 0 deletions audio/audio_send_stream.h
@@ -95,6 +95,7 @@ class AudioSendStream final : public webrtc::AudioSendStream,
int payload_frequency,
int event,
int duration_ms) override;
bool GetMuted() override;
void SetMuted(bool muted) override;
webrtc::AudioSendStream::Stats GetStats() const override;
webrtc::AudioSendStream::Stats GetStats(
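Note: the new GetMuted() accessor simply reads back the state set through SetMuted(). A small illustrative helper (not part of the patch), assuming it runs on the stream's worker thread:

#include "call/audio_send_stream.h"

// Toggle the mute flag and read it back through the new accessor.
void ToggleMute(webrtc::AudioSendStream* stream) {
  stream->SetMuted(!stream->GetMuted());
}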
64 changes: 55 additions & 9 deletions audio/audio_state.cc
@@ -98,14 +98,26 @@ void AudioState::AddSendingStream(webrtc::AudioSendStream* stream,
UpdateAudioTransportWithSendingStreams();

// Make sure recording is initialized; start recording if enabled.
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {
adm->StartRecording();
if (ShouldRecord()) {
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {

// TODO: Verify whether the following Windows-only logic is still required.
#if defined(WEBRTC_WIN)
if (adm->BuiltInAECIsAvailable() && !adm->Playing()) {
if (!adm->PlayoutIsInitialized()) {
adm->InitPlayout();
}
adm->StartPlayout();
}
#endif
adm->StartRecording();
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
}
}
@@ -115,7 +127,8 @@ void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) {
auto count = sending_streams_.erase(stream);
RTC_DCHECK_EQ(1, count);
UpdateAudioTransportWithSendingStreams();
if (sending_streams_.empty()) {

if (!ShouldRecord()) {
config_.audio_device_module->StopRecording();
}
}
@@ -143,7 +156,7 @@ void AudioState::SetRecording(bool enabled) {
if (recording_enabled_ != enabled) {
recording_enabled_ = enabled;
if (enabled) {
if (!sending_streams_.empty()) {
if (ShouldRecord()) {
config_.audio_device_module->StartRecording();
}
} else {
@@ -203,6 +216,39 @@ void AudioState::UpdateNullAudioPollerState() {
null_audio_poller_.Stop();
}
}

void AudioState::OnMuteStreamChanged() {

auto* adm = config_.audio_device_module.get();
bool should_record = ShouldRecord();

if (should_record && !adm->Recording()) {
if (adm->InitRecording() == 0) {
adm->StartRecording();
}
} else if (!should_record && adm->Recording()) {
adm->StopRecording();
}
}

bool AudioState::ShouldRecord() {
// no streams to send
if (sending_streams_.empty()) {
return false;
}

int stream_count = sending_streams_.size();

int muted_count = 0;
for (const auto& kv : sending_streams_) {
if (kv.first->GetMuted()) {
muted_count++;
}
}

return muted_count != stream_count;
}

} // namespace internal

rtc::scoped_refptr<AudioState> AudioState::Create(
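The net effect of the audio_state.cc changes above is that the capture device records only while at least one unmuted send stream exists. A standalone sketch of that gating rule, using simplified stand-in types rather than the real WebRTC classes:

#include <set>

// Simplified stand-in for an audio send stream with a mute flag.
struct FakeSendStream {
  bool muted = false;
  bool GetMuted() const { return muted; }
};

// Mirrors AudioState::ShouldRecord(): record only if at least one sending
// stream exists and at least one of them is unmuted.
bool ShouldRecord(const std::set<FakeSendStream*>& sending_streams) {
  if (sending_streams.empty()) {
    return false;
  }
  for (const auto* stream : sending_streams) {
    if (!stream->GetMuted()) {
      return true;
    }
  }
  return false;
}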
5 changes: 5 additions & 0 deletions audio/audio_state.h
@@ -47,6 +47,8 @@ class AudioState : public webrtc::AudioState {

void SetStereoChannelSwapping(bool enable) override;

void OnMuteStreamChanged() override;

AudioDeviceModule* audio_device_module() {
RTC_DCHECK(config_.audio_device_module);
return config_.audio_device_module.get();
@@ -64,6 +66,9 @@ class AudioState : public webrtc::AudioState {
void UpdateAudioTransportWithSendingStreams();
void UpdateNullAudioPollerState() RTC_RUN_ON(&thread_checker_);

// Returns true when at least one sending stream exists and at least one
// of them is not muted.
bool ShouldRecord();

SequenceChecker thread_checker_;
SequenceChecker process_thread_checker_{SequenceChecker::kDetached};
const webrtc::AudioState::Config config_;
4 changes: 2 additions & 2 deletions audio/channel_send.cc
@@ -98,6 +98,8 @@ class ChannelSend : public ChannelSendInterface,
// Muting, Volume and Level.
void SetInputMute(bool enable) override;

bool InputMute() const override;

// Stats.
ANAStats GetANAStatistics() const override;

@@ -161,8 +163,6 @@ class ChannelSend : public ChannelSendInterface,
size_t payloadSize,
int64_t absolute_capture_timestamp_ms) override;

bool InputMute() const;

int32_t SendRtpAudio(AudioFrameType frameType,
uint8_t payloadType,
uint32_t rtp_timestamp_without_offset,
2 changes: 2 additions & 0 deletions audio/channel_send.h
@@ -82,6 +82,8 @@ class ChannelSendInterface {
virtual bool SendTelephoneEventOutband(int event, int duration_ms) = 0;
virtual void OnBitrateAllocation(BitrateAllocationUpdate update) = 0;
virtual int GetTargetBitrate() const = 0;

virtual bool InputMute() const = 0;
virtual void SetInputMute(bool muted) = 0;

virtual void ProcessAndEncodeAudio(
1 change: 1 addition & 0 deletions call/audio_send_stream.h
@@ -190,6 +190,7 @@ class AudioSendStream : public AudioSender {
int event,
int duration_ms) = 0;

virtual bool GetMuted() = 0;
virtual void SetMuted(bool muted) = 0;

virtual Stats GetStats() const = 0;
3 changes: 3 additions & 0 deletions call/audio_state.h
@@ -59,6 +59,9 @@ class AudioState : public rtc::RefCountInterface {

virtual void SetStereoChannelSwapping(bool enable) = 0;

// Notify the AudioState that a stream updated its mute state.
virtual void OnMuteStreamChanged() = 0;

static rtc::scoped_refptr<AudioState> Create(
const AudioState::Config& config);

3 changes: 3 additions & 0 deletions media/engine/webrtc_voice_engine.cc
@@ -1707,6 +1707,9 @@ bool WebRtcVoiceSendChannel::MuteStream(uint32_t ssrc, bool muted) {
ap->set_output_will_be_muted(all_muted);
}

// Notify the AudioState that the mute state has changed.
engine_->audio_state()->OnMuteStreamChanged();

return true;
}

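For context, the notification added above runs at the end of WebRtcVoiceSendChannel::MuteStream(), so muting the last enabled local audio track now releases the microphone. An illustrative sketch of the resulting call chain (not part of the patch), assuming an existing local webrtc::AudioTrackInterface:

#include "api/media_stream_interface.h"
#include "api/scoped_refptr.h"

// Disabling the track drives MuteStream(ssrc, /*muted=*/true), which calls
// AudioState::OnMuteStreamChanged(); once ShouldRecord() returns false the
// ADM stops recording.
void MuteMicrophone(rtc::scoped_refptr<webrtc::AudioTrackInterface> track) {
  track->set_enabled(false);
}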
3 changes: 2 additions & 1 deletion media/engine/webrtc_voice_engine.h
@@ -136,6 +136,8 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
override;
// Moved to public so WebRtcVoiceSendChannel can access it.
webrtc::AudioState* audio_state();

private:
// Every option that is "set" will be applied. Every option not "set" will be
@@ -148,7 +150,6 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

webrtc::AudioDeviceModule* adm();
webrtc::AudioProcessing* apm() const;
webrtc::AudioState* audio_state();

std::vector<AudioCodec> CollectCodecs(
const std::vector<webrtc::AudioCodecSpec>& specs) const;
4 changes: 4 additions & 0 deletions modules/audio_device/audio_device_data_observer.cc
@@ -307,6 +307,10 @@ class ADMWrapper : public AudioDeviceModule, public AudioTransport {
}
#endif // WEBRTC_IOS

int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const override {
return impl_->SetAudioDeviceSink(sink);
}

protected:
rtc::scoped_refptr<AudioDeviceModule> impl_;
AudioDeviceDataObserver* legacy_observer_ = nullptr;
4 changes: 4 additions & 0 deletions modules/audio_device/audio_device_generic.h
@@ -135,6 +135,10 @@ class AudioDeviceGeneric {
virtual int GetRecordAudioParameters(AudioParameters* params) const;
#endif // WEBRTC_IOS

virtual int32_t SetAudioDeviceSink(AudioDeviceSink* sink) { return -1; }
virtual int32_t GetPlayoutDevice() const { return -1; }
virtual int32_t GetRecordingDevice() const { return -1; }

virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;

virtual ~AudioDeviceGeneric() {}
46 changes: 39 additions & 7 deletions modules/audio_device/audio_device_impl.cc
@@ -63,15 +63,17 @@ namespace webrtc {

rtc::scoped_refptr<AudioDeviceModule> AudioDeviceModule::Create(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing) {
RTC_DLOG(LS_INFO) << __FUNCTION__;
return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory);
return AudioDeviceModule::CreateForTest(audio_layer, task_queue_factory, bypass_voice_processing);
}

// static
rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing) {
RTC_DLOG(LS_INFO) << __FUNCTION__;

// The "AudioDeviceModule::kWindowsCoreAudio2" audio layer has its own
@@ -92,7 +94,7 @@ rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(

// Create the generic reference counted (platform independent) implementation.
auto audio_device = rtc::make_ref_counted<AudioDeviceModuleImpl>(
audio_layer, task_queue_factory);
audio_layer, task_queue_factory, bypass_voice_processing);

// Ensure that the current platform is supported.
if (audio_device->CheckPlatform() == -1) {
@@ -115,17 +117,26 @@ rtc::scoped_refptr<AudioDeviceModuleForTest> AudioDeviceModule::CreateForTest(

AudioDeviceModuleImpl::AudioDeviceModuleImpl(
AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory)
: audio_layer_(audio_layer), audio_device_buffer_(task_queue_factory) {
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing)
: audio_layer_(audio_layer),
#if defined(WEBRTC_IOS)
bypass_voice_processing_(bypass_voice_processing),
#endif
audio_device_buffer_(task_queue_factory) {
RTC_DLOG(LS_INFO) << __FUNCTION__;
}

AudioDeviceModuleImpl::AudioDeviceModuleImpl(
AudioLayer audio_layer,
std::unique_ptr<AudioDeviceGeneric> audio_device,
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing,
bool create_detached)
: audio_layer_(audio_layer),
#if defined(WEBRTC_IOS)
bypass_voice_processing_(bypass_voice_processing),
#endif
audio_device_buffer_(task_queue_factory, create_detached),
audio_device_(std::move(audio_device)) {
RTC_DLOG(LS_INFO) << __FUNCTION__;
@@ -240,7 +251,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
#if defined(WEBRTC_IOS)
if (audio_layer == kPlatformDefaultAudio) {
audio_device_.reset(
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false));
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/bypass_voice_processing_));
RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
}
// END #if defined(WEBRTC_IOS)
@@ -895,6 +906,27 @@ int AudioDeviceModuleImpl::GetRecordAudioParameters(
}
#endif // WEBRTC_IOS

int32_t AudioDeviceModuleImpl::SetAudioDeviceSink(AudioDeviceSink* sink) const {
RTC_LOG(LS_INFO) << __FUNCTION__ << "(" << sink << ")";
int32_t ok = audio_device_->SetAudioDeviceSink(sink);
RTC_LOG(LS_INFO) << "output: " << ok;
return ok;
}

int32_t AudioDeviceModuleImpl::GetPlayoutDevice() const {
RTC_LOG(LS_INFO) << __FUNCTION__;
int32_t r = audio_device_->GetPlayoutDevice();
RTC_LOG(LS_INFO) << "output: " << r;
return r;
}

int32_t AudioDeviceModuleImpl::GetRecordingDevice() const {
RTC_LOG(LS_INFO) << __FUNCTION__;
int32_t r = audio_device_->GetRecordingDevice();
RTC_LOG(LS_INFO) << "output: " << r;
return r;
}

AudioDeviceModuleImpl::PlatformType AudioDeviceModuleImpl::Platform() const {
RTC_LOG(LS_INFO) << __FUNCTION__;
return platform_type_;
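With the Create()/CreateForTest() signatures modified above, callers in this patched tree can request the iOS voice-processing bypass at ADM creation time; upstream WebRTC does not take this argument. A sketch (not part of the patch), assuming the caller already owns a TaskQueueFactory:

#include "api/task_queue/task_queue_factory.h"
#include "modules/audio_device/include/audio_device.h"

// Create a platform-default ADM with Apple voice processing bypassed.
// The flag only changes behavior on iOS builds.
rtc::scoped_refptr<webrtc::AudioDeviceModule> CreateBypassAdm(
    webrtc::TaskQueueFactory* task_queue_factory) {
  return webrtc::AudioDeviceModule::Create(
      webrtc::AudioDeviceModule::kPlatformDefaultAudio,
      task_queue_factory,
      /*bypass_voice_processing=*/true);
}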
24 changes: 22 additions & 2 deletions modules/audio_device/audio_device_impl.h
@@ -47,13 +47,15 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
int32_t AttachAudioBuffer();

AudioDeviceModuleImpl(AudioLayer audio_layer,
TaskQueueFactory* task_queue_factory);
TaskQueueFactory* task_queue_factory,
bool bypass_voice_processing = false);
// If `create_detached` is true, created ADM can be used on another thread
// compared to the one on which it was created. It's useful for testing.
AudioDeviceModuleImpl(AudioLayer audio_layer,
std::unique_ptr<AudioDeviceGeneric> audio_device,
TaskQueueFactory* task_queue_factory,
bool create_detached);
bool bypass_voice_processing = false,
bool create_detached = false);
~AudioDeviceModuleImpl() override;

// Retrieve the currently utilized audio layer
@@ -155,6 +157,18 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
int GetRecordAudioParameters(AudioParameters* params) const override;
#endif // WEBRTC_IOS


int32_t SetAudioDeviceSink(AudioDeviceSink* sink) const override;
int32_t GetPlayoutDevice() const override;
int32_t GetRecordingDevice() const override;

#if defined(WEBRTC_ANDROID)
// Only use this accessor for test purposes on Android.
AudioManager* GetAndroidAudioManagerForTest() {
return audio_manager_android_.get();
}
#endif

AudioDeviceBuffer* GetAudioDeviceBuffer() { return &audio_device_buffer_; }

int RestartPlayoutInternally() override { return -1; }
@@ -169,6 +183,12 @@ class AudioDeviceModuleImpl : public AudioDeviceModuleForTest {
AudioLayer audio_layer_;
PlatformType platform_type_ = kPlatformNotSupported;
bool initialized_ = false;
#if defined(WEBRTC_IOS)
bool bypass_voice_processing_;
#elif defined(WEBRTC_ANDROID)
// Should be declared first to ensure that it outlives other resources.
std::unique_ptr<AudioManager> audio_manager_android_;
#endif
AudioDeviceBuffer audio_device_buffer_;
std::unique_ptr<AudioDeviceGeneric> audio_device_;
};