diff --git a/AudioDeviceExample/AudioDevices/ExampleAVAudioEngineDevice.m b/AudioDeviceExample/AudioDevices/ExampleAVAudioEngineDevice.m
index 4c02ab1f..41efd06f 100644
--- a/AudioDeviceExample/AudioDevices/ExampleAVAudioEngineDevice.m
+++ b/AudioDeviceExample/AudioDevices/ExampleAVAudioEngineDevice.m
@@ -7,8 +7,8 @@
 #import "ExampleAVAudioEngineDevice.h"
 
-// We want to get as close to 10 msec buffers as possible because this is what the media engine prefers.
-static double const kPreferredIOBufferDuration = 0.01;
+// We want to get as close to 20 millisecond buffers as possible, roughly double the duration that the media engine natively operates in.
+static double const kPreferredIOBufferDuration = 0.02;
 // We will use mono playback and recording where available.
 static size_t const kPreferredNumberOfChannels = 1;
@@ -558,7 +558,7 @@ - (void)setupAVAudioSession {
     }
 
     /*
-     * We want to be as close as possible to the 10 millisecond buffer size that the media engine needs. If there is
+     * We will operate our graph at roughly double the duration that the media engine natively operates in. If there is
      * a mismatch then TwilioVideo will ensure that appropriately sized audio buffers are delivered.
      */
     if (![session setPreferredIOBufferDuration:kPreferredIOBufferDuration error:&error]) {
diff --git a/AudioDeviceExample/AudioDevices/ExampleCoreAudioDevice.m b/AudioDeviceExample/AudioDevices/ExampleCoreAudioDevice.m
index b96a83ed..fadb1413 100644
--- a/AudioDeviceExample/AudioDevices/ExampleCoreAudioDevice.m
+++ b/AudioDeviceExample/AudioDevices/ExampleCoreAudioDevice.m
@@ -7,8 +7,8 @@
 #import "ExampleCoreAudioDevice.h"
 
-// We want to get as close to 10 msec buffers as possible because this is what the media engine prefers.
-static double const kPreferredIOBufferDuration = 0.01;
+// We want to get as close to 20 msec buffers as possible, roughly double the duration that the media engine natively operates in.
+static double const kPreferredIOBufferDuration = 0.02;
 // We will use stereo playback where available. Some audio routes may be restricted to mono only.
 static size_t const kPreferredNumberOfChannels = 2;
 // An audio sample is a signed 16-bit integer.
@@ -245,7 +245,7 @@ - (void)setupAVAudioSession {
     }
 
     /*
-     * We want to be as close as possible to the 10 millisecond buffer size that the media engine needs. If there is
+     * We will operate our graph at roughly double the duration that the media engine natively operates in. If there is
      * a mismatch then TwilioVideo will ensure that appropriately sized audio buffers are delivered.
      */
     if (![session setPreferredIOBufferDuration:kPreferredIOBufferDuration error:&error]) {
diff --git a/CoViewingExample.xcodeproj/project.pbxproj b/CoViewingExample.xcodeproj/project.pbxproj
new file mode 100644
index 00000000..bfbc9d7c
--- /dev/null
+++ b/CoViewingExample.xcodeproj/project.pbxproj
@@ -0,0 +1,377 @@
+// !$*UTF8*$!
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 50; + objects = { + +/* Begin PBXBuildFile section */ + 8A34C1D52189333400F22BE9 /* ExampleAVPlayerAudioTap.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A34C1D42189333400F22BE9 /* ExampleAVPlayerAudioTap.swift */; }; + 8A34C1DA2189496A00F22BE9 /* ExampleAVPlayerAudioDevice.m in Sources */ = {isa = PBXBuildFile; fileRef = 8A34C1D92189496A00F22BE9 /* ExampleAVPlayerAudioDevice.m */; }; + 8A395E432187D2B200437980 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A395E422187D2B200437980 /* AppDelegate.swift */; }; + 8A395E452187D2B200437980 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A395E442187D2B200437980 /* ViewController.swift */; }; + 8A395E482187D2B200437980 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 8A395E462187D2B200437980 /* Main.storyboard */; }; + 8A395E4A2187D2B300437980 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A395E492187D2B300437980 /* Assets.xcassets */; }; + 8A395E4D2187D2B300437980 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 8A395E4B2187D2B300437980 /* LaunchScreen.storyboard */; }; + 8A395E552187D52400437980 /* ExampleAVPlayerView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A395E542187D52400437980 /* ExampleAVPlayerView.swift */; }; + 8A395E572187F04C00437980 /* ExampleAVPlayerSource.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A395E562187F04C00437980 /* ExampleAVPlayerSource.swift */; }; + 8AF48A832193FC5B007B1A84 /* ExampleAVPlayerProcessingTap.m in Sources */ = {isa = PBXBuildFile; fileRef = 8AF48A822193FC5B007B1A84 /* ExampleAVPlayerProcessingTap.m */; }; +/* End PBXBuildFile section */ + +/* Begin PBXFileReference section */ + 8A34C1D42189333400F22BE9 /* ExampleAVPlayerAudioTap.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ExampleAVPlayerAudioTap.swift; sourceTree = ""; }; + 8A34C1D72189496A00F22BE9 /* ExampleAVPlayerAudioDevice.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExampleAVPlayerAudioDevice.h; sourceTree = ""; }; + 8A34C1D82189496A00F22BE9 /* AudioDevices-Bridging-Header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "AudioDevices-Bridging-Header.h"; sourceTree = ""; }; + 8A34C1D92189496A00F22BE9 /* ExampleAVPlayerAudioDevice.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ExampleAVPlayerAudioDevice.m; sourceTree = ""; }; + 8A395E3F2187D2B200437980 /* CoViewingExample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = CoViewingExample.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 8A395E422187D2B200437980 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = ""; }; + 8A395E442187D2B200437980 /* ViewController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ViewController.swift; sourceTree = ""; }; + 8A395E472187D2B200437980 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; + 8A395E492187D2B300437980 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + 8A395E4C2187D2B300437980 /* Base */ = {isa = PBXFileReference; lastKnownFileType = 
file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; + 8A395E4E2187D2B300437980 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 8A395E542187D52400437980 /* ExampleAVPlayerView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ExampleAVPlayerView.swift; sourceTree = ""; }; + 8A395E562187F04C00437980 /* ExampleAVPlayerSource.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ExampleAVPlayerSource.swift; sourceTree = ""; }; + 8AF48A812193FC5B007B1A84 /* ExampleAVPlayerProcessingTap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ExampleAVPlayerProcessingTap.h; sourceTree = ""; }; + 8AF48A822193FC5B007B1A84 /* ExampleAVPlayerProcessingTap.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ExampleAVPlayerProcessingTap.m; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 8A395E3C2187D2B200437980 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 8A34C1D62189496A00F22BE9 /* AudioDevices */ = { + isa = PBXGroup; + children = ( + 8A34C1D82189496A00F22BE9 /* AudioDevices-Bridging-Header.h */, + 8A34C1D72189496A00F22BE9 /* ExampleAVPlayerAudioDevice.h */, + 8A34C1D92189496A00F22BE9 /* ExampleAVPlayerAudioDevice.m */, + 8AF48A812193FC5B007B1A84 /* ExampleAVPlayerProcessingTap.h */, + 8AF48A822193FC5B007B1A84 /* ExampleAVPlayerProcessingTap.m */, + ); + path = AudioDevices; + sourceTree = ""; + }; + 8A395E362187D2B200437980 = { + isa = PBXGroup; + children = ( + 8A395E412187D2B200437980 /* CoViewingExample */, + 8A395E402187D2B200437980 /* Products */, + ); + sourceTree = ""; + }; + 8A395E402187D2B200437980 /* Products */ = { + isa = PBXGroup; + children = ( + 8A395E3F2187D2B200437980 /* CoViewingExample.app */, + ); + name = Products; + sourceTree = ""; + }; + 8A395E412187D2B200437980 /* CoViewingExample */ = { + isa = PBXGroup; + children = ( + 8A395E422187D2B200437980 /* AppDelegate.swift */, + 8A34C1D62189496A00F22BE9 /* AudioDevices */, + 8A34C1D42189333400F22BE9 /* ExampleAVPlayerAudioTap.swift */, + 8A395E562187F04C00437980 /* ExampleAVPlayerSource.swift */, + 8A395E542187D52400437980 /* ExampleAVPlayerView.swift */, + 8A395E442187D2B200437980 /* ViewController.swift */, + 8A395E462187D2B200437980 /* Main.storyboard */, + 8A395E492187D2B300437980 /* Assets.xcassets */, + 8A395E4B2187D2B300437980 /* LaunchScreen.storyboard */, + 8A395E4E2187D2B300437980 /* Info.plist */, + ); + path = CoViewingExample; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 8A395E3E2187D2B200437980 /* CoViewingExample */ = { + isa = PBXNativeTarget; + buildConfigurationList = 8A395E512187D2B300437980 /* Build configuration list for PBXNativeTarget "CoViewingExample" */; + buildPhases = ( + 8A395E3B2187D2B200437980 /* Sources */, + 8A395E3C2187D2B200437980 /* Frameworks */, + 8A395E3D2187D2B200437980 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = CoViewingExample; + productName = CoViewingExample; + productReference = 8A395E3F2187D2B200437980 /* CoViewingExample.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 
8A395E372187D2B200437980 /* Project object */ = { + isa = PBXProject; + attributes = { + LastSwiftUpdateCheck = 1000; + LastUpgradeCheck = 1000; + ORGANIZATIONNAME = "Twilio Inc."; + TargetAttributes = { + 8A395E3E2187D2B200437980 = { + CreatedOnToolsVersion = 10.0; + }; + }; + }; + buildConfigurationList = 8A395E3A2187D2B200437980 /* Build configuration list for PBXProject "CoViewingExample" */; + compatibilityVersion = "Xcode 9.3"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 8A395E362187D2B200437980; + productRefGroup = 8A395E402187D2B200437980 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 8A395E3E2187D2B200437980 /* CoViewingExample */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 8A395E3D2187D2B200437980 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8A395E4D2187D2B300437980 /* LaunchScreen.storyboard in Resources */, + 8A395E4A2187D2B300437980 /* Assets.xcassets in Resources */, + 8A395E482187D2B200437980 /* Main.storyboard in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 8A395E3B2187D2B200437980 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8A395E452187D2B200437980 /* ViewController.swift in Sources */, + 8A395E432187D2B200437980 /* AppDelegate.swift in Sources */, + 8A395E552187D52400437980 /* ExampleAVPlayerView.swift in Sources */, + 8A34C1D52189333400F22BE9 /* ExampleAVPlayerAudioTap.swift in Sources */, + 8AF48A832193FC5B007B1A84 /* ExampleAVPlayerProcessingTap.m in Sources */, + 8A34C1DA2189496A00F22BE9 /* ExampleAVPlayerAudioDevice.m in Sources */, + 8A395E572187F04C00437980 /* ExampleAVPlayerSource.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXVariantGroup section */ + 8A395E462187D2B200437980 /* Main.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 8A395E472187D2B200437980 /* Base */, + ); + name = Main.storyboard; + sourceTree = ""; + }; + 8A395E4B2187D2B300437980 /* LaunchScreen.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 8A395E4C2187D2B300437980 /* Base */, + ); + name = LaunchScreen.storyboard; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + 8A395E4F2187D2B300437980 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "c++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + 
CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGN_IDENTITY = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 11.0; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + }; + name = Debug; + }; + 8A395E502187D2B300437980 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "c++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + CODE_SIGN_IDENTITY = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 11.0; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + SDKROOT = iphoneos; + SWIFT_COMPILATION_MODE = wholemodule; + SWIFT_OPTIMIZATION_LEVEL = "-O"; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + 8A395E522187D2B300437980 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CODE_SIGN_STYLE = Automatic; + DEVELOPMENT_TEAM = SX5J6BN2KX; + INFOPLIST_FILE = CoViewingExample/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + PRODUCT_BUNDLE_IDENTIFIER = com.twilio.CoViewingExample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_OBJC_BRIDGING_HEADER = 
"CoViewingExample/AudioDevices/AudioDevices-Bridging-Header.h"; + SWIFT_VERSION = 4.2; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 8A395E532187D2B300437980 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CODE_SIGN_STYLE = Automatic; + DEVELOPMENT_TEAM = SX5J6BN2KX; + INFOPLIST_FILE = CoViewingExample/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + PRODUCT_BUNDLE_IDENTIFIER = com.twilio.CoViewingExample; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_OBJC_BRIDGING_HEADER = "CoViewingExample/AudioDevices/AudioDevices-Bridging-Header.h"; + SWIFT_VERSION = 4.2; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 8A395E3A2187D2B200437980 /* Build configuration list for PBXProject "CoViewingExample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8A395E4F2187D2B300437980 /* Debug */, + 8A395E502187D2B300437980 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 8A395E512187D2B300437980 /* Build configuration list for PBXNativeTarget "CoViewingExample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 8A395E522187D2B300437980 /* Debug */, + 8A395E532187D2B300437980 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 8A395E372187D2B200437980 /* Project object */; +} diff --git a/CoViewingExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/CoViewingExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 00000000..77a38387 --- /dev/null +++ b/CoViewingExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/CoViewingExample.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/CoViewingExample.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist new file mode 100644 index 00000000..18d98100 --- /dev/null +++ b/CoViewingExample.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist @@ -0,0 +1,8 @@ + + + + + IDEDidComputeMac32BitWarning + + + diff --git a/CoViewingExample/AppDelegate.swift b/CoViewingExample/AppDelegate.swift new file mode 100644 index 00000000..b448d8f3 --- /dev/null +++ b/CoViewingExample/AppDelegate.swift @@ -0,0 +1,59 @@ +// +// AppDelegate.swift +// CoViewingExample +// +// Copyright © 2018 Twilio Inc. All rights reserved. +// + +import UIKit + +@UIApplicationMain +class AppDelegate: UIResponder, UIApplicationDelegate { + + var window: UIWindow? + + + func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool { + print("didFinishLaunchingWithOptions:", launchOptions as Any) + if let options = launchOptions, + let videoUrl = options[UIApplication.LaunchOptionsKey.url] as? URL { + let rootVC = window?.rootViewController as! ViewController + rootVC.startPresenter(contentUrl: videoUrl) + } + return true + } + + func application(_ app: UIApplication, open url: URL, options: [UIApplication.OpenURLOptionsKey : Any] = [:]) -> Bool { + print("app:openURL:", url, " options:", options as Any) + + let rootVC = window?.rootViewController as! 
ViewController + rootVC.startPresenter(contentUrl: url) + + return true + } + + func applicationWillResignActive(_ application: UIApplication) { + // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. + // Use this method to pause ongoing tasks, disable timers, and invalidate graphics rendering callbacks. Games should use this method to pause the game. + } + + func applicationDidEnterBackground(_ application: UIApplication) { + // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. + // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. + } + + func applicationWillEnterForeground(_ application: UIApplication) { + // Called as part of the transition from the background to the active state; here you can undo many of the changes made on entering the background. + } + + func applicationDidBecomeActive(_ application: UIApplication) { + // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. + } + + func applicationWillTerminate(_ application: UIApplication) { + // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:. + } + + +} + diff --git a/CoViewingExample/Assets.xcassets/AppIcon.appiconset/Contents.json b/CoViewingExample/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 00000000..d8db8d65 --- /dev/null +++ b/CoViewingExample/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,98 @@ +{ + "images" : [ + { + "idiom" : "iphone", + "size" : "20x20", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "20x20", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "60x60", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "60x60", + "scale" : "3x" + }, + { + "idiom" : "ipad", + "size" : "20x20", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "20x20", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "29x29", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "29x29", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "40x40", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "40x40", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "76x76", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "76x76", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "83.5x83.5", + "scale" : "2x" + }, + { + "idiom" : "ios-marketing", + "size" : "1024x1024", + "scale" : "1x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/CoViewingExample/Assets.xcassets/Contents.json b/CoViewingExample/Assets.xcassets/Contents.json new file mode 100644 index 00000000..da4a164c --- /dev/null +++ b/CoViewingExample/Assets.xcassets/Contents.json @@ -0,0 
+1,6 @@
+{
+  "info" : {
+    "version" : 1,
+    "author" : "xcode"
+  }
+}
\ No newline at end of file
diff --git a/CoViewingExample/AudioDevices/AudioDevices-Bridging-Header.h b/CoViewingExample/AudioDevices/AudioDevices-Bridging-Header.h
new file mode 100644
index 00000000..e759f7ce
--- /dev/null
+++ b/CoViewingExample/AudioDevices/AudioDevices-Bridging-Header.h
@@ -0,0 +1,8 @@
+//
+//  AudioDevices-Bridging-Header.h
+//  CoViewingExample
+//
+//  Copyright © 2018 Twilio Inc. All rights reserved.
+//
+
+#import "ExampleAVPlayerAudioDevice.h"
diff --git a/CoViewingExample/AudioDevices/ExampleAVPlayerAudioDevice.h b/CoViewingExample/AudioDevices/ExampleAVPlayerAudioDevice.h
new file mode 100644
index 00000000..d554fd38
--- /dev/null
+++ b/CoViewingExample/AudioDevices/ExampleAVPlayerAudioDevice.h
@@ -0,0 +1,29 @@
+//
+//  ExampleAVPlayerAudioDevice.h
+//  CoViewingExample
+//
+//  Copyright © 2018 Twilio, Inc. All rights reserved.
+//
+
+#import <MediaToolbox/MediaToolbox.h>
+#import <TwilioVideo/TwilioVideo.h>
+
+/*
+ * ExampleAVPlayerAudioDevice uses a VoiceProcessingIO audio unit to play audio from an MTAudioProcessingTap
+ * attached to an AVPlayerItem. The AVPlayer audio is mixed with Room audio provided by Twilio.
+ * The microphone input, and MTAudioProcessingTap output are mixed into a single recorded stream.
+ */
+@interface ExampleAVPlayerAudioDevice : NSObject <TVIAudioDevice>
+
+- (void)audioTapDidPrepare;
+
+- (void)startAudioTapAtTime:(CMTime)startTime;
+
+/*
+ * Creates a processing tap bound to the device instance.
+ *
+ * @return An `MTAudioProcessingTap`, or NULL if there is an error. The caller assumes all ownership
+ * of the tap, and should call CFRelease when they are finished with it.
+ */
+- (nullable MTAudioProcessingTapRef)createProcessingTap;
+
+@end
diff --git a/CoViewingExample/AudioDevices/ExampleAVPlayerAudioDevice.m b/CoViewingExample/AudioDevices/ExampleAVPlayerAudioDevice.m
new file mode 100644
index 00000000..49ffd72f
--- /dev/null
+++ b/CoViewingExample/AudioDevices/ExampleAVPlayerAudioDevice.m
@@ -0,0 +1,1229 @@
+//
+//  ExampleAVPlayerAudioDevice.m
+//  CoViewingExample
+//
+//  Copyright © 2018 Twilio, Inc. All rights reserved.
+//
+
+#import "ExampleAVPlayerAudioDevice.h"
+
+#import "ExampleAVPlayerProcessingTap.h"
+#import "TPCircularBuffer+AudioBufferList.h"
+
+// We want to get as close to 20 msec buffers as possible, to match the behavior of TVIDefaultAudioDevice.
+static double const kPreferredIOBufferDuration = 0.02;
+// We will use stereo playback where available. Some audio routes may be restricted to mono only.
+static size_t const kPreferredNumberOfChannels = 2;
+static size_t const kPreferredNumberOfInputChannels = 1;
+// An audio sample is a signed 16-bit integer.
+static size_t const kAudioSampleSize = sizeof(SInt16);
+static uint32_t const kPreferredSampleRate = 48000;
+
+typedef struct ExampleAVPlayerRendererContext {
+    // Used to pull audio from the media engine.
+    TVIAudioDeviceContext deviceContext;
+    size_t expectedFramesPerBuffer;
+    size_t maxFramesPerBuffer;
+
+    // The buffer of AVPlayer content that we will consume.
+    TPCircularBuffer *playoutBuffer;
+    AudioTimeStamp playoutStartTimestamp;
+    AudioTimeStamp playoutSampleTimestamp;
+} ExampleAVPlayerRendererContext;
+
+typedef struct ExampleAVPlayerCapturerContext {
+    // Used to deliver recorded audio to the media engine.
+    TVIAudioDeviceContext deviceContext;
+    size_t expectedFramesPerBuffer;
+    size_t maxFramesPerBuffer;
+
+    // Core Audio's VoiceProcessingIO audio unit.
+    AudioUnit audioUnit;
+    AudioConverterRef audioConverter;
+
+    // Buffer used to render audio samples into.
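+    // Allocated in -initializeCapturer and shared with the input callback via this context. The callback dequeues
+    // AVPlayer frames into the front of the buffer and uses the space behind them as the destination for the
+    // stereo capture converter.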
+    int16_t *audioBuffer;
+
+    // The buffer of AVPlayer content that we will consume.
+    TPCircularBuffer *recordingBuffer;
+} ExampleAVPlayerCapturerContext;
+
+// The IO audio units use bus 0 for output, and bus 1 for input.
+static int kOutputBus = 0;
+static int kInputBus = 1;
+// This is the maximum slice size for RemoteIO (as observed in the field). We will double check at initialization time.
+static size_t kMaximumFramesPerBuffer = 1156;
+
+@interface ExampleAVPlayerAudioDevice()
+
+/**
+ Indicates that our AVAudioSession and audio graph have been interrupted. When an interruption ends we will take steps
+ to restart our audio graph.
+ */
+@property (nonatomic, assign, getter=isInterrupted) BOOL interrupted;
+
+/**
+ A multi-channel mixer which takes as input:
+
+ 1. Decoded LPCM audio from Twilio. Remote audio is mixed and pulled from the media engine in `renderingFormat`.
+ 2. Decoded, format converted LPCM audio consumed from our MTAudioProcessingTap.
+
+ The mixer's output is connected to the input of the VoiceProcessingIO's output bus.
+ */
+@property (nonatomic, assign) AudioUnit playbackMixer;
+
+/**
+ A VoiceProcessingIO audio unit which performs several important functions.
+
+ Input Graph
+ 1. Record from the microphone.
+ 2. Echo cancellation of the loudspeaker output from the microphone input.
+ 3. Deliver mixed, recorded samples from the microphone and AVPlayer to Twilio.
+
+ Output Graph
+ 1. Pull audio from the output of `playbackMixer`.
+ */
+@property (nonatomic, assign) AudioUnit voiceProcessingIO;
+
+/**
+ The tap used to access audio samples from AVPlayer. This is where we produce audio for playback and recording.
+ */
+@property (nonatomic, assign, nullable) MTAudioProcessingTapRef audioTap;
+
+/**
+ A context which contains the state needed for the processing tap's C functions.
+ */
+@property (nonatomic, assign, nullable) ExampleAVPlayerAudioTapContext *audioTapContext;
+
+/**
+ A circular buffer used to feed the recording side of the audio graph with frames produced by our processing tap.
+ */
+@property (nonatomic, assign, nullable) TPCircularBuffer *audioTapCapturingBuffer;
+
+/**
+ A circular buffer used to feed the playback side of the audio graph with frames produced by our processing tap.
+ */
+@property (nonatomic, assign, nullable) TPCircularBuffer *audioTapRenderingBuffer;
+
+@property (nonatomic, assign) AudioConverterRef captureConverter;
+@property (nonatomic, assign) int16_t *captureBuffer;
+@property (nonatomic, strong, nullable) TVIAudioFormat *capturingFormat;
+@property (nonatomic, assign, nullable) ExampleAVPlayerCapturerContext *capturingContext;
+@property (atomic, assign, nullable) ExampleAVPlayerRendererContext *renderingContext;
+@property (nonatomic, strong, nullable) TVIAudioFormat *renderingFormat;
+
+/**
+ A convenience getter that indicates if either `wantsCapturing` or `wantsRendering` is true.
+ */
+@property (nonatomic, assign, readonly) BOOL wantsAudio;
+
+/**
+ Indicates that our audio device has been requested to capture audio by Twilio. Capturing occurs when you publish
+ a TVILocalAudioTrack in a Group Room, or a Peer-to-Peer Room with 1 or more Participants.
+ */
+@property (nonatomic, assign) BOOL wantsCapturing;
+
+/**
+ Indicates that our audio device has been requested to render audio by Twilio. Rendering occurs when one or more Remote
+ Participants publish a TVIRemoteAudioTrack in a Room.
+ */ +@property (nonatomic, assign) BOOL wantsRendering; + +@end + +@implementation ExampleAVPlayerAudioDevice + +@synthesize audioTapCapturingBuffer = _audioTapCapturingBuffer; + +#pragma mark - Init & Dealloc + +- (id)init { + self = [super init]; + if (self) { + _audioTapCapturingBuffer = calloc(1, sizeof(TPCircularBuffer)); + _audioTapRenderingBuffer = calloc(1, sizeof(TPCircularBuffer)); + _wantsCapturing = NO; + _wantsRendering = NO; + + _audioTapContext = calloc(1, sizeof(ExampleAVPlayerAudioTapContext)); + _audioTapContext->capturingBuffer = _audioTapCapturingBuffer; + _audioTapContext->renderingBuffer = _audioTapRenderingBuffer; + _audioTapContext->audioDevice = self; + _audioTapContext->audioTapPrepared = NO; + } + return self; +} + +- (void)dealloc { + [self unregisterAVAudioSessionObservers]; + + free(_audioTapCapturingBuffer); + free(_audioTapRenderingBuffer); + free(_audioTapContext); +} + ++ (NSString *)description { + return @"ExampleAVPlayerAudioDevice"; +} + +/* + * Determine at runtime the maximum slice size used by our audio unit. Setting the stream format and sample rate doesn't + * appear to impact the maximum size so we prefer to read this value once at initialization time. + */ ++ (void)initialize { + AudioComponentDescription audioUnitDescription = [self audioUnitDescription]; + AudioComponent audioComponent = AudioComponentFindNext(NULL, &audioUnitDescription); + AudioUnit audioUnit; + OSStatus status = AudioComponentInstanceNew(audioComponent, &audioUnit); + if (status != 0) { + NSLog(@"Could not find RemoteIO AudioComponent instance!"); + return; + } + + UInt32 framesPerSlice = 0; + UInt32 propertySize = sizeof(framesPerSlice); + status = AudioUnitGetProperty(audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, + kAudioUnitScope_Global, kOutputBus, + &framesPerSlice, &propertySize); + if (status != 0) { + NSLog(@"Could not read RemoteIO AudioComponent instance!"); + AudioComponentInstanceDispose(audioUnit); + return; + } + + NSLog(@"This device uses a maximum slice size of %d frames.", (unsigned int)framesPerSlice); + kMaximumFramesPerBuffer = (size_t)framesPerSlice; + AudioComponentInstanceDispose(audioUnit); +} + +#pragma mark - Public + +- (BOOL)wantsAudio { + return _wantsCapturing || _wantsRendering; +} + +- (void)audioTapDidPrepare { + NSLog(@"%s", __PRETTY_FUNCTION__); +} + +- (void)startAudioTapAtTime:(CMTime)startTime { + @synchronized (self) { + TVIAudioDeviceContext *context = _capturingContext ? _capturingContext->deviceContext : _renderingContext ? 
_renderingContext->deviceContext : NULL; + if (context) { + TVIAudioDeviceExecuteWorkerBlock(context, ^{ + [self restartAudioUnitAtTime:startTime]; + }); + } + } +} + +- (MTAudioProcessingTapRef)createProcessingTap { + if (_audioTap) { + return _audioTap; + } + + MTAudioProcessingTapRef processingTap; + MTAudioProcessingTapCallbacks callbacks; + callbacks.version = kMTAudioProcessingTapCallbacksVersion_0; + callbacks.init = AVPlayerProcessingTapInit; + callbacks.prepare = AVPlayerProcessingTapPrepare; + callbacks.process = AVPlayerProcessingTapProcess; + callbacks.unprepare = AVPlayerProcessingTapUnprepare; + callbacks.finalize = AVPlayerProcessingTapFinalize; + callbacks.clientInfo = (void *)(_audioTapContext); + + OSStatus status = MTAudioProcessingTapCreate(kCFAllocatorDefault, + &callbacks, + kMTAudioProcessingTapCreationFlag_PostEffects, + &processingTap); + if (status == kCVReturnSuccess) { + _audioTap = processingTap; + return processingTap; + } else { + return NULL; + } +} + +#pragma mark - TVIAudioDeviceRenderer + +- (nullable TVIAudioFormat *)renderFormat { + if (!_renderingFormat) { + // Setup the AVAudioSession early. You could also defer to `startRendering:` and `stopRendering:`. + [self setupAVAudioSession]; + + _renderingFormat = [[self class] activeFormat]; + } + + return _renderingFormat; +} + +- (BOOL)initializeRenderer { + /* + * In this example we don't need any fixed size buffers or other pre-allocated resources. We will simply write + * directly to the AudioBufferList provided in the AudioUnit's rendering callback. + */ + return YES; +} + +- (BOOL)startRendering:(nonnull TVIAudioDeviceContext)context { + NSLog(@"%s %@", __PRETTY_FUNCTION__, self.renderingFormat); + + @synchronized(self) { + // Restart the already setup graph. + if (_voiceProcessingIO) { + [self stopAudioUnit]; + [self teardownAudioUnit]; + } + + self.wantsRendering = YES; + if (!self.renderingContext) { + self.renderingContext = malloc(sizeof(ExampleAVPlayerRendererContext)); + memset(self.renderingContext, 0, sizeof(ExampleAVPlayerRendererContext)); + } + self.renderingContext->deviceContext = context; + self.renderingContext->maxFramesPerBuffer = _renderingFormat.framesPerBuffer; + + // Ensure that we wait for the audio tap buffer to become ready. 
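+        // Until the MTAudioProcessingTap has prepared, leave playoutBuffer NULL so the playback callback renders
+        // silence for the player input; -startAudioTapAtTime: supplies a valid start timestamp once playback begins.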
+ if (self.audioTapContext->audioTapPrepared) { + self.renderingContext->playoutBuffer = _audioTapRenderingBuffer; + } else { + AudioTimeStamp start = {0}; + start.mFlags = kAudioTimeStampNothingValid; + self.renderingContext->playoutStartTimestamp = start; + self.renderingContext->playoutBuffer = NULL; + } + + const NSTimeInterval sessionBufferDuration = [AVAudioSession sharedInstance].IOBufferDuration; + const double sessionSampleRate = [AVAudioSession sharedInstance].sampleRate; + const size_t sessionFramesPerBuffer = (size_t)(sessionSampleRate * sessionBufferDuration + .5); + self.renderingContext->expectedFramesPerBuffer = sessionFramesPerBuffer; + + if (![self setupAudioUnitRendererContext:self.renderingContext + capturerContext:self.capturingContext]) { + free(self.renderingContext); + self.renderingContext = NULL; + self.wantsRendering = NO; + return NO; + } else if (self.capturingContext) { + self.capturingContext->audioUnit = _voiceProcessingIO; + self.capturingContext->audioConverter = _captureConverter; + } + } + + BOOL success = [self startAudioUnit]; + if (success) { + TVIAudioSessionActivated(context); + } + return success; +} + +- (BOOL)stopRendering { + NSLog(@"%s", __PRETTY_FUNCTION__); + + @synchronized(self) { + NSAssert(self.renderingContext != NULL, @"We should have a rendering context when stopping."); + self.wantsRendering = NO; + + if (!self.wantsAudio) { + [self stopAudioUnit]; + TVIAudioSessionDeactivated(self.renderingContext->deviceContext); + [self teardownAudioUnit]; + + free(self.capturingContext); + self.capturingContext = NULL; + + free(self.captureBuffer); + self.captureBuffer = NULL; + + free(self.renderingContext); + self.renderingContext = NULL; + } + } + + return YES; +} + +#pragma mark - TVIAudioDeviceCapturer + +- (nullable TVIAudioFormat *)captureFormat { + if (!_capturingFormat) { + + /* + * Assume that the AVAudioSession has already been configured and started and that the values + * for sampleRate and IOBufferDuration are final. + */ + _capturingFormat = [[self class] capturingFormat]; + } + + return _capturingFormat; +} + +- (BOOL)initializeCapturer { + if (_captureBuffer == NULL) { + size_t byteSize = kMaximumFramesPerBuffer * 4 * 2; + byteSize += 16; + _captureBuffer = malloc(byteSize); + } + + return YES; +} + +- (BOOL)startCapturing:(nonnull TVIAudioDeviceContext)context { + NSLog(@"%s %@", __PRETTY_FUNCTION__, self.capturingFormat); + + @synchronized(self) { + // Restart the already setup graph. + if (_voiceProcessingIO) { + [self stopAudioUnit]; + [self teardownAudioUnit]; + } + + self.wantsCapturing = YES; + if (!self.capturingContext) { + self.capturingContext = malloc(sizeof(ExampleAVPlayerCapturerContext)); + memset(self.capturingContext, 0, sizeof(ExampleAVPlayerCapturerContext)); + } + self.capturingContext->deviceContext = context; + self.capturingContext->maxFramesPerBuffer = _capturingFormat.framesPerBuffer; + self.capturingContext->audioBuffer = _captureBuffer; + + // Ensure that we wait for the audio tap buffer to become ready. 
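+        // Until the MTAudioProcessingTap has prepared, leave recordingBuffer NULL so the input callback delivers
+        // microphone audio only.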
+ if (self.audioTapContext->audioTapPrepared) { + self.capturingContext->recordingBuffer = _audioTapCapturingBuffer; + } else { + self.capturingContext->recordingBuffer = NULL; + } + + const NSTimeInterval sessionBufferDuration = [AVAudioSession sharedInstance].IOBufferDuration; + const double sessionSampleRate = [AVAudioSession sharedInstance].sampleRate; + const size_t sessionFramesPerBuffer = (size_t)(sessionSampleRate * sessionBufferDuration + .5); + self.capturingContext->expectedFramesPerBuffer = sessionFramesPerBuffer; + + if (![self setupAudioUnitRendererContext:self.renderingContext + capturerContext:self.capturingContext]) { + free(self.capturingContext); + self.capturingContext = NULL; + self.wantsCapturing = NO; + return NO; + } else { + self.capturingContext->audioUnit = _voiceProcessingIO; + self.capturingContext->audioConverter = _captureConverter; + } + } + BOOL success = [self startAudioUnit]; + if (success) { + TVIAudioSessionActivated(context); + } + return success; +} + +- (BOOL)stopCapturing { + NSLog(@"%s", __PRETTY_FUNCTION__); + + @synchronized (self) { + NSAssert(self.capturingContext != NULL, @"We should have a capturing context when stopping."); + self.wantsCapturing = NO; + + if (!self.wantsAudio) { + [self stopAudioUnit]; + TVIAudioSessionDeactivated(self.capturingContext->deviceContext); + [self teardownAudioUnit]; + + free(self.capturingContext); + self.capturingContext = NULL; + + free(self.captureBuffer); + self.captureBuffer = NULL; + + free(self.renderingContext); + self.renderingContext = NULL; + } + } + return YES; +} + +#pragma mark - Private (AudioUnit callbacks) + +static void ExampleAVPlayerAudioDeviceDequeueFrames(TPCircularBuffer *buffer, + UInt32 numFrames, + const AudioTimeStamp *timestamp, + AudioBufferList *bufferList) { + int8_t *audioBuffer = (int8_t *)bufferList->mBuffers[0].mData; + + // TODO: Include this format in the context? What if the formats are somehow not matched? + AudioStreamBasicDescription format = {0}; + format.mBitsPerChannel = 16; + format.mChannelsPerFrame = bufferList->mBuffers[0].mNumberChannels; + format.mBytesPerFrame = format.mChannelsPerFrame * format.mBitsPerChannel / 8; + format.mFormatID = kAudioFormatLinearPCM; + format.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger; + format.mSampleRate = kPreferredSampleRate; + + UInt32 framesInOut = numFrames; + if (timestamp) { + AudioTimeStamp dequeuedTimestamp; + do { + TPCircularBufferDequeueBufferListFrames(buffer, &framesInOut, bufferList, &dequeuedTimestamp, &format); + } while (dequeuedTimestamp.mSampleTime < timestamp->mSampleTime); + } else { + TPCircularBufferDequeueBufferListFrames(buffer, &framesInOut, bufferList, NULL, &format); + } + + if (framesInOut != numFrames) { + // Render silence for the remaining frames. 
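+        // TPCircularBufferDequeueBufferListFrames updates framesInOut with the number of frames actually dequeued,
+        // so any shortfall is zero-filled rather than replaying stale samples.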
+ UInt32 framesRemaining = numFrames - framesInOut; + UInt32 bytesRemaining = framesRemaining * format.mBytesPerFrame; + audioBuffer += format.mBytesPerFrame * framesInOut; + + memset(audioBuffer, 0, bytesRemaining); + } +} + +static OSStatus ExampleAVPlayerAudioDeviceAudioTapPlaybackCallback(void *refCon, + AudioUnitRenderActionFlags *actionFlags, + const AudioTimeStamp *timestamp, + UInt32 busNumber, + UInt32 numFrames, + AudioBufferList *bufferList) { + assert(bufferList->mNumberBuffers == 1); + assert(bufferList->mBuffers[0].mNumberChannels <= 2); + assert(bufferList->mBuffers[0].mNumberChannels > 0); + + ExampleAVPlayerRendererContext *context = (ExampleAVPlayerRendererContext *)refCon; + AudioTimeStamp startTimestamp = context->playoutStartTimestamp; + BOOL readyToPlay = (startTimestamp.mFlags & kAudioTimeStampHostTimeValid) && (timestamp->mHostTime >= startTimestamp.mHostTime); + TPCircularBuffer *buffer = context->playoutBuffer; + UInt32 audioBufferSizeInBytes = bufferList->mBuffers[0].mDataByteSize; + + // Render silence if there are temporary mismatches between CoreAudio and our rendering format. + if (numFrames > context->maxFramesPerBuffer) { + NSLog(@"Can handle a max of %u frames but got %u.", (unsigned int)context->maxFramesPerBuffer, (unsigned int)numFrames); + *actionFlags |= kAudioUnitRenderAction_OutputIsSilence; + int8_t *audioBuffer = (int8_t *)bufferList->mBuffers[0].mData; + memset(audioBuffer, 0, audioBufferSizeInBytes); + return noErr; + } else if (buffer == nil || + !readyToPlay) { + *actionFlags |= kAudioUnitRenderAction_OutputIsSilence; + memset(bufferList->mBuffers[0].mData, 0, audioBufferSizeInBytes); + return noErr; + } + + if (readyToPlay && context->playoutStartTimestamp.mSampleTime == 0) { + ExampleAVPlayerAudioDeviceDequeueFrames(buffer, numFrames, &context->playoutStartTimestamp, bufferList); + context->playoutStartTimestamp.mSampleTime += 1; + } else { + ExampleAVPlayerAudioDeviceDequeueFrames(buffer, numFrames, NULL, bufferList); + } + + return noErr; +} + +static OSStatus ExampleAVPlayerAudioDeviceAudioRendererPlaybackCallback(void *refCon, + AudioUnitRenderActionFlags *actionFlags, + const AudioTimeStamp *timestamp, + UInt32 busNumber, + UInt32 numFrames, + AudioBufferList *bufferList) { + assert(bufferList->mNumberBuffers == 1); + assert(bufferList->mBuffers[0].mNumberChannels <= 2); + assert(bufferList->mBuffers[0].mNumberChannels > 0); + + ExampleAVPlayerCapturerContext *context = (ExampleAVPlayerCapturerContext *)refCon; + int8_t *audioBuffer = (int8_t *)bufferList->mBuffers[0].mData; + UInt32 audioBufferSizeInBytes = bufferList->mBuffers[0].mDataByteSize; + + // Render silence if there are temporary mismatches between CoreAudio and our rendering format. + if (numFrames > context->maxFramesPerBuffer) { + NSLog(@"Can handle a max of %u frames but got %u.", (unsigned int)context->maxFramesPerBuffer, (unsigned int)numFrames); + *actionFlags |= kAudioUnitRenderAction_OutputIsSilence; + memset(audioBuffer, 0, audioBufferSizeInBytes); + return noErr; + } + + // Pull decoded, mixed audio data from the media engine into the AudioUnit's AudioBufferList. 
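+    // TVIAudioDeviceReadRenderData() fills the buffer with remote Participant audio that the media engine has
+    // already mixed, in the format negotiated via -renderFormat.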
+    assert(numFrames <= context->maxFramesPerBuffer);
+    assert(audioBufferSizeInBytes == (bufferList->mBuffers[0].mNumberChannels * kAudioSampleSize * numFrames));
+    TVIAudioDeviceReadRenderData(context->deviceContext, audioBuffer, audioBufferSizeInBytes);
+
+    return noErr;
+}
+
+static OSStatus ExampleAVPlayerAudioDeviceRecordingInputCallback(void *refCon,
+                                                                 AudioUnitRenderActionFlags *actionFlags,
+                                                                 const AudioTimeStamp *timestamp,
+                                                                 UInt32 busNumber,
+                                                                 UInt32 numFrames,
+                                                                 AudioBufferList *bufferList) {
+    ExampleAVPlayerCapturerContext *context = (ExampleAVPlayerCapturerContext *)refCon;
+    if (context->deviceContext == NULL) {
+        return noErr;
+    }
+
+    if (numFrames > context->maxFramesPerBuffer) {
+        NSLog(@"Expected %u frames but got %u.", (unsigned int)context->maxFramesPerBuffer, (unsigned int)numFrames);
+        return noErr;
+    }
+
+    // Render input into the IO Unit's internal buffer.
+    AudioBufferList microphoneBufferList;
+    microphoneBufferList.mNumberBuffers = 1;
+
+    AudioBuffer *microphoneAudioBuffer = &microphoneBufferList.mBuffers[0];
+    microphoneAudioBuffer->mNumberChannels = 1;
+    microphoneAudioBuffer->mDataByteSize = (UInt32)numFrames * 2;
+    microphoneAudioBuffer->mData = NULL;
+
+    OSStatus status = AudioUnitRender(context->audioUnit,
+                                      actionFlags,
+                                      timestamp,
+                                      busNumber,
+                                      numFrames,
+                                      &microphoneBufferList);
+    if (status != noErr) {
+        return status;
+    }
+
+    // Early return with microphone only recording.
+    if (context->recordingBuffer == NULL) {
+        if (context->deviceContext) {
+            TVIAudioDeviceWriteCaptureData(context->deviceContext,
+                                           microphoneAudioBuffer->mData,
+                                           microphoneAudioBuffer->mDataByteSize);
+        }
+        return noErr;
+    }
+
+    // Dequeue the AVPlayer audio.
+    AudioBufferList playerBufferList;
+    playerBufferList.mNumberBuffers = 1;
+    AudioBuffer *playerAudioBuffer = &playerBufferList.mBuffers[0];
+    playerAudioBuffer->mNumberChannels = kPreferredNumberOfChannels;
+    playerAudioBuffer->mDataByteSize = (UInt32)numFrames * playerAudioBuffer->mNumberChannels * kAudioSampleSize;
+    playerAudioBuffer->mData = context->audioBuffer;
+
+    ExampleAVPlayerAudioDeviceDequeueFrames(context->recordingBuffer, numFrames, NULL, &playerBufferList);
+
+    // Early return to test player audio.
+    // Deliver the samples (via copying) to WebRTC.
+    if (context->deviceContext) {
+        TVIAudioDeviceWriteCaptureData(context->deviceContext, playerAudioBuffer->mData, playerAudioBuffer->mDataByteSize);
+        return noErr;
+    }
+
+    // Convert the mono AVPlayer and Microphone sources into a stereo stream.
+    AudioConverterRef converter = context->audioConverter;
+
+    // Source buffers.
+    AudioBufferList *playerMicrophoneBufferList = (AudioBufferList *)alloca(sizeof(AudioBufferList) + sizeof(AudioBuffer));
+    playerMicrophoneBufferList->mNumberBuffers = 2;
+
+    AudioBuffer *playerConvertBuffer = &playerMicrophoneBufferList->mBuffers[0];
+    playerConvertBuffer->mNumberChannels = 1;
+    playerConvertBuffer->mDataByteSize = (UInt32)numFrames * 2;
+    playerConvertBuffer->mData = context->audioBuffer;
+
+    AudioBuffer *microphoneConvertBuffer = &playerMicrophoneBufferList->mBuffers[1];
+    microphoneConvertBuffer->mNumberChannels = microphoneAudioBuffer->mNumberChannels;
+    microphoneConvertBuffer->mDataByteSize = microphoneAudioBuffer->mDataByteSize;
+    microphoneConvertBuffer->mData = microphoneAudioBuffer->mData;
+
+    // Destination buffer list.
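+    // The converter writes interleaved stereo (as described by -captureFormat) into a 16-byte aligned region of
+    // the shared capture buffer, past the frames dequeued from the player.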
+    AudioBufferList convertedBufferList;
+    convertedBufferList.mNumberBuffers = 1;
+    AudioBuffer *convertedAudioBuffer = &convertedBufferList.mBuffers[0];
+    convertedAudioBuffer->mNumberChannels = 2;
+    convertedAudioBuffer->mDataByteSize = (UInt32)numFrames * 4;
+    // Ensure 16-byte alignment.
+    UInt32 byteOffset = (UInt32)numFrames * 2;
+    byteOffset += 16 - (byteOffset % 16);
+    convertedAudioBuffer->mData = context->audioBuffer + byteOffset;
+    assert((byteOffset % 16) == 0);
+
+    status = AudioConverterConvertComplexBuffer(converter, numFrames, playerMicrophoneBufferList, &convertedBufferList);
+    if (status != noErr) {
+        NSLog(@"Convert failed, status: %d", status);
+    }
+    int8_t *convertedAudioData = (int8_t *)convertedAudioBuffer->mData;
+
+    // Deliver the samples (via copying) to WebRTC.
+    if (context->deviceContext && convertedAudioData) {
+        TVIAudioDeviceWriteCaptureData(context->deviceContext, convertedAudioData, convertedAudioBuffer->mDataByteSize);
+    }
+
+    return status;
+}
+
+#pragma mark - Private (AVAudioSession and CoreAudio)
+
++ (nonnull TVIAudioFormat *)capturingFormat {
+    /*
+     * Use the pre-determined maximum frame size. AudioUnit callbacks are variable, and in most situations will be close
+     * to the `AVAudioSession.preferredIOBufferDuration` that we've requested.
+     */
+    return [[TVIAudioFormat alloc] initWithChannels:kPreferredNumberOfChannels
+                                         sampleRate:kPreferredSampleRate
+                                    framesPerBuffer:kMaximumFramesPerBuffer];
+}
+
++ (nullable TVIAudioFormat *)activeFormat {
+    /*
+     * Use the pre-determined maximum frame size. AudioUnit callbacks are variable, and in most situations will be close
+     * to the `AVAudioSession.preferredIOBufferDuration` that we've requested.
+     */
+    const size_t sessionFramesPerBuffer = kMaximumFramesPerBuffer;
+    const double sessionSampleRate = [AVAudioSession sharedInstance].sampleRate;
+    const NSInteger sessionOutputChannels = [AVAudioSession sharedInstance].outputNumberOfChannels;
+    size_t rendererChannels = sessionOutputChannels >= TVIAudioChannelsStereo ?
TVIAudioChannelsStereo : TVIAudioChannelsMono; + + return [[TVIAudioFormat alloc] initWithChannels:rendererChannels + sampleRate:sessionSampleRate + framesPerBuffer:sessionFramesPerBuffer]; +} + ++ (AudioComponentDescription)audioUnitDescription { + AudioComponentDescription audioUnitDescription; + audioUnitDescription.componentType = kAudioUnitType_Output; + audioUnitDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO; + audioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + audioUnitDescription.componentFlags = 0; + audioUnitDescription.componentFlagsMask = 0; + return audioUnitDescription; +} + ++ (AudioComponentDescription)mixerAudioCompontentDescription { + AudioComponentDescription audioUnitDescription; + audioUnitDescription.componentType = kAudioUnitType_Mixer; + audioUnitDescription.componentSubType = kAudioUnitSubType_MultiChannelMixer; + audioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + audioUnitDescription.componentFlags = 0; + audioUnitDescription.componentFlagsMask = 0; + return audioUnitDescription; +} + ++ (AudioComponentDescription)genericOutputAudioCompontentDescription { + AudioComponentDescription audioUnitDescription; + audioUnitDescription.componentType = kAudioUnitType_Output; + audioUnitDescription.componentSubType = kAudioUnitSubType_GenericOutput; + audioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + audioUnitDescription.componentFlags = 0; + audioUnitDescription.componentFlagsMask = 0; + return audioUnitDescription; +} + +- (void)setupAVAudioSession { + AVAudioSession *session = [AVAudioSession sharedInstance]; + NSError *error = nil; + + if (![session setPreferredSampleRate:kPreferredSampleRate error:&error]) { + NSLog(@"Error setting sample rate: %@", error); + } + + size_t preferredOutputChannels = session.outputNumberOfChannels >= kPreferredNumberOfChannels ? kPreferredNumberOfChannels : session.outputNumberOfChannels; + if (![session setPreferredOutputNumberOfChannels:preferredOutputChannels error:&error]) { + NSLog(@"Error setting number of output channels to %zu: %@", preferredOutputChannels, error); + } + + /* + * We want to be as close as possible to the buffer size that the media engine needs. If there is + * a mismatch then TwilioVideo will ensure that appropriately sized audio buffers are delivered. 
+     */
+    if (![session setPreferredIOBufferDuration:kPreferredIOBufferDuration error:&error]) {
+        NSLog(@"Error setting IOBuffer duration: %@", error);
+    }
+
+    if (![session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error]) {
+        NSLog(@"Error setting session category: %@", error);
+    }
+
+    if (![session setMode:AVAudioSessionModeVideoChat error:&error]) {
+        NSLog(@"Error setting session mode: %@", error);
+    }
+
+    [self registerAVAudioSessionObservers];
+
+    if (![session setActive:YES error:&error]) {
+        NSLog(@"Error activating AVAudioSession: %@", error);
+    }
+
+    if (![session setPreferredInputNumberOfChannels:kPreferredNumberOfInputChannels error:&error]) {
+        NSLog(@"Error setting preferred number of input channels to %zu: %@", kPreferredNumberOfInputChannels, error);
+    }
+}
+
+- (AudioStreamBasicDescription)microphoneInputStreamDescription {
+    AudioStreamBasicDescription formatDescription = self.capturingFormat.streamDescription;
+    formatDescription.mBytesPerFrame = 2;
+    formatDescription.mBytesPerPacket = 2;
+    formatDescription.mChannelsPerFrame = 1;
+    return formatDescription;
+}
+
+- (AudioStreamBasicDescription)nonInterleavedStereoStreamDescription {
+    AudioStreamBasicDescription formatDescription = self.capturingFormat.streamDescription;
+    formatDescription.mBytesPerFrame = 2;
+    formatDescription.mBytesPerPacket = 2;
+    formatDescription.mChannelsPerFrame = 2;
+    formatDescription.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
+    return formatDescription;
+}
+
+- (OSStatus)setupAudioCapturer:(ExampleAVPlayerCapturerContext *)capturerContext {
+    UInt32 enableInput = capturerContext ? 1 : 0;
+    OSStatus status = AudioUnitSetProperty(_voiceProcessingIO, kAudioOutputUnitProperty_EnableIO,
+                                           kAudioUnitScope_Input, kInputBus, &enableInput,
+                                           sizeof(enableInput));
+
+    if (status != noErr) {
+        NSLog(@"Could not enable/disable input bus!");
+        AudioComponentInstanceDispose(_voiceProcessingIO);
+        _voiceProcessingIO = NULL;
+        return status;
+    } else if (!enableInput) {
+        // Input is not required.
+        return noErr;
+    }
+
+    // Request mono audio capture regardless of hardware.
+    AudioStreamBasicDescription capturingFormatDescription = [self microphoneInputStreamDescription];
+
+    // Our converter will interleave the mono microphone input and player audio in one stereo stream.
+    if (_captureConverter == NULL) {
+        AudioStreamBasicDescription sourceFormat = [self nonInterleavedStereoStreamDescription];
+        AudioStreamBasicDescription destinationFormat = [self.capturingFormat streamDescription];
+        OSStatus status = AudioConverterNew(&sourceFormat,
+                                            &destinationFormat,
+                                            &_captureConverter);
+        if (status != noErr) {
+            NSLog(@"Could not create capture converter! code: %d", status);
+            return status;
+        }
+    }
+
+    status = AudioUnitSetProperty(_voiceProcessingIO, kAudioUnitProperty_StreamFormat,
+                                  kAudioUnitScope_Output, kInputBus,
+                                  &capturingFormatDescription, sizeof(capturingFormatDescription));
+    if (status != noErr) {
+        NSLog(@"Could not set stream format on the input bus!");
+        AudioComponentInstanceDispose(_voiceProcessingIO);
+        _voiceProcessingIO = NULL;
+        return status;
+    }
+
+    // Setup the I/O input callback.
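+    // ExampleAVPlayerAudioDeviceRecordingInputCallback runs on the IO thread. It renders the microphone input,
+    // optionally mixes in AVPlayer audio dequeued from the tap's circular buffer, and delivers the result to the
+    // media engine with TVIAudioDeviceWriteCaptureData().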
+ AURenderCallbackStruct capturerCallback; + capturerCallback.inputProc = ExampleAVPlayerAudioDeviceRecordingInputCallback; + capturerCallback.inputProcRefCon = (void *)(capturerContext); + status = AudioUnitSetProperty(_voiceProcessingIO, kAudioOutputUnitProperty_SetInputCallback, + kAudioUnitScope_Global, kInputBus, &capturerCallback, + sizeof(capturerCallback)); + if (status != noErr) { + NSLog(@"Could not set capturing callback!"); + AudioComponentInstanceDispose(_voiceProcessingIO); + _voiceProcessingIO = NULL; + return status; + } + + return status; +} + +- (BOOL)setupAudioUnitRendererContext:(ExampleAVPlayerRendererContext *)rendererContext + capturerContext:(ExampleAVPlayerCapturerContext *)capturerContext { + AudioComponentDescription audioUnitDescription = [[self class] audioUnitDescription]; + AudioComponent audioComponent = AudioComponentFindNext(NULL, &audioUnitDescription); + + OSStatus status = AudioComponentInstanceNew(audioComponent, &_voiceProcessingIO); + if (status != noErr) { + NSLog(@"Could not find the AudioComponent instance!"); + return NO; + } + + /* + * Configure the VoiceProcessingIO audio unit. Our rendering format attempts to match what AVAudioSession requires to + * prevent any additional format conversions after the media engine has mixed our playout audio. + */ + UInt32 enableOutput = rendererContext ? 1 : 0; + status = AudioUnitSetProperty(_voiceProcessingIO, kAudioOutputUnitProperty_EnableIO, + kAudioUnitScope_Output, kOutputBus, + &enableOutput, sizeof(enableOutput)); + if (status != noErr) { + NSLog(@"Could not enable/disable output bus!"); + AudioComponentInstanceDispose(_voiceProcessingIO); + _voiceProcessingIO = NULL; + return NO; + } + + if (enableOutput) { + AudioStreamBasicDescription renderingFormatDescription = self.renderingFormat.streamDescription; + AudioStreamBasicDescription playerFormatDescription = renderingFormatDescription; + if (self.renderingContext->playoutBuffer) { + playerFormatDescription = self.audioTapContext->renderingFormat; + } + + // Setup playback mixer. + AudioComponentDescription mixerComponentDescription = [[self class] mixerAudioCompontentDescription]; + AudioComponent mixerComponent = AudioComponentFindNext(NULL, &mixerComponentDescription); + + OSStatus status = AudioComponentInstanceNew(mixerComponent, &_playbackMixer); + if (status != noErr) { + NSLog(@"Could not find the mixer AudioComponent instance!"); + return NO; + } + + // Configure the mixer's output format. 
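+        // The mixer output uses the Twilio rendering format so that no additional conversion is required before
+        // the VoiceProcessingIO output bus.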
+ status = AudioUnitSetProperty(_playbackMixer, kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Output, kOutputBus, + &renderingFormatDescription, sizeof(renderingFormatDescription)); + if (status != noErr) { + NSLog(@"Could not set stream format on the mixer output bus!"); + AudioComponentInstanceDispose(_voiceProcessingIO); + _voiceProcessingIO = NULL; + return NO; + } + + status = AudioUnitSetProperty(_playbackMixer, kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Input, 0, + &playerFormatDescription, sizeof(playerFormatDescription)); + if (status != noErr) { + NSLog(@"Could not set stream format on the mixer input bus 0!"); + AudioComponentInstanceDispose(_voiceProcessingIO); + _voiceProcessingIO = NULL; + return NO; + } + + status = AudioUnitSetProperty(_playbackMixer, kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Input, 1, + &renderingFormatDescription, sizeof(renderingFormatDescription)); + if (status != noErr) { + NSLog(@"Could not set stream format on the mixer input bus 1!"); + AudioComponentInstanceDispose(_voiceProcessingIO); + _voiceProcessingIO = NULL; + return NO; + } + + // Connection: Mixer Output 0 -> VoiceProcessingIO Input Scope, Output Bus + AudioUnitConnection mixerOutputConnection; + mixerOutputConnection.sourceAudioUnit = _playbackMixer; + mixerOutputConnection.sourceOutputNumber = kOutputBus; + mixerOutputConnection.destInputNumber = kOutputBus; + + status = AudioUnitSetProperty(_voiceProcessingIO, kAudioUnitProperty_MakeConnection, + kAudioUnitScope_Input, kOutputBus, + &mixerOutputConnection, sizeof(mixerOutputConnection)); + if (status != noErr) { + NSLog(@"Could not connect the mixer output to voice processing input!"); + AudioComponentInstanceDispose(_voiceProcessingIO); + _voiceProcessingIO = NULL; + return NO; + } + + status = AudioUnitSetProperty(_voiceProcessingIO, kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Input, kOutputBus, + &renderingFormatDescription, sizeof(renderingFormatDescription)); + if (status != noErr) { + NSLog(@"Could not set stream format on the output bus!"); + AudioComponentInstanceDispose(_voiceProcessingIO); + _voiceProcessingIO = NULL; + return NO; + } + + // Setup the rendering callbacks. 
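+        // Mixer input bus 0 is driven by the AVPlayer audio tap callback and bus 1 by the media engine's
+        // renderer callback; the mixed output feeds the VoiceProcessingIO output element configured above.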
+        UInt32 elementCount = 2;
+        status = AudioUnitSetProperty(_playbackMixer, kAudioUnitProperty_ElementCount,
+                                      kAudioUnitScope_Input, 0, &elementCount,
+                                      sizeof(elementCount));
+        if (status != noErr) {
+            NSLog(@"Could not set input element count!");
+            AudioComponentInstanceDispose(_voiceProcessingIO);
+            _voiceProcessingIO = NULL;
+            return NO;
+        }
+
+        AURenderCallbackStruct audioTapRenderCallback;
+        audioTapRenderCallback.inputProc = ExampleAVPlayerAudioDeviceAudioTapPlaybackCallback;
+        audioTapRenderCallback.inputProcRefCon = (void *)(rendererContext);
+        status = AudioUnitSetProperty(_playbackMixer, kAudioUnitProperty_SetRenderCallback,
+                                      kAudioUnitScope_Input, 0, &audioTapRenderCallback,
+                                      sizeof(audioTapRenderCallback));
+        if (status != noErr) {
+            NSLog(@"Could not set audio tap rendering callback!");
+            AudioComponentInstanceDispose(_voiceProcessingIO);
+            _voiceProcessingIO = NULL;
+            return NO;
+        }
+
+        AURenderCallbackStruct audioRendererRenderCallback;
+        audioRendererRenderCallback.inputProc = ExampleAVPlayerAudioDeviceAudioRendererPlaybackCallback;
+        audioRendererRenderCallback.inputProcRefCon = (void *)(rendererContext);
+        status = AudioUnitSetProperty(_playbackMixer, kAudioUnitProperty_SetRenderCallback,
+                                      kAudioUnitScope_Input, 1, &audioRendererRenderCallback,
+                                      sizeof(audioRendererRenderCallback));
+        if (status != noErr) {
+            NSLog(@"Could not set audio renderer rendering callback!");
+            AudioComponentInstanceDispose(_voiceProcessingIO);
+            _voiceProcessingIO = NULL;
+            return NO;
+        }
+    }
+
+    status = [self setupAudioCapturer:self.capturingContext];
+    if (status != noErr) {
+        // The capturer setup has already disposed of the audio unit on failure.
+        return NO;
+    }
+
+    // Finally, initialize the IO audio unit and mixer (if present).
+    status = AudioUnitInitialize(_voiceProcessingIO);
+    if (status != noErr) {
+        NSLog(@"Could not initialize the audio unit!");
+        AudioComponentInstanceDispose(_voiceProcessingIO);
+        _voiceProcessingIO = NULL;
+        return NO;
+    }
+
+    if (_playbackMixer) {
+        status = AudioUnitInitialize(_playbackMixer);
+        if (status != noErr) {
+            NSLog(@"Could not initialize the playback mixer audio unit!");
+            AudioComponentInstanceDispose(_playbackMixer);
+            _playbackMixer = NULL;
+            return NO;
+        }
+    }
+
+    return YES;
+}
+
+- (BOOL)startAudioUnit {
+    OSStatus status = AudioOutputUnitStart(_voiceProcessingIO);
+    if (status != noErr) {
+        NSLog(@"Could not start the audio unit. code: %d", status);
+        return NO;
+    }
+
+    return YES;
+}
+
+- (BOOL)stopAudioUnit {
+    OSStatus status = AudioOutputUnitStop(_voiceProcessingIO);
+    if (status != noErr) {
+        NSLog(@"Could not stop the audio unit. code: %d", status);
+        return NO;
+    }
+
+    return YES;
+}
+
+- (void)teardownAudioUnit {
+    if (_voiceProcessingIO) {
+        AudioUnitUninitialize(_voiceProcessingIO);
+        AudioComponentInstanceDispose(_voiceProcessingIO);
+        _voiceProcessingIO = NULL;
+    }
+
+    if (_playbackMixer) {
+        AudioUnitUninitialize(_playbackMixer);
+        AudioComponentInstanceDispose(_playbackMixer);
+        _playbackMixer = NULL;
+    }
+
+    if (_captureConverter != NULL) {
+        AudioConverterDispose(_captureConverter);
+        _captureConverter = NULL;
+    }
+}
+
+- (void)restartAudioUnitAtTime:(CMTime)startTime {
+    BOOL restart = NO;
+
+    AudioTimeStamp startTimestamp = {0};
+    startTimestamp.mFlags = kAudioTimeStampHostTimeValid;
+    startTimestamp.mHostTime = CMClockConvertHostTimeToSystemUnits(startTime);
+    self.renderingContext->playoutStartTimestamp = startTimestamp;
+
+    // TODO: Assumption, pass as an arg using the asset's current time and audio timescale?
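+    // For now, restart the playout sample clock at zero. The host timestamp above anchors when playback resumes.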
+ AudioTimeStamp sampleTimestamp = {0}; + sampleTimestamp.mFlags = kAudioTimeStampSampleTimeValid; + sampleTimestamp.mSampleTime = 0; + + @synchronized (self) { + if (self.wantsAudio) { + restart = YES; + [self stopAudioUnit]; + [self teardownAudioUnit]; + if (self.renderingContext) { + self.renderingContext->playoutBuffer = _audioTapRenderingBuffer; + self.renderingContext->playoutSampleTimestamp = sampleTimestamp; + } + if (self.capturingContext) { + self.capturingContext->recordingBuffer = _audioTapCapturingBuffer; + } + if ([self setupAudioUnitRendererContext:self.renderingContext + capturerContext:self.capturingContext]) { + if (self.capturingContext) { + self.capturingContext->audioUnit = _voiceProcessingIO; + self.capturingContext->audioConverter = _captureConverter; + } + } else { + return; + } + } + } + + [self startAudioUnit]; +} + +#pragma mark - NSNotification Observers + +- (void)registerAVAudioSessionObservers { + // An audio device that interacts with AVAudioSession should handle events like interruptions and route changes. + NSNotificationCenter *center = [NSNotificationCenter defaultCenter]; + + [center addObserver:self selector:@selector(handleAudioInterruption:) name:AVAudioSessionInterruptionNotification object:nil]; + /* + * Interruption handling is different on iOS 9.x. If your application becomes interrupted while it is in the + * background then you will not get a corresponding notification when the interruption ends. We workaround this + * by handling UIApplicationDidBecomeActiveNotification and treating it as an interruption end. + */ + if (![[NSProcessInfo processInfo] isOperatingSystemAtLeastVersion:(NSOperatingSystemVersion){10, 0, 0}]) { + [center addObserver:self selector:@selector(handleApplicationDidBecomeActive:) name:UIApplicationDidBecomeActiveNotification object:nil]; + } + + [center addObserver:self selector:@selector(handleRouteChange:) name:AVAudioSessionRouteChangeNotification object:nil]; + [center addObserver:self selector:@selector(handleMediaServiceLost:) name:AVAudioSessionMediaServicesWereLostNotification object:nil]; + [center addObserver:self selector:@selector(handleMediaServiceRestored:) name:AVAudioSessionMediaServicesWereResetNotification object:nil]; +} + +- (void)handleAudioInterruption:(NSNotification *)notification { + AVAudioSessionInterruptionType type = [notification.userInfo[AVAudioSessionInterruptionTypeKey] unsignedIntegerValue]; + + @synchronized(self) { + // TODO: Multiple contexts. + // If the worker block is executed, then context is guaranteed to be valid. + TVIAudioDeviceContext context = self.renderingContext ? self.renderingContext->deviceContext : NULL; + if (context) { + TVIAudioDeviceExecuteWorkerBlock(context, ^{ + if (type == AVAudioSessionInterruptionTypeBegan) { + NSLog(@"Interruption began."); + self.interrupted = YES; + [self stopAudioUnit]; + TVIAudioSessionDeactivated(context); + } else { + NSLog(@"Interruption ended."); + self.interrupted = NO; + if ([self startAudioUnit]) { + TVIAudioSessionActivated(context); + } + } + }); + } + } +} + +- (void)handleApplicationDidBecomeActive:(NSNotification *)notification { + @synchronized(self) { + // If the worker block is executed, then context is guaranteed to be valid. + TVIAudioDeviceContext context = self.renderingContext ? 
self.renderingContext->deviceContext : NULL; + if (context) { + TVIAudioDeviceExecuteWorkerBlock(context, ^{ + if (self.isInterrupted) { + NSLog(@"Synthesizing an interruption ended event for iOS 9.x devices."); + self.interrupted = NO; + if ([self startAudioUnit]) { + TVIAudioSessionActivated(context); + } + } + }); + } + } +} + +- (void)handleRouteChange:(NSNotification *)notification { + // Check if the sample rate, or channels changed and trigger a format change if it did. + AVAudioSessionRouteChangeReason reason = [notification.userInfo[AVAudioSessionRouteChangeReasonKey] unsignedIntegerValue]; + + switch (reason) { + case AVAudioSessionRouteChangeReasonUnknown: + case AVAudioSessionRouteChangeReasonNewDeviceAvailable: + case AVAudioSessionRouteChangeReasonOldDeviceUnavailable: + // Each device change might cause the actual sample rate or channel configuration of the session to change. + case AVAudioSessionRouteChangeReasonCategoryChange: + // In iOS 9.2+ switching routes from a BT device in control center may cause a category change. + case AVAudioSessionRouteChangeReasonOverride: + case AVAudioSessionRouteChangeReasonWakeFromSleep: + case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory: + case AVAudioSessionRouteChangeReasonRouteConfigurationChange: + // With CallKit, AVAudioSession may change the sample rate during a configuration change. + // If a valid route change occurs we may want to update our audio graph to reflect the new output device. + @synchronized(self) { + // TODO: Contexts + if (self.renderingContext) { + TVIAudioDeviceExecuteWorkerBlock(self.renderingContext->deviceContext, ^{ + [self handleValidRouteChange]; + }); + } + } + break; + } +} + +- (void)handleValidRouteChange { + // Nothing to process while we are interrupted. We will interrogate the AVAudioSession once the interruption ends. + if (self.isInterrupted) { + return; + } else if (_voiceProcessingIO == NULL) { + return; + } + + NSLog(@"A route change ocurred while the AudioUnit was started. Checking the active audio format."); + + // Determine if the format actually changed. We only care about sample rate and number of channels. + TVIAudioFormat *activeFormat = [[self class] activeFormat]; + + if (![activeFormat isEqual:_renderingFormat]) { + NSLog(@"The rendering format changed. Restarting with %@", activeFormat); + // Signal a change by clearing our cached format, and allowing TVIAudioDevice to drive the process. + _renderingFormat = nil; + + @synchronized(self) { + if (self.renderingContext) { + TVIAudioDeviceFormatChanged(self.renderingContext->deviceContext); + } else if (self.capturingContext) { + TVIAudioDeviceFormatChanged(self.capturingContext->deviceContext); + } + } + } +} + +- (void)handleMediaServiceLost:(NSNotification *)notification { + @synchronized(self) { + // TODO: Contexts. + if (self.renderingContext) { + TVIAudioDeviceExecuteWorkerBlock(self.renderingContext->deviceContext, ^{ + [self stopAudioUnit]; + TVIAudioSessionDeactivated(self.renderingContext->deviceContext); + }); + } + } +} + +- (void)handleMediaServiceRestored:(NSNotification *)notification { + @synchronized(self) { + // If the worker block is executed, then context is guaranteed to be valid. + TVIAudioDeviceContext context = self.renderingContext ? 
self.renderingContext->deviceContext : NULL; + if (context) { + TVIAudioDeviceExecuteWorkerBlock(context, ^{ + if ([self startAudioUnit]) { + TVIAudioSessionActivated(context); + } + }); + } + } +} + +- (void)unregisterAVAudioSessionObservers { + [[NSNotificationCenter defaultCenter] removeObserver:self]; +} + +@end diff --git a/CoViewingExample/AudioDevices/ExampleAVPlayerProcessingTap.h b/CoViewingExample/AudioDevices/ExampleAVPlayerProcessingTap.h new file mode 100644 index 00000000..9ccf46d7 --- /dev/null +++ b/CoViewingExample/AudioDevices/ExampleAVPlayerProcessingTap.h @@ -0,0 +1,47 @@ +// +// ExampleAVPlayerProcessingTap.h +// CoViewingExample +// +// Copyright © 2018 Twilio Inc. All rights reserved. +// + +#import +#import + +@class ExampleAVPlayerAudioDevice; + +typedef struct ExampleAVPlayerAudioTapContext { + __weak ExampleAVPlayerAudioDevice *audioDevice; + BOOL audioTapPrepared; + + TPCircularBuffer *capturingBuffer; + AudioConverterRef captureFormatConverter; + BOOL capturingSampleRateConversion; + BOOL captureFormatConvertIsPrimed; + + TPCircularBuffer *renderingBuffer; + AudioConverterRef renderFormatConverter; + AudioStreamBasicDescription renderingFormat; + + // Cached source audio, in case we need to perform a sample rate conversion and can't consume all the samples in one go. + AudioBufferList *sourceCache; + UInt32 sourceCacheFrames; + AudioStreamBasicDescription sourceFormat; +} ExampleAVPlayerAudioTapContext; + +void AVPlayerProcessingTapInit(MTAudioProcessingTapRef tap, void *clientInfo, void **tapStorageOut); + +void AVPlayerProcessingTapFinalize(MTAudioProcessingTapRef tap); + +void AVPlayerProcessingTapPrepare(MTAudioProcessingTapRef tap, + CMItemCount maxFrames, + const AudioStreamBasicDescription *processingFormat); + +void AVPlayerProcessingTapUnprepare(MTAudioProcessingTapRef tap); + +void AVPlayerProcessingTapProcess(MTAudioProcessingTapRef tap, + CMItemCount numberFrames, + MTAudioProcessingTapFlags flags, + AudioBufferList *bufferListInOut, + CMItemCount *numberFramesOut, + MTAudioProcessingTapFlags *flagsOut); diff --git a/CoViewingExample/AudioDevices/ExampleAVPlayerProcessingTap.m b/CoViewingExample/AudioDevices/ExampleAVPlayerProcessingTap.m new file mode 100644 index 00000000..05574adf --- /dev/null +++ b/CoViewingExample/AudioDevices/ExampleAVPlayerProcessingTap.m @@ -0,0 +1,395 @@ +// +// ExampleAVPlayerProcessingTap.m +// CoViewingExample +// +// Copyright © 2018 Twilio Inc. All rights reserved. +// + +#import "ExampleAVPlayerProcessingTap.h" + +#import "ExampleAVPlayerAudioDevice.h" +#import "TPCircularBuffer+AudioBufferList.h" + +static size_t const kPreferredNumberOfChannels = 2; +static uint32_t const kPreferredSampleRate = 48000; + +typedef struct ExampleAVPlayerAudioConverterContext { + AudioBufferList *cacheBuffers; + UInt32 cachePackets; + AudioBufferList *sourceBuffers; + // Keep track if we are iterating through the source to provide data to a converter. + UInt32 sourcePackets; + UInt32 sourcePacketIndex; +} ExampleAVPlayerAudioConverterContext; + +AudioBufferList *AudioBufferListCreate(const AudioStreamBasicDescription *audioFormat, int frameCount) { + int numberOfBuffers = audioFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved ? 
audioFormat->mChannelsPerFrame : 1;
+    AudioBufferList *audio = malloc(sizeof(AudioBufferList) + (numberOfBuffers - 1) * sizeof(AudioBuffer));
+    if (!audio) {
+        return NULL;
+    }
+    audio->mNumberBuffers = numberOfBuffers;
+
+    int channelsPerBuffer = audioFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved ? 1 : audioFormat->mChannelsPerFrame;
+    int bytesPerBuffer = audioFormat->mBytesPerFrame * frameCount;
+    for (int i = 0; i < numberOfBuffers; i++) {
+        if (bytesPerBuffer > 0) {
+            audio->mBuffers[i].mData = calloc(bytesPerBuffer, 1);
+            if (!audio->mBuffers[i].mData) {
+                for (int j = 0; j < i; j++) {
+                    free(audio->mBuffers[j].mData);
+                }
+                free(audio);
+                return NULL;
+            }
+        } else {
+            audio->mBuffers[i].mData = NULL;
+        }
+        audio->mBuffers[i].mDataByteSize = bytesPerBuffer;
+        audio->mBuffers[i].mNumberChannels = channelsPerBuffer;
+    }
+    return audio;
+}
+
+void AudioBufferListFree(AudioBufferList *bufferList) {
+    for (int i = 0; i < bufferList->mNumberBuffers; i++) {
+        if (bufferList->mBuffers[i].mData != NULL) {
+            free(bufferList->mBuffers[i].mData);
+        }
+    }
+    free(bufferList);
+}
+
+OSStatus AVPlayerAudioTapConverterInputDataProc(AudioConverterRef inAudioConverter,
+                                                UInt32 *ioNumberDataPackets,
+                                                AudioBufferList *ioData,
+                                                AudioStreamPacketDescription * _Nullable *outDataPacketDescription,
+                                                void *inUserData) {
+    UInt32 bytesPerChannel = 4;
+
+    // Give the converter what they asked for. They might not consume all of our source in one callback.
+    UInt32 minimumPackets = *ioNumberDataPackets;
+    ExampleAVPlayerAudioConverterContext *context = inUserData;
+
+    assert(context->sourcePackets + context->cachePackets >= *ioNumberDataPackets);
+//    printf("Convert at least %d input packets. We have %d source packets, %d cached packets.\n", *ioNumberDataPackets, context->sourcePackets, context->cachePackets);
+    AudioBufferList *sourceBufferList = (AudioBufferList *)context->sourceBuffers;
+    AudioBufferList *cacheBufferList = (AudioBufferList *)context->cacheBuffers;
+    assert(sourceBufferList->mNumberBuffers == ioData->mNumberBuffers);
+
+    for (UInt32 i = 0; i < sourceBufferList->mNumberBuffers; i++) {
+        if (context->cachePackets > 0) {
+            AudioBuffer *cacheBuffer = &cacheBufferList->mBuffers[i];
+            AudioBuffer *outputBuffer = &ioData->mBuffers[i];
+            UInt32 cachedBytes = context->cachePackets * bytesPerChannel;
+            UInt32 cachedFrames = context->cachePackets;
+            outputBuffer->mNumberChannels = cacheBuffer->mNumberChannels;
+            outputBuffer->mDataByteSize = cachedBytes;
+            outputBuffer->mData = cacheBuffer->mData;
+            *ioNumberDataPackets = cachedFrames;
+        } else {
+            UInt32 sourceFrames = minimumPackets;
+            UInt32 sourceBytes = sourceFrames * bytesPerChannel;
+
+            AudioBuffer *sourceBuffer = &sourceBufferList->mBuffers[i];
+            AudioBuffer *outputBuffer = &ioData->mBuffers[i];
+            outputBuffer->mNumberChannels = sourceBuffer->mNumberChannels;
+            outputBuffer->mDataByteSize = sourceBytes;
+            outputBuffer->mData = sourceBuffer->mData + (context->sourcePacketIndex * bytesPerChannel * sourceBuffer->mNumberChannels);
+        }
+    }
+
+    if (context->cachePackets > 0) {
+        context->cachePackets = 0;
+    } else {
+        context->sourcePacketIndex += *ioNumberDataPackets;
+    }
+
+//    if (context->sourcePackets - minimumPackets > 0) {
+//        // Copy the remainder of the source which was not used into the front of our cache.
+// +// UInt32 packetsToCopy = context->sourcePackets - minimumPackets; +// for (UInt32 i = 0; i < sourceBufferList->mNumberBuffers; i++) { +// AudioBuffer *cacheBuffer = &cacheBufferList->mBuffers[i]; +// AudioBuffer *sourceBuffer = &sourceBufferList->mBuffers[i]; +// assert(cacheBuffer->mDataByteSize >= sourceBuffer->mDataByteSize); +// UInt32 bytesToCopy = packetsToCopy * bytesPerChannel; +// void *sourceData = sourceBuffer->mData + (minimumPackets * bytesPerChannel); +// memcpy(cacheBuffer->mData, sourceData, bytesToCopy); +// } +// context->cachePackets = packetsToCopy; +// } + + return noErr; +} + +static inline void AVPlayerAudioTapProduceFilledFrames(TPCircularBuffer *buffer, + AudioConverterRef converter, + BOOL isConverterPrimed, + AudioBufferList *bufferListIn, + AudioBufferList *sourceCache, + UInt32 *cachedSourceFrames, + UInt32 framesIn, + UInt32 channelsOut) { + // Start with input buffer size as our argument. + // TODO: Does non-interleaving count towards the size (*2)? + // Give us a little more priming than we need (~8 frames). + UInt32 primeFrames = 8; + UInt32 sourceFrames = framesIn; + if (!isConverterPrimed) { + framesIn -= primeFrames; + } else if (*cachedSourceFrames > 0) { + framesIn += *cachedSourceFrames; + } + UInt32 desiredIoBufferSize = framesIn * 4 * bufferListIn->mNumberBuffers; +// printf("Input is %d bytes (%d total frames, %d cached frames).\n", desiredIoBufferSize, framesIn, *cachedSourceFrames); + UInt32 propertySizeIo = sizeof(desiredIoBufferSize); + OSStatus status = AudioConverterGetProperty(converter, + kAudioConverterPropertyCalculateOutputBufferSize, + &propertySizeIo, &desiredIoBufferSize); + + UInt32 bytesPerFrameOut = channelsOut * sizeof(SInt16); + UInt32 framesOut = (desiredIoBufferSize) / bytesPerFrameOut; +// UInt32 framesOut = (desiredIoBufferSize + (bytesPerFrameOut - 1)) / bytesPerFrameOut; +// framesOut += framesOut % 2; + UInt32 bytesOut = framesOut * bytesPerFrameOut; +// printf("Converter wants an output of %d bytes (%d frames, %d bytes per frames).\n", +// desiredIoBufferSize, framesOut, bytesPerFrameOut); + + AudioBufferList *producerBufferList = TPCircularBufferPrepareEmptyAudioBufferList(buffer, 1, bytesOut, NULL); + if (producerBufferList == NULL) { + return; + } + producerBufferList->mBuffers[0].mNumberChannels = channelsOut; + + UInt32 ioPacketSize = framesOut; +// printf("Ready to fill output buffer of frames: %d, bytes: %d with input buffer of frames: %d, bytes: %d.\n", +// framesOut, bytesOut, framesIn, framesIn * 4 * bufferListIn->mNumberBuffers); + ExampleAVPlayerAudioConverterContext context; + context.sourceBuffers = bufferListIn; + context.cacheBuffers = sourceCache; + context.sourcePackets = sourceFrames; + context.sourcePacketIndex = 0; + context.cachePackets = *cachedSourceFrames; + status = AudioConverterFillComplexBuffer(converter, + AVPlayerAudioTapConverterInputDataProc, + &context, + &ioPacketSize, + producerBufferList, + NULL); + // Adjust for what the format converter actually produced, in case it was different than what we asked for. + producerBufferList->mBuffers[0].mDataByteSize = ioPacketSize * bytesPerFrameOut; +// printf("Output was: %d packets / %d bytes. Consumed input packets: %d. Cached input packets: %d.\n", +// ioPacketSize, ioPacketSize * bytesPerFrameOut, context.sourcePackets, context.cachePackets); + + // TODO: Do we still produce the buffer list after a failure? 
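+    // AudioConverterFillComplexBuffer returns an OSStatus where 0 (noErr) means success, so the prepared
+    // AudioBufferList is only committed to the ring buffer when the conversion actually produced output.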
+ if (status == kCVReturnSuccess) { + *cachedSourceFrames = context.cachePackets; + TPCircularBufferProduceAudioBufferList(buffer, NULL); + } else { + printf("Error converting buffers: %d\n", status); + } +} + +static inline void AVPlayerAudioTapProduceConvertedFrames(TPCircularBuffer *buffer, + AudioConverterRef converter, + AudioBufferList *bufferListIn, + UInt32 framesIn, + CMTimeRange *sourceRangeIn, + UInt32 channelsOut) { + UInt32 bytesOut = framesIn * channelsOut * 2; + AudioBufferList *producerBufferList = TPCircularBufferPrepareEmptyAudioBufferList(buffer, 1, bytesOut, NULL); + if (producerBufferList == NULL) { + return; + } + producerBufferList->mBuffers[0].mNumberChannels = channelsOut; + + OSStatus status = AudioConverterConvertComplexBuffer(converter, + framesIn, + bufferListIn, + producerBufferList); + + // TODO: Do we still produce the buffer list after a failure? + if (status == kCVReturnSuccess) { + AudioTimeStamp timestamp = {0}; + timestamp.mFlags = kAudioTimeStampSampleTimeValid; + timestamp.mSampleTime = sourceRangeIn->start.value; + TPCircularBufferProduceAudioBufferList(buffer, ×tamp); + } else { + printf("Error converting buffers: %d\n", status); + } +} + +#pragma mark - MTAudioProcessingTap + +void AVPlayerProcessingTapInit(MTAudioProcessingTapRef tap, void *clientInfo, void **tapStorageOut) { + NSLog(@"Init audio tap."); + + // Provide access to our device in the Callbacks. + *tapStorageOut = clientInfo; +} + +void AVPlayerProcessingTapFinalize(MTAudioProcessingTapRef tap) { + NSLog(@"Finalize audio tap."); + + ExampleAVPlayerAudioTapContext *context = (ExampleAVPlayerAudioTapContext *)MTAudioProcessingTapGetStorage(tap); + context->audioTapPrepared = NO; + TPCircularBuffer *capturingBuffer = context->capturingBuffer; + TPCircularBuffer *renderingBuffer = context->renderingBuffer; + TPCircularBufferCleanup(capturingBuffer); + TPCircularBufferCleanup(renderingBuffer); +} + +void AVPlayerProcessingTapPrepare(MTAudioProcessingTapRef tap, + CMItemCount maxFrames, + const AudioStreamBasicDescription *processingFormat) { + NSLog(@"Preparing with frames: %d, channels: %d, bits/channel: %d, sample rate: %0.1f", + (int)maxFrames, processingFormat->mChannelsPerFrame, processingFormat->mBitsPerChannel, processingFormat->mSampleRate); + assert(processingFormat->mFormatID == kAudioFormatLinearPCM); + + // Defer init of the ring buffer memory until we understand the processing format. + ExampleAVPlayerAudioTapContext *context = (ExampleAVPlayerAudioTapContext *)MTAudioProcessingTapGetStorage(tap); + TPCircularBuffer *capturingBuffer = context->capturingBuffer; + TPCircularBuffer *renderingBuffer = context->renderingBuffer; + + size_t bufferSize = processingFormat->mBytesPerFrame * maxFrames; + // We need to add some overhead for the AudioBufferList data structures. + bufferSize += 2048; + // TODO: Size the buffer appropriately, as we may need to accumulate more than maxFrames due to bursty processing. + bufferSize *= 20; + + // TODO: If we are re-allocating then check the size? 
+ TPCircularBufferInit(capturingBuffer, bufferSize); + TPCircularBufferInit(renderingBuffer, bufferSize); + + AudioBufferList *cacheBufferList = AudioBufferListCreate(processingFormat, (int)maxFrames); + context->sourceCache = cacheBufferList; + context->sourceCacheFrames = 0; + context->sourceFormat = *processingFormat; + + TVIAudioFormat *playbackFormat = [[TVIAudioFormat alloc] initWithChannels:processingFormat->mChannelsPerFrame + sampleRate:processingFormat->mSampleRate + framesPerBuffer:maxFrames]; + AudioStreamBasicDescription preferredPlaybackDescription = [playbackFormat streamDescription]; + BOOL requiresFormatConversion = preferredPlaybackDescription.mFormatFlags != processingFormat->mFormatFlags; + + context->renderingFormat = preferredPlaybackDescription; + + if (requiresFormatConversion) { + OSStatus status = AudioConverterNew(processingFormat, &preferredPlaybackDescription, &context->renderFormatConverter); + if (status != 0) { + NSLog(@"Failed to create AudioConverter: %d", (int)status); + return; + } + } + + TVIAudioFormat *recordingFormat = [[TVIAudioFormat alloc] initWithChannels:kPreferredNumberOfChannels + sampleRate:(Float64)kPreferredSampleRate + framesPerBuffer:maxFrames]; + AudioStreamBasicDescription preferredRecordingDescription = [recordingFormat streamDescription]; + BOOL requiresSampleRateConversion = processingFormat->mSampleRate != preferredRecordingDescription.mSampleRate; + context->capturingSampleRateConversion = requiresSampleRateConversion; + + if (requiresFormatConversion || requiresSampleRateConversion) { + OSStatus status = AudioConverterNew(processingFormat, &preferredRecordingDescription, &context->captureFormatConverter); + if (status != 0) { + NSLog(@"Failed to create AudioConverter: %d", (int)status); + return; + } + UInt32 primingMethod = kConverterPrimeMethod_Normal; + status = AudioConverterSetProperty(context->captureFormatConverter, kAudioConverterPrimeMethod, + sizeof(UInt32), &primingMethod); + } + + context->audioTapPrepared = YES; + [context->audioDevice audioTapDidPrepare]; +} + +void AVPlayerProcessingTapUnprepare(MTAudioProcessingTapRef tap) { + NSLog(@"Unpreparing audio tap."); + + // Prevent any more frames from being consumed. Note that this might end audio playback early. 
+ ExampleAVPlayerAudioTapContext *context = (ExampleAVPlayerAudioTapContext *)MTAudioProcessingTapGetStorage(tap); + TPCircularBuffer *capturingBuffer = context->capturingBuffer; + TPCircularBuffer *renderingBuffer = context->renderingBuffer; + + TPCircularBufferClear(capturingBuffer); + TPCircularBufferClear(renderingBuffer); + if (context->sourceCache) { + AudioBufferListFree(context->sourceCache); + context->sourceCache = NULL; + context->sourceCacheFrames = 0; + } + + if (context->renderFormatConverter != NULL) { + AudioConverterDispose(context->renderFormatConverter); + context->renderFormatConverter = NULL; + } + + if (context->captureFormatConverter != NULL) { + AudioConverterDispose(context->captureFormatConverter); + context->captureFormatConverter = NULL; + context->captureFormatConvertIsPrimed = NO; + } +} + +void AVPlayerProcessingTapProcess(MTAudioProcessingTapRef tap, + CMItemCount numberFrames, + MTAudioProcessingTapFlags flags, + AudioBufferList *bufferListInOut, + CMItemCount *numberFramesOut, + MTAudioProcessingTapFlags *flagsOut) { + ExampleAVPlayerAudioTapContext *context = (ExampleAVPlayerAudioTapContext *)MTAudioProcessingTapGetStorage(tap); + CMTimeRange sourceRange; + OSStatus status = MTAudioProcessingTapGetSourceAudio(tap, + numberFrames, + bufferListInOut, + flagsOut, + &sourceRange, + numberFramesOut); + if (status != noErr) { + // TODO: It might be useful to fill zeros here. + return; + } else if(CMTIMERANGE_IS_EMPTY(sourceRange) || + CMTIMERANGE_IS_INVALID(sourceRange)) { + return; + } + + UInt32 framesToCopy = (UInt32)*numberFramesOut; + + // Produce renderer buffers. These are interleaved, signed integer frames in the source's sample rate. + TPCircularBuffer *renderingBuffer = context->renderingBuffer; + AVPlayerAudioTapProduceConvertedFrames(renderingBuffer, + context->renderFormatConverter, + bufferListInOut, + framesToCopy, + &sourceRange, + kPreferredNumberOfChannels); + + // Produce capturer buffers. We will perform a sample rate conversion if needed. + TPCircularBuffer *capturingBuffer = context->capturingBuffer; + if (context->capturingSampleRateConversion) { + AVPlayerAudioTapProduceFilledFrames(capturingBuffer, + context->captureFormatConverter, + context->captureFormatConvertIsPrimed, + bufferListInOut, context->sourceCache, + &context->sourceCacheFrames, + framesToCopy, + kPreferredNumberOfChannels); + context->captureFormatConvertIsPrimed = YES; + } else { + AVPlayerAudioTapProduceConvertedFrames(capturingBuffer, + context->captureFormatConverter, + bufferListInOut, + framesToCopy, + &sourceRange, + kPreferredNumberOfChannels); + } + + // Flush converters on a discontinuity. This is especially important for priming a sample rate converter. 
+ if (*flagsOut & kMTAudioProcessingTapFlag_EndOfStream) { + AudioConverterReset(context->renderFormatConverter); + AudioConverterReset(context->captureFormatConverter); + context->captureFormatConvertIsPrimed = NO; + } +} diff --git a/CoViewingExample/Base.lproj/LaunchScreen.storyboard b/CoViewingExample/Base.lproj/LaunchScreen.storyboard new file mode 100644 index 00000000..bfa36129 --- /dev/null +++ b/CoViewingExample/Base.lproj/LaunchScreen.storyboard @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/CoViewingExample/Base.lproj/Main.storyboard b/CoViewingExample/Base.lproj/Main.storyboard new file mode 100644 index 00000000..5ed2925e --- /dev/null +++ b/CoViewingExample/Base.lproj/Main.storyboard @@ -0,0 +1,114 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/CoViewingExample/ExampleAVPlayerAudioTap.swift b/CoViewingExample/ExampleAVPlayerAudioTap.swift new file mode 100644 index 00000000..3e3342e1 --- /dev/null +++ b/CoViewingExample/ExampleAVPlayerAudioTap.swift @@ -0,0 +1,64 @@ +// +// ExampleAVPlayerAudioTap.swift +// CoViewingExample +// +// Copyright © 2018 Twilio Inc. All rights reserved. +// + +import Foundation +import MediaToolbox + +class ExampleAVPlayerAudioTap { + + static func mediaToolboxAudioProcessingTapCreate(audioTap: ExampleAVPlayerAudioTap) -> MTAudioProcessingTap? { + var callbacks = MTAudioProcessingTapCallbacks( + version: kMTAudioProcessingTapCallbacksVersion_0, + clientInfo: UnsafeMutableRawPointer(Unmanaged.passUnretained(audioTap).toOpaque()), + init: audioTap.tapInit, + finalize: audioTap.tapFinalize, + prepare: audioTap.tapPrepare, + unprepare: audioTap.tapUnprepare, + process: audioTap.tapProcess + ) + + var tap: Unmanaged? + let status = MTAudioProcessingTapCreate(kCFAllocatorDefault, + &callbacks, + kMTAudioProcessingTapCreationFlag_PostEffects, + &tap) + + if status == kCVReturnSuccess { + return tap!.takeUnretainedValue() + } else { + return nil + } + } + + let tapInit: MTAudioProcessingTapInitCallback = { (tap, clientInfo, tapStorageOut) in + let nonOptionalSelf = clientInfo!.assumingMemoryBound(to: ExampleAVPlayerAudioTap.self).pointee + print("init:", tap, clientInfo as Any, tapStorageOut, nonOptionalSelf) + } + + let tapFinalize: MTAudioProcessingTapFinalizeCallback = { + (tap) in + print(#function) + } + + let tapPrepare: MTAudioProcessingTapPrepareCallback = {(tap, b, c) in + print("Prepare:", tap, b, c) + } + + let tapUnprepare: MTAudioProcessingTapUnprepareCallback = {(tap) in + print("Unprepare:", tap) + } + + let tapProcess: MTAudioProcessingTapProcessCallback = { + (tap, numberFrames, flags, bufferListInOut, numberFramesOut, flagsOut) in + print("Process callback:", tap, numberFrames, flags, bufferListInOut, numberFramesOut, flagsOut) + + let status = MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, flagsOut, nil, numberFramesOut) + if status != kCVReturnSuccess { + print("Failed to get source audio: ", status) + } + } +} diff --git a/CoViewingExample/ExampleAVPlayerSource.swift b/CoViewingExample/ExampleAVPlayerSource.swift new file mode 100644 index 00000000..cd5adfe6 --- /dev/null +++ b/CoViewingExample/ExampleAVPlayerSource.swift @@ -0,0 +1,254 @@ +// +// ExampleAVPlayerSource.swift +// CoViewingExample +// +// Copyright © 2018 Twilio Inc. All rights reserved. 
+// + +import AVFoundation +import TwilioVideo + +/* + * This capturer manages an AVPlayerVideoItemOutput, attempting to output each frame that becomes available + * for presentation. By default, a CADisplayLink timer is used to sample at the natural cadence of the display. + * When there is no more content to sample, the capturer suspends its timer and waits for callbacks via + * AVPlayerItemOutputPullDelegate to resume. In some cases, downscaling is used to reduce CPU and memory consumption. + * + * Please be aware that AVPlayer and its playback pipeline prepare content for presentation on your device, including + * mapping frames to the display. For example, when playing 23.976 or 24 fps content a technique known as 3:2 pulldown + * is used to time video samples for a 60 Hz iPhone display. Our capturer tags the frames with the best timing infromation + * that it has available - the presentation timestamps provided by AVPlayerVideoItemOutput. + */ +class ExampleAVPlayerSource: NSObject, TVIVideoCapturer { + + private var captureConsumer: TVIVideoCaptureConsumer? = nil + // Track how often we are receiving content. If no new frames are coming there is no need to sample the output. + private var lastPresentationTimestamp: CMTime? + // Display timer which fires at the natural cadence of our display. Sampling typically occurs within these timer callbacks. + private var outputTimer: CADisplayLink? = nil + // Dispatch timer which fires at a pre-determined cadence `kFrameOutputInterval`. + private var timerSource: DispatchSourceTimer? = nil + var videoOutput: AVPlayerItemVideoOutput? = nil + private let videoSampleQueue: DispatchQueue + + // Frame output/sampling interval for a DispatchSource. Note: 60 Hz = 16667, 23.976 Hz = 41708 + static let kFrameOutputInterval = DispatchTimeInterval.microseconds(16667) + static let kFrameOutputLeeway = DispatchTimeInterval.milliseconds(0) + // How much time we will wait without receiving any frames before suspending output/sampling. + static let kFrameOutputSuspendTimeout = Double(1.0) + // The largest dimension we will output for streaming using the Video SDK. + static let kFrameOutputMaxDimension = CGFloat(960.0) + // A bounding box which represents the largest video we will output for streaming. + static let kFrameOutputMaxRect = CGRect(x: 0, y: 0, width: kFrameOutputMaxDimension, height: kFrameOutputMaxDimension) + + // Use a CADisplayLink, or a DispatchSourceTimer (experimental) for sampling. + static private var useDisplayLinkTimer = true + + init(item: AVPlayerItem) { + videoSampleQueue = DispatchQueue(label: "com.twilio.avplayersource", qos: DispatchQoS.userInteractive, + attributes: DispatchQueue.Attributes(rawValue: 0), + autoreleaseFrequency: DispatchQueue.AutoreleaseFrequency.workItem, + target: nil) + super.init() + + let presentationSize = item.presentationSize + let presentationPixels = presentationSize.width * presentationSize.height + print("Prepare for player item with size:", presentationSize, " pixels:", presentationPixels); + + /* + * We might request buffers downscaled for streaming. The output will always be 8-bit 4:2:0 NV12. 
+ */ + let attributes: [String : Any] + + if (presentationSize.width > ExampleAVPlayerSource.kFrameOutputMaxDimension || + presentationSize.height > ExampleAVPlayerSource.kFrameOutputMaxDimension) { + let streamingRect = AVMakeRect(aspectRatio: presentationSize, insideRect: ExampleAVPlayerSource.kFrameOutputMaxRect) + print("Requesting downscaling to:", streamingRect.size, "."); + + attributes = [ + kCVPixelBufferWidthKey as String : Int(streamingRect.width), + kCVPixelBufferHeightKey as String : Int(streamingRect.height), + kCVPixelBufferIOSurfacePropertiesKey as String : [ : ], + kCVPixelBufferPixelFormatTypeKey as String : kCVPixelFormatType_420YpCbCr8BiPlanarFullRange + ] as [String : Any] + } else { + attributes = [ + kCVPixelBufferIOSurfacePropertiesKey as String : [ : ], + kCVPixelBufferPixelFormatTypeKey as String : kCVPixelFormatType_420YpCbCr8BiPlanarFullRange + ] as [String : Any] + } + + videoOutput = AVPlayerItemVideoOutput(pixelBufferAttributes: attributes) + videoOutput?.setDelegate(self, queue: videoSampleQueue) + + if ExampleAVPlayerSource.useDisplayLinkTimer { + addDisplayTimer() + } + videoOutput?.requestNotificationOfMediaDataChange(withAdvanceInterval: 0.02) + + item.add(videoOutput!) + } + + func outputFrame(itemTimestamp: CMTime) { + guard let output = videoOutput else { + return + } + guard let consumer = captureConsumer else { + return + } + if !output.hasNewPixelBuffer(forItemTime: itemTimestamp) { + // TODO: Consider suspending the timer and requesting a notification when media becomes available. +// print("No frame for host timestamp:", CACurrentMediaTime(), "\n", +// "Last presentation timestamp was:", lastPresentationTimestamp != nil ? lastPresentationTimestamp! : CMTime.zero) + return + } + + var presentationTimestamp = CMTime.zero + let pixelBuffer = output.copyPixelBuffer(forItemTime: itemTimestamp, + itemTimeForDisplay: &presentationTimestamp) + if let buffer = pixelBuffer { + if let lastTime = lastPresentationTimestamp { + // TODO: Use this info for 3:2 pulldown to re-construct the proper timestamps without display cadence? +// let delta = presentationTimestamp - lastTime +// print("Frame delta was:", delta) +// let movieTime = CVBufferGetAttachment(buffer, kCVBufferMovieTimeKey, nil) +// print("Movie time was:", movieTime as Any) + } + lastPresentationTimestamp = presentationTimestamp + + guard let frame = TVIVideoFrame(timestamp: presentationTimestamp, + buffer: buffer, + orientation: TVIVideoOrientation.up) else { + assertionFailure("We couldn't create a TVIVideoFrame with a valid CVPixelBuffer.") + return + } + consumer.consumeCapturedFrame(frame) + } + + if ExampleAVPlayerSource.useDisplayLinkTimer { + outputTimer?.isPaused = false + } else if timerSource == nil { + startTimerSource(hostTime: CACurrentMediaTime()) + } + } + + func startTimerSource(hostTime: CFTimeInterval) { + print(#function) + + let source = DispatchSource.makeTimerSource(flags: DispatchSource.TimerFlags.strict, + queue: videoSampleQueue) + timerSource = source + + source.setEventHandler(handler: { + if let output = self.videoOutput { + let currentHostTime = CACurrentMediaTime() + let currentItemTime = output.itemTime(forHostTime: currentHostTime) + self.outputFrame(itemTimestamp: currentItemTime) + } + }) + + // Thread safe cleanup of temporary storage, in case of cancellation. + source.setCancelHandler(handler: { + }) + + // Schedule a first time source for the full interval. 
+ let deadline = DispatchTime.now() + ExampleAVPlayerSource.kFrameOutputInterval + source.schedule(deadline: deadline, + repeating: ExampleAVPlayerSource.kFrameOutputInterval, + leeway: ExampleAVPlayerSource.kFrameOutputLeeway) + source.resume() + } + + func addDisplayTimer() { + let timer = CADisplayLink(target: self, + selector: #selector(ExampleAVPlayerSource.displayLinkDidFire(displayLink:))) + // Fire at the native v-sync cadence of our display. This is what AVPlayer is targeting anyways. + timer.preferredFramesPerSecond = 0 + timer.isPaused = true + timer.add(to: RunLoop.current, forMode: RunLoop.Mode.common) + outputTimer = timer + } + + @objc func displayLinkDidFire(displayLink: CADisplayLink) { + if let output = self.videoOutput { + // We want the video content targeted for the next v-sync. + let targetHostTime = displayLink.targetTimestamp + let currentItemTime = output.itemTime(forHostTime: targetHostTime) + self.outputFrame(itemTimestamp: currentItemTime) + } + } + + @objc func stopTimerSource() { + print(#function) + + timerSource?.cancel() + timerSource = nil + } + + func stopDisplayTimer() { + outputTimer?.invalidate() + outputTimer = nil + } + + public var isScreencast: Bool { + get { + return false + } + } + + public var supportedFormats: [TVIVideoFormat] { + get { + let format = TVIVideoFormat() + format.dimensions = CMVideoDimensions(width: 640, height: 360) + format.frameRate = 30 + format.pixelFormat = TVIPixelFormat.formatYUV420BiPlanarFullRange + return [format] + } + } + + func startCapture(_ format: TVIVideoFormat, consumer: TVIVideoCaptureConsumer) { + print(#function) + + self.captureConsumer = consumer; + consumer.captureDidStart(true) + } + + func stopCapture() { + print(#function) + + if ExampleAVPlayerSource.useDisplayLinkTimer { + stopDisplayTimer() + } else { + stopTimerSource() + } + self.captureConsumer = nil + } +} + +extension ExampleAVPlayerSource: AVPlayerItemOutputPullDelegate { + + func outputMediaDataWillChange(_ sender: AVPlayerItemOutput) { + print(#function) + + // Begin to receive video frames. + let videoOutput = sender as! AVPlayerItemVideoOutput + let currentHostTime = CACurrentMediaTime() + let currentItemTime = videoOutput.itemTime(forHostTime: currentHostTime) + + // We might have been called back so late that the output already has a frame ready. + let hasFrame = videoOutput.hasNewPixelBuffer(forItemTime: currentItemTime) + if hasFrame { + outputFrame(itemTimestamp: currentItemTime) + } else if ExampleAVPlayerSource.useDisplayLinkTimer { + outputTimer?.isPaused = false + } else { + startTimerSource(hostTime: currentHostTime); + } + } + + func outputSequenceWasFlushed(_ output: AVPlayerItemOutput) { + print(#function) + + // TODO: Flush and output a black frame while we wait? + } +} diff --git a/CoViewingExample/ExampleAVPlayerView.swift b/CoViewingExample/ExampleAVPlayerView.swift new file mode 100644 index 00000000..12a9acdb --- /dev/null +++ b/CoViewingExample/ExampleAVPlayerView.swift @@ -0,0 +1,54 @@ +// +// ExampleAVPlayerView.swift +// CoViewingExample +// +// Copyright © 2018 Twilio Inc. All rights reserved. +// + +import AVFoundation +import UIKit + +class ExampleAVPlayerView: UIView { + + init(frame: CGRect, player: AVPlayer) { + super.init(frame: frame) + self.playerLayer.player = player + self.contentMode = .scaleAspectFit + } + + required init?(coder aDecoder: NSCoder) { + super.init(coder: aDecoder) + // It won't be possible to hookup an AVPlayer yet. 
+ self.contentMode = .scaleAspectFit + } + + var playerLayer : AVPlayerLayer { + get { + return self.layer as! AVPlayerLayer + } + } + + override var contentMode: UIView.ContentMode { + set { + switch newValue { + case .scaleAspectFill: + playerLayer.videoGravity = .resizeAspectFill + case .scaleAspectFit: + playerLayer.videoGravity = .resizeAspect + case .scaleToFill: + playerLayer.videoGravity = .resize + default: + playerLayer.videoGravity = .resizeAspect + } + super.contentMode = newValue + } + + get { + return super.contentMode + } + } + + override class var layerClass : AnyClass { + return AVPlayerLayer.self + } +} diff --git a/CoViewingExample/Info.plist b/CoViewingExample/Info.plist new file mode 100644 index 00000000..9e96ba8f --- /dev/null +++ b/CoViewingExample/Info.plist @@ -0,0 +1,90 @@ + + + + + CFBundleDevelopmentRegion + $(DEVELOPMENT_LANGUAGE) + CFBundleDocumentTypes + + + CFBundleTypeIconFiles + + CFBundleTypeName + mpeg4 + LSHandlerRank + Default + LSItemContentTypes + + public.mpeg-4 + + + + CFBundleTypeIconFiles + + CFBundleTypeName + quicktime + LSHandlerRank + Default + LSItemContentTypes + + com.apple.quicktime-movie + + + + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + APPL + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + LSRequiresIPhoneOS + + NSAppTransportSecurity + + NSAllowsArbitraryLoadsForMedia + + + NSCameraUsageDescription + ${PRODUCT_NAME} uses your camera to capture video which is shared with other Viewers. + NSMicrophoneUsageDescription + ${PRODUCT_NAME} shares your microphone with other Viewers. Tap to mute your audio at any time. + UIBackgroundModes + + audio + voip + + UILaunchStoryboardName + LaunchScreen + UIMainStoryboardFile + Main + UIRequiredDeviceCapabilities + + armv7 + + UIRequiresPersistentWiFi + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + UISupportedInterfaceOrientations~ipad + + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + LSSupportsOpeningDocumentsInPlace + + + diff --git a/CoViewingExample/ViewController.swift b/CoViewingExample/ViewController.swift new file mode 100644 index 00000000..77d51631 --- /dev/null +++ b/CoViewingExample/ViewController.swift @@ -0,0 +1,749 @@ +// +// ViewController.swift +// CoViewingExample +// +// Copyright © 2018 Twilio Inc. All rights reserved. +// + +import AVFoundation +import UIKit + +class ViewController: UIViewController { + + // MARK: View Controller Members + + // Configure access token manually for testing, if desired! Create one manually in the console + // at https://www.twilio.com/console/video/runtime/testing-tools + var accessToken = "TWILIO_ACCESS_TOKEN" + + // Configure remote URL to fetch token from + var tokenUrl = "http://localhost:8000/token.php" + + // Twilio Video classes. + var room: TVIRoom? + var camera: TVICameraCapturer? + var localVideoTrack: TVILocalVideoTrack! + var playerVideoTrack: TVILocalVideoTrack? + var localAudioTrack: TVILocalAudioTrack! + + // How long we will spend in pre-roll, attempting to synchronize our AVPlayer and AudioUnit graph. + static let kPrerollDuration = Double(1.0) + static let kPlayerTrackName = "player-track" + + // AVPlayer Audio/Video. + var audioDevice: ExampleAVPlayerAudioDevice? 
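+    // State for the presenter's local AVPlayer pipeline: the player itself, its audio tap, the video frame
+    // source, the preview view, the selected content URL, and the pre-roll state.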
+ var videoPlayer: AVPlayer? = nil + var videoPlayerAudioTap: ExampleAVPlayerAudioTap? = nil + var videoPlayerSource: ExampleAVPlayerSource? = nil + var videoPlayerView: ExampleAVPlayerView? = nil + var videoPlayerUrl: URL? = nil + var videoPlayerPreroll: Bool = false + + var isPresenter: Bool? + + @IBOutlet weak var localHeightConstraint: NSLayoutConstraint? + @IBOutlet weak var localWidthConstraint: NSLayoutConstraint? + @IBOutlet weak var remoteHeightConstraint: NSLayoutConstraint? + @IBOutlet weak var remoteWidthConstraint: NSLayoutConstraint? + + @IBOutlet weak var hangupButton: UIButton! + @IBOutlet weak var presenterButton: UIButton! + @IBOutlet weak var viewerButton: UIButton! + + @IBOutlet weak var localView: TVIVideoView! + weak var remotePlayerView: TVIVideoView? + @IBOutlet weak var remoteView: TVIVideoView! + + static let kRemoteContentUrls = [ + // Nice stereo separation in the trailer music. We now record and playback in stereo. + "American Animals Trailer 2 (720p24, 44.1 kHz)" : URL(string: "http://movietrailers.apple.com/movies/independent/american-animals/american-animals-trailer-2_h720p.mov")!, + "Avengers: Infinity War Trailer 3 (720p24, 44.1 kHz)" : URL(string: "https://trailers.apple.com/movies/marvel/avengers-infinity-war/avengers-infinity-war-trailer-2_h720p.mov")!, + // HLS stream which runs into the AVPlayer / AVAudioMix issue. + "BitDash - Parkour (HLS)" : URL(string: "https://bitdash-a.akamaihd.net/content/MI201109210084_1/m3u8s/f08e80da-bf1d-4e3d-8899-f0f6155f6efa.m3u8")!, + // 540p variant taken directly from the master playlist above. Still shows the AVPlayer issue. + "BitDash - Parkour (HLS, 540p)" : URL(string: "https://bitdash-a.akamaihd.net/content/MI201109210084_1/m3u8s/f08e80da-bf1d-4e3d-8899-f0f6155f6efa_video_540_1200000.m3u8")!, + // Progressive download mp4 version. Demonstrates that 48 kHz support is incorrect right now. + "BitDash - Parkour (1080p25, 48 kHz)" : URL(string: "https://bitmovin-a.akamaihd.net/content/MI201109210084_1/MI201109210084_mpeg-4_hd_high_1080p25_10mbits.mp4")!, + // Encoding in 1080p takes significantly more CPU than 720p + "Interstellar Trailer 3 (720p24, 44.1 kHz)" : URL(string: "http://movietrailers.apple.com/movies/paramount/interstellar/interstellar-tlr4_h720p.mov")!, + "Interstellar Trailer 3 (1080p24, 44.1 kHz)" : URL(string: "http://movietrailers.apple.com/movies/paramount/interstellar/interstellar-tlr4_h1080p.mov")!, + // Most trailers have a lot of cuts... this one not as many + "Mississippi Grind (720p24, 44.1 kHz)" : URL(string: "http://movietrailers.apple.com/movies/independent/mississippigrind/mississippigrind-tlr1_h1080p.mov")!, + // HLS stream which runs into the AVPlayer / AVAudioMix issue. + "Tele Quebec (HLS)" : URL(string: "https://mnmedias.api.telequebec.tv/m3u8/29880.m3u8")!, + // Video only source, but at 30 fps which is the max frame rate that we can capture. + "Telecom ParisTech, GPAC (720p30)" : URL(string: "https://download.tsi.telecom-paristech.fr/gpac/dataset/dash/uhd/mux_sources/hevcds_720p30_2M.mp4")!, + "Telecom ParisTech, GPAC (1080p30)" : URL(string: "https://download.tsi.telecom-paristech.fr/gpac/dataset/dash/uhd/mux_sources/hevcds_1080p30_6M.mp4")!, + "Twilio: What is Cloud Communications? (1080p24, 44.1 kHz)" : URL(string: "https://s3-us-west-1.amazonaws.com/avplayervideo/What+Is+Cloud+Communications.mov")! + ] + static let kRemoteContentURL = kRemoteContentUrls["Mississippi Grind (720p24, 44.1 kHz)"]! 
+ + override func viewDidLoad() { + super.viewDidLoad() + + let red = UIColor(red: 226.0/255.0, + green: 29.0/255.0, + blue: 37.0/255.0, + alpha: 1.0) + + presenterButton.backgroundColor = red + self.hangupButton.backgroundColor = red + self.hangupButton.titleLabel?.textColor = UIColor.white + self.hangupButton.isHidden = true + + presenterButton.layer.cornerRadius = 4; + viewerButton.layer.cornerRadius = 4; + hangupButton.layer.cornerRadius = 2; + + self.localView.contentMode = UIView.ContentMode.scaleAspectFit + self.localView.delegate = self + self.localWidthConstraint = self.localView.constraints.first + self.localHeightConstraint = self.localView.constraints.last + self.remoteView.contentMode = UIView.ContentMode.scaleAspectFit + self.remoteView.delegate = self + self.remoteHeightConstraint = self.remoteView.constraints.first + self.remoteWidthConstraint = self.remoteView.constraints.last + + if let videoUrl = videoPlayerUrl { + startPresenter(contentUrl: videoUrl) + } + } + + override func viewWillAppear(_ animated: Bool) { + super.viewWillAppear(animated) + } + + override func viewWillLayoutSubviews() { + super.viewWillLayoutSubviews() + + if let playerView = videoPlayerView { + playerView.frame = CGRect(origin: CGPoint.zero, size: self.view.bounds.size) + } + if let remotePlayerView = remotePlayerView { + remotePlayerView.frame = CGRect(origin: CGPoint.zero, size: self.view.bounds.size) + } + } + + override func updateViewConstraints() { + super.updateViewConstraints() + + if self.localView.hasVideoData { + let localDimensions = self.localView.videoDimensions + if localDimensions.width > localDimensions.height { + self.localWidthConstraint?.constant = 128 + self.localHeightConstraint?.constant = 96 + } else { + self.localWidthConstraint?.constant = 96 + self.localHeightConstraint?.constant = 128 + } + } + + if self.remoteView.hasVideoData { + let remoteDimensions = self.remoteView.videoDimensions + if remoteDimensions.width > remoteDimensions.height { + self.remoteWidthConstraint?.constant = 128 + self.remoteHeightConstraint?.constant = 96 + } else { + self.remoteWidthConstraint?.constant = 96 + self.remoteHeightConstraint?.constant = 128 + } + } + } + + override var prefersHomeIndicatorAutoHidden: Bool { + get { + return self.room != nil + } + } + + override var prefersStatusBarHidden: Bool { + get { + return self.room != nil + } + } + + @IBAction func startPresenter(_ sender: Any) { + startPresenter(contentUrl: ViewController.kRemoteContentURL) + } + + public func startPresenter(contentUrl: URL) { + videoPlayerUrl = contentUrl + if self.isViewLoaded == false { + return + } + + if self.audioDevice == nil { + let device = ExampleAVPlayerAudioDevice() + TwilioVideo.audioDevice = device + self.audioDevice = device + } + isPresenter = true + connect(name: "presenter") + } + + @IBAction func startViewer(_ sender: Any) { + self.audioDevice = nil + TwilioVideo.audioDevice = TVIDefaultAudioDevice() + isPresenter = false + connect(name: "viewer") + } + + @IBAction func hangup(_ sender: Any) { + self.room?.disconnect() + } + + func logMessage(messageText: String) { + print(messageText) + } + + func connect(name: String) { + // Configure access token either from server or manually. + // If the default wasn't changed, try fetching from server. + if (accessToken == "TWILIO_ACCESS_TOKEN") { + let urlStringWithRole = tokenUrl + "?identity=" + name + do { + accessToken = try String(contentsOf:URL(string: urlStringWithRole)!) 
+ } catch { + let message = "Failed to fetch access token" + print(message) + return + } + } + + // Prepare local media which we will share with Room Participants. + self.prepareLocalMedia() + // Preparing the connect options with the access token that we fetched (or hardcoded). + let connectOptions = TVIConnectOptions.init(token: accessToken) { (builder) in + + // Use the local media that we prepared earlier. + builder.videoTracks = self.localVideoTrack != nil ? [self.localVideoTrack!] : [] + builder.audioTracks = self.localAudioTrack != nil ? [self.localAudioTrack!] : [TVILocalAudioTrack]() + + // The name of the Room where the Client will attempt to connect to. Please note that if you pass an empty + // Room `name`, the Client will create one for you. You can get the name or sid from any connected Room. + builder.roomName = "twilio" + + // Restrict video bandwidth used by viewers to improve presenter video. Use more bandwidth for presenter audio. + if name == "viewer" { + builder.encodingParameters = TVIEncodingParameters(audioBitrate: 0, videoBitrate: 1024 * 900) + } else { + builder.encodingParameters = TVIEncodingParameters(audioBitrate: 1024 * 96, videoBitrate: 0) + } + } + + // Connect to the Room using the options we provided. + room = TwilioVideo.connect(with: connectOptions, delegate: self) + print("Attempting to connect to:", connectOptions.roomName as Any) + + self.showRoomUI(inRoom: true) + } + + func prepareLocalMedia() { + // All Participants share local audio and video when they connect to the Room. + // Create an audio track. + if (localAudioTrack == nil) { + localAudioTrack = TVILocalAudioTrack.init() + + if (localAudioTrack == nil) { + print("Failed to create audio track") + } + } + + // Create a camera video Track. + #if !targetEnvironment(simulator) + if (localVideoTrack == nil) { + // Preview our local camera track in the local video preview view. + + camera = TVICameraCapturer(source: .frontCamera, delegate: nil) + let constraints = TVIVideoConstraints.init { (builder) in + builder.maxSize = TVIVideoConstraintsSize480x360 + builder.maxFrameRate = TVIVideoConstraintsFrameRate24 + } + + localVideoTrack = TVILocalVideoTrack(capturer: camera!, + enabled: true, + constraints: constraints, + name: "camera") + localVideoTrack.addRenderer(self.localView) + // We use the front facing camera only. Set mirroring each time since the renderer might be reused. + localView.shouldMirror = true + } + #else + localAudioTrack.isEnabled = false + #endif + } + + func showRoomUI(inRoom: Bool) { + self.hangupButton.isHidden = !inRoom + self.localView.isHidden = !inRoom + self.remoteView.isHidden = !inRoom + self.presenterButton.isHidden = inRoom + self.viewerButton.isHidden = inRoom + self.setNeedsUpdateOfHomeIndicatorAutoHidden() + self.setNeedsStatusBarAppearanceUpdate() + UIApplication.shared.isIdleTimerDisabled = inRoom + + if inRoom == false { + UIView.animate(withDuration: 0.2) { + self.view.backgroundColor = .white + } + } + } + + func startVideoPlayer() { + if let player = self.videoPlayer { + player.play() + return + } + + let asset = AVAsset(url: videoPlayerUrl!) + let assetKeysToPreload = [ + "hasProtectedContent", + "playable", + "tracks" + ] + print("Created asset with tracks:", asset.tracks as Any) + + let playerItem = AVPlayerItem(asset: asset, automaticallyLoadedAssetKeys: assetKeysToPreload) + // Prevent excessive resource usage when the content is HLS. We will downscale large progressively streamed content. 
+ playerItem.preferredMaximumResolution = ExampleAVPlayerSource.kFrameOutputMaxRect.size + // Register as an observer of the player item's status property + playerItem.addObserver(self, + forKeyPath: #keyPath(AVPlayerItem.status), + options: [.old, .new], + context: nil) + + playerItem.addObserver(self, + forKeyPath: #keyPath(AVPlayerItem.tracks), + options: [.old, .new], + context: nil) + + let player = AVPlayer(playerItem: playerItem) + player.volume = Float(0) + player.automaticallyWaitsToMinimizeStalling = false + + var audioClock: CMClock? = nil + let status = CMAudioClockCreate(allocator: nil, clockOut: &audioClock) + if (status == noErr) { + player.masterClock = audioClock; + } + videoPlayer = player + + let playerView = ExampleAVPlayerView(frame: CGRect.zero, player: player) + videoPlayerView = playerView + + let tapRecognizer = UITapGestureRecognizer(target: self, action: #selector(handlePlayerTap)) + tapRecognizer.numberOfTapsRequired = 2 + videoPlayerView?.addGestureRecognizer(tapRecognizer) + + // We will rely on frame based layout to size and position `self.videoPlayerView`. + self.view.insertSubview(playerView, at: 0) + self.view.setNeedsLayout() + UIView.animate(withDuration: 0.2) { + self.view.backgroundColor = UIColor.black + } + } + + @objc func handlePlayerTap(recognizer: UITapGestureRecognizer) { + if let view = self.videoPlayerView { + view.contentMode = view.contentMode == .scaleAspectFit ? .scaleAspectFill : .scaleAspectFit + } + } + + func setupRemoteVideoPlayer(videoTrack: TVIRemoteVideoTrack) { + guard let view = TVIVideoView(frame: self.view.bounds, delegate: nil) else { + return + } + view.contentMode = UIView.ContentMode.scaleAspectFit + videoTrack.addRenderer(view) + self.remotePlayerView = view + self.view.insertSubview(view, at: 0) + self.view.setNeedsLayout() + UIView.animate(withDuration: 0.2) { + self.view.backgroundColor = UIColor.black + } + } + + func setupVideoSource(item: AVPlayerItem) { + videoPlayerSource = ExampleAVPlayerSource(item: item) + + // Create and publish video track. + if let track = TVILocalVideoTrack(capturer: videoPlayerSource!, + enabled: true, + constraints: nil, + name: ViewController.kPlayerTrackName) { + playerVideoTrack = track + self.room!.localParticipant!.publishVideoTrack(track) + } + } + + func setupAudioMix(player: AVPlayer, playerItem: AVPlayerItem) { + guard let audioAssetTrack = firstAudioAssetTrack(playerItem: playerItem) else { + return + } + print("Setup audio mix with AudioAssetTrack, Id:", audioAssetTrack.trackID as Any, "\n", + "Asset:", audioAssetTrack.asset as Any, "\n", + "Audio Fallbacks:", audioAssetTrack.associatedTracks(ofType: AVAssetTrack.AssociationType.audioFallback), "\n", + "isPlayable:", audioAssetTrack.isPlayable) + + let audioMix = AVMutableAudioMix() + + let inputParameters = AVMutableAudioMixInputParameters(track: audioAssetTrack) + // TODO: Is memory management of the MTAudioProcessingTap correct? + inputParameters.audioTapProcessor = audioDevice!.createProcessingTap()?.takeUnretainedValue() + audioMix.inputParameters = [inputParameters] + playerItem.audioMix = audioMix + } + + func firstAudioAssetTrack(playerItem: AVPlayerItem) -> AVAssetTrack? 
{ + var audioAssetTracks: [AVAssetTrack] = [] + for playerItemTrack in playerItem.tracks { + if let assetTrack = playerItemTrack.assetTrack, + assetTrack.mediaType == AVMediaType.audio { + audioAssetTracks.append(assetTrack) + } + } + return audioAssetTracks.first + } + + func updateAudioMixParameters(playerItem: AVPlayerItem) { + // Update the audio mix to point to the first AVAssetTrack that we find. + if let audioAssetTrack = firstAudioAssetTrack(playerItem: playerItem), + let inputParameters = playerItem.audioMix?.inputParameters.first { + let mutableInputParameters = inputParameters as! AVMutableAudioMixInputParameters + mutableInputParameters.trackID = audioAssetTrack.trackID + print("Update the mix input parameters to use Track Id:", audioAssetTrack.trackID as Any, "\n", + "Asset:", audioAssetTrack.asset as Any, "\n", + "Audio Fallbacks:", audioAssetTrack.associatedTracks(ofType: AVAssetTrack.AssociationType.audioFallback), "\n", + "isPlayable:", audioAssetTrack.isPlayable) + } else { + // TODO + } + } + + func stopVideoPlayer() { + print(#function) + + videoPlayer?.pause() + videoPlayer?.currentItem?.removeObserver(self, forKeyPath: #keyPath(AVPlayerItem.status)) + videoPlayer?.currentItem?.removeObserver(self, forKeyPath: #keyPath(AVPlayerItem.tracks)) + videoPlayer?.currentItem?.remove((videoPlayerSource?.videoOutput)!) + videoPlayer?.currentItem?.audioMix = nil + videoPlayer?.replaceCurrentItem(with: nil) + videoPlayer = nil + + // TODO: Unpublish player video. + + // Remove player UI + videoPlayerView?.removeFromSuperview() + videoPlayerView = nil + } + + func prerollVideoPlayer() { + print("Preparing to play asset with Tracks:", videoPlayer?.currentItem?.asset.tracks as Any) + + videoPlayerPreroll = true + videoPlayer?.preroll(atRate: 1.0, completionHandler: { (success) in + if (success) { + // Start audio and video playback at a time synchronized with both parties. + // let now = CMClockGetTime(CMClockGetHostTimeClock()) + let now = CMClockGetTime((self.videoPlayer?.masterClock)!) + let start = now + CMTime(seconds: ViewController.kPrerollDuration, preferredTimescale: now.timescale) + + let audioAssetTrack = self.firstAudioAssetTrack(playerItem: (self.videoPlayer?.currentItem)!) + var range = CMTimeRange.invalid + if let assetTrack = audioAssetTrack { + range = assetTrack.timeRange + } + + print("Pre-roll success for item:", self.videoPlayer?.currentItem as Any, "\n", + "Current time:", self.videoPlayer?.currentItem?.currentTime() as Any, "\n", + "Audio asset range:", range as Any, "\n", + "\nStarting at:", start.seconds) + self.videoPlayer?.setRate(1.0, time: CMTime.invalid, atHostTime: start) + self.audioDevice?.startAudioTap(at: start) + } else { + print("Pre-roll failed, waiting to try again ...") + self.videoPlayerPreroll = false + } + }) + } + + override func observeValue(forKeyPath keyPath: String?, + of object: Any?, + change: [NSKeyValueChangeKey : Any]?, + context: UnsafeMutableRawPointer?) { + + if keyPath == #keyPath(AVPlayerItem.status) { + let status: AVPlayerItem.Status + + // Get the status change from the change dictionary + if let statusNumber = change?[.newKey] as? NSNumber { + status = AVPlayerItem.Status(rawValue: statusNumber.intValue)! + } else { + status = .unknown + } + + // Switch over the status + switch status { + case .readyToPlay: + // Player item is ready to play. + print("Ready to play asset.") + // Defer video source setup until we've loaded the asset so that we can determine downscaling for progressive streaming content. 
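+                // The status key path may be observed more than once; creating the source only while
+                // it is still nil ensures a single capturer is attached to the player item.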
+ if self.videoPlayerSource == nil { + setupVideoSource(item: object as! AVPlayerItem) + } + + if videoPlayer?.rate == 0 && + videoPlayerPreroll == false { + self.prerollVideoPlayer() + } + break + case .failed: + // Player item failed. See error. + // TODO: Show in the UI. + print("Playback failed with error:", videoPlayer?.currentItem?.error as Any) + break + case .unknown: + // Player item is not yet ready. + print("Player item status is unknown.") + break + } + } else if keyPath == #keyPath(AVPlayerItem.tracks) { + let playerItem = object as! AVPlayerItem + print("Player item tracks are:", playerItem.tracks as Any) + + // Configure our audio capturer to receive audio samples from the AVPlayerItem. + if playerItem.audioMix == nil, + firstAudioAssetTrack(playerItem: playerItem) != nil { + setupAudioMix(player: videoPlayer!, playerItem: playerItem) + } else { + // TODO: Possibly update the existing mix for HLS? + // This doesn't seem to fix the tap bug, nor does deferring mix creation. +// updateAudioMixParameters(playerItem: playerItem) + } + } + } +} + +// MARK: TVIRoomDelegate +extension ViewController : TVIRoomDelegate { + func didConnect(to room: TVIRoom) { + + // Listen to events from existing `TVIRemoteParticipant`s + for remoteParticipant in room.remoteParticipants { + remoteParticipant.delegate = self + } + + if (room.remoteParticipants.count > 0 && self.isPresenter!) { + stopVideoPlayer() + startVideoPlayer() + } + + let connectMessage = "Connected to room \(room.name) as \(room.localParticipant?.identity ?? "")." + logMessage(messageText: connectMessage) + + self.showRoomUI(inRoom: true) + } + + func room(_ room: TVIRoom, didDisconnectWithError error: Error?) { + if let disconnectError = error { + logMessage(messageText: "Disconnected from \(room.name).\ncode = \((disconnectError as NSError).code) error = \(disconnectError.localizedDescription)") + } else { + logMessage(messageText: "Disconnected from \(room.name)") + } + + stopVideoPlayer() + self.localVideoTrack = nil + self.localAudioTrack = nil + self.playerVideoTrack = nil + self.videoPlayerSource = nil + self.room = nil + self.showRoomUI(inRoom: false) + self.accessToken = "TWILIO_ACCESS_TOKEN" + } + + func room(_ room: TVIRoom, didFailToConnectWithError error: Error) { + logMessage(messageText: "Failed to connect to Room:\n\(error.localizedDescription)") + + self.room = nil + self.localVideoTrack = nil + self.localAudioTrack = nil + self.showRoomUI(inRoom: false) + self.accessToken = "TWILIO_ACCESS_TOKEN" + } + + func room(_ room: TVIRoom, participantDidConnect participant: TVIRemoteParticipant) { + participant.delegate = self + + logMessage(messageText: "Participant \(participant.identity) connected with \(participant.remoteAudioTracks.count) audio and \(participant.remoteVideoTracks.count) video tracks") + + if (room.remoteParticipants.count == 1 && self.isPresenter!) { + stopVideoPlayer() + startVideoPlayer() + } + } + + func room(_ room: TVIRoom, participantDidDisconnect participant: TVIRemoteParticipant) { + logMessage(messageText: "Room \(room.name), Participant \(participant.identity) disconnected") + } +} + +// MARK: TVIRemoteParticipantDelegate +extension ViewController : TVIRemoteParticipantDelegate { + + func remoteParticipant(_ participant: TVIRemoteParticipant, + publishedVideoTrack publication: TVIRemoteVideoTrackPublication) { + + // Remote Participant has offered to share the video Track. 
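+        // Rendering does not begin here. Frames are rendered once the Track is subscribed to,
+        // in `subscribed(to:publication:for:)` below.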
+ + logMessage(messageText: "Participant \(participant.identity) published \(publication.trackName) video track") + } + + func remoteParticipant(_ participant: TVIRemoteParticipant, + unpublishedVideoTrack publication: TVIRemoteVideoTrackPublication) { + + // Remote Participant has stopped sharing the video Track. + + logMessage(messageText: "Participant \(participant.identity) unpublished \(publication.trackName) video track") + } + + func remoteParticipant(_ participant: TVIRemoteParticipant, + publishedAudioTrack publication: TVIRemoteAudioTrackPublication) { + + // Remote Participant has offered to share the audio Track. + + logMessage(messageText: "Participant \(participant.identity) published \(publication.trackName) audio track") + } + + func remoteParticipant(_ participant: TVIRemoteParticipant, + unpublishedAudioTrack publication: TVIRemoteAudioTrackPublication) { + + // Remote Participant has stopped sharing the audio Track. + + logMessage(messageText: "Participant \(participant.identity) unpublished \(publication.trackName) audio track") + } + + func subscribed(to videoTrack: TVIRemoteVideoTrack, + publication: TVIRemoteVideoTrackPublication, + for participant: TVIRemoteParticipant) { + + // We are subscribed to the remote Participant's video Track. We will start receiving the + // remote Participant's video frames now. + + logMessage(messageText: "Subscribed to \(publication.trackName) video track for Participant \(participant.identity)") + + // Start remote rendering. + if (videoTrack.name == ViewController.kPlayerTrackName) { + setupRemoteVideoPlayer(videoTrack: videoTrack) + } else { + videoTrack.addRenderer(self.remoteView) + } + } + + func unsubscribed(from videoTrack: TVIRemoteVideoTrack, + publication: TVIRemoteVideoTrackPublication, + for participant: TVIRemoteParticipant) { + + // We are unsubscribed from the remote Participant's video Track. We will no longer receive the + // remote Participant's video. + + logMessage(messageText: "Unsubscribed from \(publication.trackName) video track for Participant \(participant.identity)") + + let renderers = videoTrack.renderers + let hasRemotePlayerView = renderers.contains { (renderer) -> Bool in + return renderer.isEqual(self.remotePlayerView) + } + let hasRemoteView = renderers.contains { (renderer) -> Bool in + return renderer.isEqual(self.remoteView) + } + + // Stop remote rendering. + if hasRemotePlayerView, + let playerView = self.remotePlayerView { + videoTrack.removeRenderer(playerView) + playerView.removeFromSuperview() + self.remotePlayerView = nil + } else if hasRemoteView { + videoTrack.removeRenderer(self.remoteView) + } + } + + func subscribed(to audioTrack: TVIRemoteAudioTrack, + publication: TVIRemoteAudioTrackPublication, + for participant: TVIRemoteParticipant) { + + // We are subscribed to the remote Participant's audio Track. We will start receiving the + // remote Participant's audio now. + + logMessage(messageText: "Subscribed to \(publication.trackName) audio track for Participant \(participant.identity)") + } + + func unsubscribed(from audioTrack: TVIRemoteAudioTrack, + publication: TVIRemoteAudioTrackPublication, + for participant: TVIRemoteParticipant) { + + // We are unsubscribed from the remote Participant's audio Track. We will no longer receive the + // remote Participant's audio. 
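+        // Remote audio is played out by the active audio device rather than a renderer,
+        // so unlike video there is nothing to detach here.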
+ + logMessage(messageText: "Unsubscribed from \(publication.trackName) audio track for Participant \(participant.identity)") + } + + func remoteParticipant(_ participant: TVIRemoteParticipant, + enabledVideoTrack publication: TVIRemoteVideoTrackPublication) { + logMessage(messageText: "Participant \(participant.identity) enabled \(publication.trackName) video track") + } + + func remoteParticipant(_ participant: TVIRemoteParticipant, + disabledVideoTrack publication: TVIRemoteVideoTrackPublication) { + logMessage(messageText: "Participant \(participant.identity) disabled \(publication.trackName) video track") + } + + func remoteParticipant(_ participant: TVIRemoteParticipant, + enabledAudioTrack publication: TVIRemoteAudioTrackPublication) { + logMessage(messageText: "Participant \(participant.identity) enabled \(publication.trackName) audio track") + } + + func remoteParticipant(_ participant: TVIRemoteParticipant, + disabledAudioTrack publication: TVIRemoteAudioTrackPublication) { + // We will continue to record silence and/or recognize audio while a Track is disabled. + logMessage(messageText: "Participant \(participant.identity) disabled \(publication.trackName) audio track") + } + + func failedToSubscribe(toAudioTrack publication: TVIRemoteAudioTrackPublication, + error: Error, + for participant: TVIRemoteParticipant) { + logMessage(messageText: "FailedToSubscribe \(publication.trackName) audio track, error = \(String(describing: error))") + } + + func failedToSubscribe(toVideoTrack publication: TVIRemoteVideoTrackPublication, + error: Error, + for participant: TVIRemoteParticipant) { + logMessage(messageText: "FailedToSubscribe \(publication.trackName) video track, error = \(String(describing: error))") + } +} + +extension ViewController : TVICameraCapturerDelegate { + func cameraCapturer(_ capturer: TVICameraCapturer, didStartWith source: TVICameraCaptureSource) { + // Layout the camera preview with dimensions appropriate for our orientation. + self.view.setNeedsLayout() + } + + func cameraCapturer(_ capturer: TVICameraCapturer, didFailWithError error: Error) { + logMessage(messageText: "Capture failed with error.\ncode = \((error as NSError).code) error = \(error.localizedDescription)") + capturer.previewView.removeFromSuperview() + } +} + +extension ViewController : TVIVideoViewDelegate { + func videoViewDidReceiveData(_ view: TVIVideoView) { + if view == self.localView || view == self.remoteView { + self.view.setNeedsUpdateConstraints() + } + } + func videoView(_ view: TVIVideoView, videoDimensionsDidChange dimensions: CMVideoDimensions) { + if view == self.localView || view == self.remoteView { + self.view.setNeedsUpdateConstraints() + } + } +} diff --git a/Podfile b/Podfile index 55961cac..e9f76caa 100644 --- a/Podfile +++ b/Podfile @@ -20,6 +20,13 @@ abstract_target 'TwilioVideo' do project 'AudioSinkExample.xcproject' end + target 'CoViewingExample' do + platform :ios, '11.0' + project 'CoViewingExample.xcproject' + + pod 'TPCircularBuffer', '~> 1.6' + end + target 'VideoQuickStart' do platform :ios, '9.0' project 'VideoQuickStart.xcproject'