mirror of https://github.com/oxen-io/session-ios

Gather audio concerns, clean up session when done

- Sync speakerphone state manipulated from the system call screen
  - Revert the audio session after a call failure; this ensures media plays
    out of the speaker after placing a failed call.
  - Replace the notification with the delegate pattern, since we're already
    using the delegate pattern here.
- Fix VoiceOver accessibility after recording a voice memo
- Avoid an audio blip after pressing hangup
- Rename CallAudioSession -> OWSAudioSession
  We're going to start using it for other non-call things, since we want to
  gather all our audio session concerns in one place.
- Resume background audio when done playing video
  - Extract OWSVideoPlayer, which ensures audio is in the proper state
    before playback
  - Move recording session logic to the shared OWSAudioSession (see the
    sketch after this list)
  - Deactivate the audio session when complete

// FREEBIE
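For illustration, a minimal sketch of the recording lifecycle this commit moves into the shared session. VoiceMemoRecorder is a hypothetical consumer, not part of this commit; it leans only on setRecordCategory() and endAudioActivity() from the OWSAudioSession added below.

import Foundation
import AVFoundation

// Hypothetical consumer of the shared OWSAudioSession recording API.
class VoiceMemoRecorder {
    private var recorder: AVAudioRecorder?

    func startRecording(to url: URL) {
        // setRecordCategory() asserts recording permission was already
        // granted and returns false if the category change fails.
        guard OWSAudioSession.shared.setRecordCategory() else {
            return
        }
        recorder = try? AVAudioRecorder(url: url, settings: [:])
        recorder?.record()
    }

    func stopRecording() {
        recorder?.stop()
        // Restore the default (SoloAmbient) category and deactivate the
        // session, notifying other apps (e.g. Music) so they can resume.
        OWSAudioSession.shared.endAudioActivity()
    }
}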
			
			
commit c646f76335 (pull/1/head)
parent fa9ac5aa46
@ -1,51 +0,0 @@
//
//  Copyright (c) 2017 Open Whisper Systems. All rights reserved.
//

import Foundation
import WebRTC

/**
 * By default WebRTC starts the audio session (PlayAndRecord) immediately upon creating the peer connection
 * but we want to create the peer connection and set up all the signaling channels before we prompt the user
 * for an incoming call. Without manually handling the session, this would result in the user seeing a recording
 * permission request (and recording banner) before they even know they have an incoming call.
 *
 * By using the `useManualAudio` and `isAudioEnabled` attributes of the RTCAudioSession we can delay recording until
 * it makes sense.
 */
class CallAudioSession {

    let TAG = "[CallAudioSession]"

    // Force singleton access
    static let shared = CallAudioSession()
    private init() {}

    /**
     * The private class that manages AVAudioSession for WebRTC
     */
    private let rtcAudioSession = RTCAudioSession.sharedInstance()

    /**
     * This must be called before any audio tracks are added to the peerConnection, else we'll start recording before all
     * our signaling is set up.
     */
    func configure() {
        Logger.info("\(TAG) in \(#function)")
        rtcAudioSession.useManualAudio = true
    }

    /**
     * Because we useManualAudio with our RTCAudioSession, we have to start/stop the recording audio session ourselves.
     * See header for details on manual audio.
     */
    var isRTCAudioEnabled: Bool {
        get {
            return rtcAudioSession.isAudioEnabled
        }
        set {
            rtcAudioSession.isAudioEnabled = newValue
        }
    }
}
@ -0,0 +1,72 @@
//
//  Copyright (c) 2018 Open Whisper Systems. All rights reserved.
//

import Foundation
import AVFoundation

@objc
protocol OWSVideoPlayerDelegate: class {
    @available(iOSApplicationExtension 9.0, *)
    func videoPlayerDidPlayToCompletion(_ videoPlayer: OWSVideoPlayer)
}

@objc
public class OWSVideoPlayer: NSObject {

    let avPlayer: AVPlayer

    weak var delegate: OWSVideoPlayerDelegate?

    @available(iOS 9.0, *)
    init(url: URL) {
        self.avPlayer = AVPlayer(url: url)

        super.init()

        NotificationCenter.default.addObserver(self,
                                               selector: #selector(playerItemDidPlayToCompletion(_:)),
                                               name: NSNotification.Name.AVPlayerItemDidPlayToEndTime,
                                               object: avPlayer.currentItem)
    }

    // MARK: Playback Controls

    @available(iOS 9.0, *)
    public func pause() {
        avPlayer.pause()
        OWSAudioSession.shared.endAudioActivity()
    }

    @available(iOS 9.0, *)
    public func play() {
        OWSAudioSession.shared.setPlaybackCategory()

        guard let item = avPlayer.currentItem else {
            owsFail("\(logTag) video player item was unexpectedly nil")
            return
        }

        if item.currentTime() == item.duration {
            // Rewind for repeated plays, but only if it previously played to end.
            avPlayer.seek(to: kCMTimeZero)
        }

        avPlayer.play()
    }

    @available(iOS 9.0, *)
    @objc(seekToTime:)
    public func seek(to time: CMTime) {
        avPlayer.seek(to: time)
    }

    // MARK: private

    @objc
    @available(iOS 9.0, *)
    private func playerItemDidPlayToCompletion(_ notification: Notification) {
        self.delegate?.videoPlayerDidPlayToCompletion(self)
        OWSAudioSession.shared.endAudioActivity()
    }
}
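For illustration, a minimal sketch of how a caller might adopt the delegate pattern above. MediaPlaybackViewController is a hypothetical name, not part of this commit.

import UIKit

// Hypothetical consumer adopting OWSVideoPlayerDelegate.
class MediaPlaybackViewController: UIViewController, OWSVideoPlayerDelegate {
    var videoPlayer: OWSVideoPlayer?

    func playVideo(at url: URL) {
        let player = OWSVideoPlayer(url: url)
        player.delegate = self
        self.videoPlayer = player
        // play() puts the shared audio session into the Playback category
        // before starting, pausing any background audio (e.g. Music).
        player.play()
    }

    // Fired after AVPlayerItemDidPlayToEndTime. By this point the player
    // has already called endAudioActivity(), so background audio resumes;
    // all that's left is to update the UI.
    func videoPlayerDidPlayToCompletion(_ videoPlayer: OWSVideoPlayer) {
        // e.g. swap the pause button back to a play button
    }
}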
@ -0,0 +1,101 @@
//
//  Copyright (c) 2018 Open Whisper Systems. All rights reserved.
//

import Foundation
import AVFoundation
import WebRTC

@objc
public class OWSAudioSession: NSObject {

    // Force singleton access
    public static let shared = OWSAudioSession()
    private override init() {}
    private let avAudioSession = AVAudioSession.sharedInstance()

    // Ignores hardware mute switch, plays through external speaker
    public func setPlaybackCategory() {
        Logger.debug("\(logTag) in \(#function)")

        // In general, we should have put the audio session back to its default
        // category when we were done with whatever activity required it to be modified.
        assert(avAudioSession.category == AVAudioSessionCategorySoloAmbient)

        do {
            try avAudioSession.setCategory(AVAudioSessionCategoryPlayback)
        } catch {
            owsFail("\(logTag) in \(#function) failed with error: \(error)")
        }
    }

    public func setRecordCategory() -> Bool {
        Logger.debug("\(logTag) in \(#function)")

        // In general, we should have put the audio session back to its default
        // category when we were done with whatever activity required it to be modified.
        assert(avAudioSession.category == AVAudioSessionCategorySoloAmbient)

        assert(avAudioSession.recordPermission() == .granted)

        do {
            try avAudioSession.setCategory(AVAudioSessionCategoryRecord)
            return true
        } catch {
            owsFail("\(logTag) in \(#function) failed with error: \(error)")
            return false
        }
    }

    public func endAudioActivity() {
        Logger.debug("\(logTag) in \(#function)")

        do {
            try avAudioSession.setCategory(AVAudioSessionCategorySoloAmbient)

            // When playing audio in Signal, other apps' audio (e.g. Music) is paused.
            // By notifying when we deactivate, the other app can resume playback.
            try avAudioSession.setActive(false, with: [.notifyOthersOnDeactivation])
        } catch {
            owsFail("\(logTag) in \(#function) failed with error: \(error)")
        }
    }

    // MARK: - WebRTC Audio

    /**
     * By default WebRTC starts the audio session (PlayAndRecord) immediately upon creating the peer connection
     * but we want to create the peer connection and set up all the signaling channels before we prompt the user
     * for an incoming call. Without manually handling the session, this would result in the user seeing a recording
     * permission request (and recording banner) before they even know they have an incoming call.
     *
     * By using the `useManualAudio` and `isAudioEnabled` attributes of the RTCAudioSession we can delay recording until
     * it makes sense.
     */

    /**
     * The private class that manages AVAudioSession for WebRTC
     */
    private let rtcAudioSession = RTCAudioSession.sharedInstance()

    /**
     * This must be called before any audio tracks are added to the peerConnection, else we'll start recording before all
     * our signaling is set up.
     */
    public func configureRTCAudio() {
        Logger.info("\(logTag) in \(#function)")
        rtcAudioSession.useManualAudio = true
    }

    /**
     * Because we useManualAudio with our RTCAudioSession, we have to start/stop the recording audio session ourselves.
     * See header for details on manual audio.
     */
    public var isRTCAudioEnabled: Bool {
        get {
            return rtcAudioSession.isAudioEnabled
        }
        set {
            rtcAudioSession.isAudioEnabled = newValue
        }
    }
}
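For illustration, a minimal sketch of the manual-audio call flow the comments above describe. CallSetupExample and its methods are hypothetical stand-ins for the real call service, which this commit does not modify.

// Hypothetical call-flow consumer of the WebRTC manual-audio API.
class CallSetupExample {

    func prepareIncomingCall() {
        // Must happen before any audio tracks are added to the peer
        // connection, so WebRTC doesn't start recording (and trigger the
        // microphone permission prompt) before the user sees the call.
        OWSAudioSession.shared.configureRTCAudio()
        // ... create the peer connection, add tracks, set up signaling ...
    }

    func userDidAnswer() {
        // The user accepted, so it now "makes sense" to let WebRTC start
        // the PlayAndRecord session.
        OWSAudioSession.shared.isRTCAudioEnabled = true
    }

    func userDidHangUp() {
        // Stop WebRTC audio before tearing down, avoiding an audio blip
        // after hangup, then restore and deactivate the shared session.
        OWSAudioSession.shared.isRTCAudioEnabled = false
        OWSAudioSession.shared.endAudioActivity()
    }
}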