Support stereo-audio
parent 764110b5fb
commit d3c1dfbe50
@@ -16,7 +16,7 @@ final class AppDelegate: UIResponder, UIApplicationDelegate {
             try session.setPreferredSampleRate(44_100)
             // https://stackoverflow.com/questions/51010390/avaudiosession-setcategory-swift-4-2-ios-12-play-sound-on-silent
             if #available(iOS 10.0, *) {
-                try session.setCategory(.playAndRecord, mode: .default, options: [.allowBluetooth])
+                try session.setCategory(.playAndRecord, mode: .default, options: [.defaultToSpeaker, .allowBluetooth])
             } else {
                 session.perform(NSSelectorFromString("setCategory:withOptions:error:"), with: AVAudioSession.Category.playAndRecord, with: [AVAudioSession.CategoryOptions.allowBluetooth])
                 try? session.setMode(.default)
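Note: a minimal sketch (not part of this commit) of the surrounding AVAudioSession setup; the shared-instance and activation calls are standard API, and the do/catch framing is assumed:

    import AVFoundation

    let session = AVAudioSession.sharedInstance()
    do {
        // same preferred sample rate as in the hunk above
        try session.setPreferredSampleRate(44_100)
        // .defaultToSpeaker routes play-and-record output to the speaker instead of the receiver
        try session.setCategory(.playAndRecord, mode: .default, options: [.defaultToSpeaker, .allowBluetooth])
        try session.setActive(true)
    } catch {
        print(error)
    }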
@@ -67,14 +67,12 @@ final class LiveViewController: UIViewController {
     override func viewWillAppear(_ animated: Bool) {
         logger.info("viewWillAppear")
         super.viewWillAppear(animated)
-        /*
         rtmpStream.attachAudio(AVCaptureDevice.default(for: .audio)) { error in
             logger.warn(error.description)
         }
         rtmpStream.attachCamera(DeviceUtil.device(withPosition: currentPosition)) { error in
             logger.warn(error.description)
         }
-        */
         rtmpStream.addObserver(self, forKeyPath: "currentFPS", options: .new, context: nil)
         lfView?.attachStream(rtmpStream)
     }
@@ -142,7 +140,7 @@ final class LiveViewController: UIViewController {
         if let data: ASObject = e.data as? ASObject, let code: String = data["code"] as? String {
             switch code {
             case RTMPConnection.Code.connectSuccess.rawValue:
-                rtmpStream!.play(Preference.defaultInstance.streamName!)
+                rtmpStream!.publish(Preference.defaultInstance.streamName!)
                 // sharedObject!.connect(rtmpConnection)
             default:
                 break
@@ -1,6 +1,6 @@
 struct Preference {
     static var defaultInstance: Preference = Preference()

-    var uri: String? = "rtmp://test:test@192.168.11.15/vod"
-    var streamName: String? = "sample-mono.mp4"
+    var uri: String? = "rtmp://test:test@192.168.11.15/live"
+    var streamName: String? = "live"
 }
@@ -2,7 +2,7 @@ import AVFoundation

 protocol AudioConverterDelegate: class {
     func didSetFormatDescription(audio formatDescription: CMFormatDescription?)
-    func sampleOutput(audio bytes: UnsafeMutableRawPointer?, count: UInt32, presentationTimeStamp: CMTime)
+    func sampleOutput(audio data: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime)
 }

 // MARK: -
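Note: a minimal sketch (not part of this commit) of a type conforming to the revised delegate; the type name AudioTap is illustrative:

    import AVFoundation
    import AudioToolbox

    final class AudioTap: AudioConverterDelegate {
        func didSetFormatDescription(audio formatDescription: CMFormatDescription?) {
            // the converter's negotiated output format arrives here
        }

        func sampleOutput(audio data: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime) {
            guard !data.isEmpty else { return }
            // with non-interleaved PCM, data[i] holds one channel of samples
        }
    }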
@@ -33,7 +33,7 @@ final class AudioConverter: NSObject {
            case .AAC:
                return UInt32(MPEG4ObjectID.AAC_LC.rawValue)
            case .PCM:
-               return kAudioFormatFlagsNativeFloatPacked
+               return kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked | kAudioFormatFlagIsFloat
            }
        }

@@ -46,6 +46,15 @@ final class AudioConverter: NSObject {
            }
        }

+       var packetSize: UInt32 {
+           switch self {
+           case .AAC:
+               return 1
+           case .PCM:
+               return 1024
+           }
+       }
+
        var bitsPerChannel: UInt32 {
            switch self {
            case .AAC:
@@ -55,6 +64,15 @@ final class AudioConverter: NSObject {
            }
        }

+       var bytePerFrame: UInt32 {
+           switch self {
+           case .AAC:
+               return 0
+           case .PCM:
+               return (bitsPerChannel / 8)
+           }
+       }
+
        var inClassDescriptions: [AudioClassDescription] {
            switch self {
            case .AAC:
@@ -71,12 +89,12 @@ final class AudioConverter: NSObject {
            }
        }

-       func bytePerFrame(_ channel: UInt32) -> UInt32 {
+       func mamimumBuffers(_ channel: UInt32) -> Int {
            switch self {
            case .AAC:
-               return 0
+               return 1
            case .PCM:
-               return (bitsPerChannel / 8) * channel
+               return Int(channel)
            }
        }

@@ -87,9 +105,9 @@ final class AudioConverter: NSObject {
            mSampleRate: sampleRate == 0 ? inSourceFormat.mSampleRate : sampleRate,
            mFormatID: formatID,
            mFormatFlags: formatFlags,
-           mBytesPerPacket: bytePerFrame(destinationChannels),
+           mBytesPerPacket: bytePerFrame,
            mFramesPerPacket: framesPerPacket,
-           mBytesPerFrame: bytePerFrame(destinationChannels),
+           mBytesPerFrame: bytePerFrame,
            mChannelsPerFrame: destinationChannels,
            mBitsPerChannel: bitsPerChannel,
            mReserved: 0
@@ -223,7 +241,7 @@ final class AudioConverter: NSObject {
        currentBufferList?.unsafeMutablePointer.pointee.mBuffers.mNumberChannels = 1
        currentBufferList?.unsafeMutablePointer.pointee.mBuffers.mData = UnsafeMutableRawPointer(mutating: bytes)
        currentBufferList?.unsafeMutablePointer.pointee.mBuffers.mDataByteSize = UInt32(count)
-       convert(Int(1024 * destination.bytePerFrame(1)), presentationTimeStamp: presentationTimeStamp)
+       convert(Int(1024 * destination.bytePerFrame), presentationTimeStamp: presentationTimeStamp)
    }

    func encodeSampleBuffer(_ sampleBuffer: CMSampleBuffer) {
@@ -266,12 +284,15 @@ final class AudioConverter: NSObject {
    private func convert(_ dataBytesSize: Int = 0, presentationTimeStamp: CMTime) {
        var finished: Bool = false
        repeat {
-           var ioOutputDataPacketSize: UInt32 = 1024
+           var ioOutputDataPacketSize: UInt32 = destination.packetSize

-           let outOutputData: UnsafeMutableAudioBufferListPointer = AudioBufferList.allocate(maximumBuffers: 1)
-           outOutputData[0].mNumberChannels = inDestinationFormat.mChannelsPerFrame
-           outOutputData[0].mDataByteSize = UInt32(dataBytesSize)
-           outOutputData[0].mData = UnsafeMutableRawPointer.allocate(byteCount: dataBytesSize, alignment: 0)
+           let mamimumBuffers = destination.mamimumBuffers((channels == 0) ? inSourceFormat?.mChannelsPerFrame ?? 1 : channels)
+           let outOutputData: UnsafeMutableAudioBufferListPointer = AudioBufferList.allocate(maximumBuffers: mamimumBuffers)
+           for i in 0..<mamimumBuffers {
+               outOutputData[i].mNumberChannels = inDestinationFormat.mChannelsPerFrame
+               outOutputData[i].mDataByteSize = UInt32(dataBytesSize)
+               outOutputData[i].mData = UnsafeMutableRawPointer.allocate(byteCount: dataBytesSize, alignment: 0)
+           }

            let status: OSStatus = AudioConverterFillComplexBuffer(
                converter,
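Note: because the decoded PCM is now allocated non-interleaved, there is one AudioBuffer per channel in the list handed to the delegate. A minimal sketch (not part of this commit) of iterating such a list; the function name is illustrative:

    import AudioToolbox

    func forEachChannel(of list: UnsafeMutableAudioBufferListPointer) {
        // with kAudioFormatFlagIsNonInterleaved, list[i] holds the samples of channel i
        for (channel, buffer) in list.enumerated() {
            guard let mData = buffer.mData else { continue }
            let samples = mData.assumingMemoryBound(to: Float32.self)
            let sampleCount = Int(buffer.mDataByteSize) / MemoryLayout<Float32>.size
            _ = (channel, samples, sampleCount) // e.g. inspect samples[0..<sampleCount]
        }
    }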
@@ -286,16 +307,16 @@ final class AudioConverter: NSObject {
            // kAudioConverterErr_InvalidInputSize: perhaps mistake. but can support macOS BuiltIn Mic #61
            case noErr, kAudioConverterErr_InvalidInputSize:
                delegate?.sampleOutput(
-                   audio: outOutputData[0].mData,
-                   count: outOutputData[0].mDataByteSize,
+                   audio: outOutputData,
                    presentationTimeStamp: presentationTimeStamp
                )
            case -1:
-               delegate?.sampleOutput(
-                   audio: outOutputData[0].mData,
-                   count: outOutputData[0].mDataByteSize,
-                   presentationTimeStamp: presentationTimeStamp
-               )
+               if destination == .PCM {
+                   delegate?.sampleOutput(
+                       audio: outOutputData,
+                       presentationTimeStamp: presentationTimeStamp
+                   )
+               }
                finished = true
            default:
                finished = true
@@ -332,7 +353,8 @@ final class AudioConverter: NSObject {

        memcpy(ioData, bufferList.unsafePointer, bufferListSize)
        ioNumberDataPackets.pointee = 1
-       if outDataPacketDescription != nil {
+
+       if destination == .PCM && outDataPacketDescription != nil {
            audioStreamPacketDescription.mStartOffset = 0
            audioStreamPacketDescription.mDataByteSize = currentBufferList?.unsafePointer.pointee.mBuffers.mDataByteSize ?? 0
            audioStreamPacketDescription.mVariableFramesInPacket = 0
@@ -209,12 +209,13 @@ extension TSWriter: AudioConverterDelegate {
        audioConfig = AudioSpecificConfig(formatDescription: formatDescription)
    }

-   func sampleOutput(audio bytes: UnsafeMutableRawPointer?, count: UInt32, presentationTimeStamp: CMTime) {
+   func sampleOutput(audio data: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime) {
+       guard !data.isEmpty else { return }
        writeSampleBuffer(
            TSWriter.defaultAudioPID,
            streamID: 192,
-           bytes: bytes?.assumingMemoryBound(to: UInt8.self),
-           count: count,
+           bytes: data[0].mData?.assumingMemoryBound(to: UInt8.self),
+           count: data[0].mDataByteSize,
            presentationTimeStamp: presentationTimeStamp,
            decodeTimeStamp: .invalid,
            randomAccessIndicator: true
@@ -103,12 +103,14 @@ extension AVMixer {
 }

 extension AVMixer {
-    final public func startPlaying() {
+    final public func startPlaying(_ audioEngine: AVAudioEngine?) {
+        audioIO.audioEngine = audioEngine
         audioIO.encoder.delegate = audioIO
         videoIO.queue.startRunning()
     }

     final public func stopPlaying() {
+        audioIO.audioEngine = nil
         audioIO.encoder.delegate = nil
         videoIO.queue.stopRunning()
     }
@@ -4,46 +4,46 @@ final class AudioIOComponent: IOComponent {
     lazy var encoder: AudioConverter = AudioConverter()
     let lockQueue = DispatchQueue(label: "com.haishinkit.HaishinKit.AudioIOComponent.lock")

-    private var _audioEngine: AVAudioEngine?
-    private var audioEngine: AVAudioEngine! {
-        get {
-            if _audioEngine == nil {
-                _audioEngine = AVAudioEngine()
-            }
-            return _audioEngine
-        }
-        set {
-            if _audioEngine == newValue {
-                return
-            }
-            _audioEngine = nil
+    var audioEngine: AVAudioEngine?
+
+    var soundTransform: SoundTransform = .init() {
+        didSet {
+            soundTransform.apply(playerNode)
         }
     }

     private var _playerNode: AVAudioPlayerNode?
     private var playerNode: AVAudioPlayerNode! {
         get {
             if _playerNode == nil {
                 _playerNode = AVAudioPlayerNode()
+                audioEngine?.attach(_playerNode!)
             }
             return _playerNode
         }
         set {
-            if _playerNode == newValue {
-                return
+            if let playerNode = _playerNode {
+                audioEngine?.detach(playerNode)
             }
-            _playerNode = nil
+            _playerNode = newValue
         }
     }

     private var audioFormat: AVAudioFormat? {
         didSet {
-            guard let audioEngine = audioEngine else { return }
-            audioEngine.attach(playerNode)
+            guard let audioFormat = audioFormat, let audioEngine = audioEngine else {
+                return
+            }
             nstry({
-                self.audioEngine.connect(self.playerNode, to: audioEngine.outputNode, format: self.audioFormat)
+                audioEngine.connect(self.playerNode, to: audioEngine.outputNode, format: audioFormat)
             }, { exeption in
-                logger.warn("\(exeption)")
+                logger.warn(exeption)
             })
-            try? audioEngine.start()
+            do {
+                try audioEngine.start()
+            } catch let error {
+                logger.warn(error)
+            }
         }
     }
@@ -148,25 +148,33 @@ extension AudioIOComponent: AudioConverterDelegate {
         if #available(iOSApplicationExtension 9.0, *) {
             audioFormat = AVAudioFormat(cmAudioFormatDescription: formatDescription)
         } else {
             guard let asbd = formatDescription.streamBasicDescription?.pointee else {
                 return
             }
             audioFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: asbd.mSampleRate, channels: asbd.mChannelsPerFrame, interleaved: false)
         }
     }

-    func sampleOutput(audio bytes: UnsafeMutableRawPointer?, count: UInt32, presentationTimeStamp: CMTime) {
+    func sampleOutput(audio data: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime) {
+        guard !data.isEmpty else { return }
+
         guard
-            let bytes = bytes,
             let audioFormat = audioFormat,
-            let buffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: count / 4) else {
+            let buffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: data[0].mDataByteSize / 4) else {
             return
         }

         buffer.frameLength = buffer.frameCapacity
-        memcpy(buffer.mutableAudioBufferList.pointee.mBuffers.mData, bytes, Int(count))
-        buffer.mutableAudioBufferList.pointee.mBuffers.mDataByteSize = count
-        buffer.mutableAudioBufferList.pointee.mBuffers.mNumberChannels = 1
+        let bufferList = UnsafeMutableAudioBufferListPointer(buffer.mutableAudioBufferList)
+        for i in 0..<bufferList.count {
+            guard let mData = data[i].mData else { continue }
+            memcpy(bufferList[i].mData, mData, Int(data[i].mDataByteSize))
+            bufferList[i].mDataByteSize = data[i].mDataByteSize
+            bufferList[i].mNumberChannels = 1
+        }

         nstry({
-            self.playerNode.scheduleBuffer(buffer, completionHandler: {
-            })
+            self.playerNode.scheduleBuffer(buffer, completionHandler: nil)
             if !self.playerNode.isPlaying {
                 self.playerNode.play()
             }
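Note: the playback path above follows the standard AVAudioEngine pattern: attach a player node, connect it to the output node with the decoded format, start the engine, then schedule PCM buffers. A minimal standalone sketch (not part of this commit; the 44.1 kHz stereo format is illustrative):

    import AVFoundation

    let engine = AVAudioEngine()
    let playerNode = AVAudioPlayerNode()
    let format = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44_100, channels: 2, interleaved: false)!

    engine.attach(playerNode)
    engine.connect(playerNode, to: engine.outputNode, format: format)
    try? engine.start()

    if let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: 1024) {
        buffer.frameLength = buffer.frameCapacity // silence here; decoded PCM in the code above
        playerNode.scheduleBuffer(buffer, completionHandler: nil)
        if !playerNode.isPlaying {
            playerNode.play()
        }
    }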
@@ -1,25 +1,18 @@
-import Foundation
-import AudioToolbox
+import AVFoundation

+/// The SoundTransform class
 public struct SoundTransform {
+    static public let defaultVolume: Float = 1.0
+    static public let defaultPan: Float = 0

-    static public let defaultValue: Float32 = 1.0
-    static public let defaultPlayRate: Float32 = 1.0
-    static public let defaultPitch: Float32 = 1200
-    static public let defaultVolumeRampTime: Float32 = 0
-    static public let defaultPan: Float32 = 0
+    /// The volume, ranging from 0 (silent) to 1 (full volume)
+    public var volume = SoundTransform.defaultVolume
+    /// The panning of the sound
+    public var pan = SoundTransform.defaultPan

-    public var volume: Float32 = SoundTransform.defaultValue
-    public var playRate: Float32 = SoundTransform.defaultPlayRate
-    public var pitch: Float32 = SoundTransform.defaultPitch
-    public var volumeRampTime: Float32 = SoundTransform.defaultVolumeRampTime
-    public var pan: Float32 = SoundTransform.defaultPan

-    func setParameter(_ inAQ: AudioQueueRef) {
-        AudioQueueSetParameter(inAQ, kAudioQueueParam_Volume, volume)
-        AudioQueueSetParameter(inAQ, kAudioQueueParam_PlayRate, playRate)
-        AudioQueueSetParameter(inAQ, kAudioQueueParam_Pitch, pitch)
-        AudioQueueSetParameter(inAQ, kAudioQueueParam_VolumeRampTime, volumeRampTime)
-        AudioQueueSetParameter(inAQ, kAudioQueueParam_Pan, pan)
+    func apply(_ playerNode: AVAudioPlayerNode?) {
+        playerNode?.volume = volume
+        playerNode?.pan = pan
     }
 }
@@ -1,4 +1,5 @@
 import Foundation
+import AVFoundation

 /**
  flash.net.Responder for Swift
@@ -206,6 +207,19 @@ open class RTMPConnection: EventDispatcher {
     var windowSizeS: Int64 = RTMPConnection.defaultWindowSizeS
     var currentTransactionId: Int = 0

+    private var _audioEngine: AVAudioEngine?
+    var audioEngine: AVAudioEngine! {
+        get {
+            if _audioEngine == nil {
+                _audioEngine = AVAudioEngine()
+            }
+            return _audioEngine
+        }
+        set {
+            _audioEngine = newValue
+        }
+    }
+
     private var timer: Timer? {
         didSet {
             oldValue?.invalidate()
@@ -33,13 +33,13 @@ extension RTMPMuxer: AudioConverterDelegate {
         delegate?.sampleOutput(audio: buffer, withTimestamp: 0, muxer: self)
     }

-    func sampleOutput(audio bytes: UnsafeMutableRawPointer?, count: UInt32, presentationTimeStamp: CMTime) {
+    func sampleOutput(audio data: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime) {
         let delta: Double = (audioTimestamp == CMTime.zero ? 0 : presentationTimeStamp.seconds - audioTimestamp.seconds) * 1000
-        guard let bytes = bytes, 0 <= delta else {
+        guard let bytes = data[0].mData, 0 <= delta else {
             return
         }
         var buffer: Data = Data([RTMPMuxer.aac, FLVAACPacketType.raw.rawValue])
-        buffer.append(bytes.assumingMemoryBound(to: UInt8.self), count: Int(count))
+        buffer.append(bytes.assumingMemoryBound(to: UInt8.self), count: Int(data[0].mDataByteSize))
         delegate?.sampleOutput(audio: buffer, withTimestamp: delta, muxer: self)
         audioTimestamp = presentationTimeStamp
     }
@@ -210,6 +210,10 @@ open class RTMPStream: NetStream {
     open internal(set) var info: RTMPStreamInfo = RTMPStreamInfo()
     open private(set) var objectEncoding: UInt8 = RTMPConnection.defaultObjectEncoding
     @objc open private(set) dynamic var currentFPS: UInt16 = 0
+    open var soundTransform: SoundTransform {
+        get { return mixer.audioIO.soundTransform }
+        set { mixer.audioIO.soundTransform = newValue }
+    }

     var id: UInt32 = RTMPStream.defaultID
     var readyState: ReadyState = .initialized {
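Note: a minimal usage sketch (not part of this commit) for the new soundTransform property, assuming an existing RTMPStream named stream:

    var soundTransform = stream.soundTransform
    soundTransform.volume = 0.5 // 0 is silent, 1 is full volume
    soundTransform.pan = -1.0   // -1 is hard left, 1 is hard right
    stream.soundTransform = soundTransform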
@@ -242,7 +246,7 @@ open class RTMPStream: NetStream {
             info.clear()
             delegate?.clear()
         case .playing:
-            mixer.startPlaying()
+            mixer.startPlaying(rtmpConnection.audioEngine)
         case .publish:
             muxer.dispose()
             muxer.delegate = self
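Note: with these changes the audio engine held by RTMPConnection is handed to the mixer automatically when a stream enters .playing. A minimal sketch (not part of this commit) of starting playback from application code, assuming the Preference values above:

    let connection = RTMPConnection()
    let stream = RTMPStream(connection: connection)

    connection.connect(Preference.defaultInstance.uri!)
    // once RTMPConnection.Code.connectSuccess arrives:
    stream.play(Preference.defaultInstance.streamName!)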
@@ -1,10 +1,12 @@
 import Foundation

-// @see https://www.objc.io/blog/2018/12/18/atomic-variables/
+/// Atomic<T> class
+/// @see https://www.objc.io/blog/2018/12/18/atomic-variables/
 final public class Atomic<A> {
     private let queue = DispatchQueue(label: "com.haishinkit.HaishinKit.Atomic")
     private var _value: A

+    /// Getter for the value.
     public var value: A {
         return queue.sync { self._value }
     }
@@ -13,6 +15,7 @@ final public class Atomic<A> {
         self._value = value
     }

+    // Setter for the value.
     public func mutate(_ transform: (inout A) -> Void) {
         queue.sync {
             transform(&self._value)