Refactor AudioCodec with AVAudioConverter.

shogo4405 2023-02-26 20:37:35 +09:00
parent ad3d88a593
commit a22a51080d
14 changed files with 395 additions and 414 deletions
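At its core, this refactor replaces the AudioToolbox C pipeline (AudioConverterNewSpecific plus an AudioConverterFillComplexBuffer input callback) with AVAudioConverter operating on AVAudioPCMBuffer and AVAudioCompressedBuffer. Below is a minimal, self-contained sketch of the new conversion style; the 44.1 kHz mono Int16-to-AAC formats are illustrative assumptions, not the library's fixed values.

import AVFoundation

// Illustrative output format: AAC, 1024 frames per packet.
var aacDescription = AudioStreamBasicDescription(
    mSampleRate: 44100, mFormatID: kAudioFormatMPEG4AAC, mFormatFlags: 0,
    mBytesPerPacket: 0, mFramesPerPacket: 1024, mBytesPerFrame: 0,
    mChannelsPerFrame: 1, mBitsPerChannel: 0, mReserved: 0)

guard
    let inputFormat = AVAudioFormat(commonFormat: .pcmFormatInt16, sampleRate: 44100, channels: 1, interleaved: true),
    let outputFormat = AVAudioFormat(streamDescription: &aacDescription),
    let converter = AVAudioConverter(from: inputFormat, to: outputFormat),
    let pcmBuffer = AVAudioPCMBuffer(pcmFormat: inputFormat, frameCapacity: 1024) else {
    fatalError("failed to set up formats")
}
pcmBuffer.frameLength = 1024 // assume this holds 1024 captured samples

let outputBuffer = AVAudioCompressedBuffer(format: outputFormat, packetCapacity: 1, maximumPacketSize: 1024)
var error: NSError?
// The input block hands the converter one PCM buffer per request.
converter.convert(to: outputBuffer, error: &error) { _, status in
    status.pointee = .haveData
    return pcmBuffer
}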

View File

@@ -135,7 +135,7 @@ final class LiveViewController: UIViewController {
@IBAction func on(slider: UISlider) {
if slider == audioBitrateSlider {
audioBitrateLabel?.text = "audio \(Int(slider.value))/kbps"
rtmpStream.audioSettings.bitRate = UInt32(slider.value * 1000)
rtmpStream.audioSettings.bitRate = Int(slider.value * 1000)
}
if slider == videoBitrateSlider {
videoBitrateLabel?.text = "video \(Int(slider.value))/kbps"

View File

@@ -440,6 +440,7 @@
BCCBCE9829A90D880095B51C /* NALUnit.swift in Sources */ = {isa = PBXBuildFile; fileRef = BCCBCE9629A90D880095B51C /* NALUnit.swift */; };
BCCBCE9929A90D880095B51C /* NALUnit.swift in Sources */ = {isa = PBXBuildFile; fileRef = BCCBCE9629A90D880095B51C /* NALUnit.swift */; };
BCCBCE9B29A9D96A0095B51C /* NALUnitReaderTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = BCCBCE9A29A9D96A0095B51C /* NALUnitReaderTests.swift */; };
BCCBCEA029ADF55A0095B51C /* AudioCodecBufferTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = BCCBCE9F29ADF55A0095B51C /* AudioCodecBufferTests.swift */; };
BCD1DC3A260627C300A1C593 /* Logboard.xcframework in Frameworks */ = {isa = PBXBuildFile; fileRef = BC34DFD125EBB12C005F975A /* Logboard.xcframework */; };
BCD63AB526FDF1250084842D /* iOSSwiftUIApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = BCD63AB426FDF1250084842D /* iOSSwiftUIApp.swift */; };
BCD63AB726FDF1250084842D /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = BCD63AB626FDF1250084842D /* ContentView.swift */; };
@@ -781,6 +782,7 @@
BCCBCE9429A7C9C90095B51C /* AVCFormatStreamTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AVCFormatStreamTests.swift; sourceTree = "<group>"; };
BCCBCE9629A90D880095B51C /* NALUnit.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NALUnit.swift; sourceTree = "<group>"; };
BCCBCE9A29A9D96A0095B51C /* NALUnitReaderTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NALUnitReaderTests.swift; sourceTree = "<group>"; };
BCCBCE9F29ADF55A0095B51C /* AudioCodecBufferTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AudioCodecBufferTests.swift; sourceTree = "<group>"; };
BCD63AB226FDF1250084842D /* Example iOS+SwiftUI.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = "Example iOS+SwiftUI.app"; sourceTree = BUILT_PRODUCTS_DIR; };
BCD63AB426FDF1250084842D /* iOSSwiftUIApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = iOSSwiftUIApp.swift; sourceTree = "<group>"; };
BCD63AB626FDF1250084842D /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = "<group>"; };
@@ -1043,6 +1045,7 @@
295018191FFA196800358E10 /* Codec */ = {
isa = PBXGroup;
children = (
BCCBCE9F29ADF55A0095B51C /* AudioCodecBufferTests.swift */,
2950181F1FFA1BD700358E10 /* AudioCodecTests.swift */,
BC0BF4F62986CE8700D72CB4 /* VideoCodecTests.swift */,
);
@@ -1995,6 +1998,7 @@
290EA8911DFB616000053022 /* SwiftCore+ExtensionTests.swift in Sources */,
BC0BF4F529866FDE00D72CB4 /* IOMixerTests.swift in Sources */,
290EA89A1DFB619600053022 /* TSProgramTests.swift in Sources */,
BCCBCEA029ADF55A0095B51C /* AudioCodecBufferTests.swift in Sources */,
BC0BF4F72986CE8700D72CB4 /* VideoCodecTests.swift in Sources */,
290EA8931DFB617800053022 /* HTTPRequestTests.swift in Sources */,
2976077F20A89FBB00DCF24F /* RTMPMessageTests.swift in Sources */,

View File

@@ -5,9 +5,11 @@ import AVFoundation
*/
public protocol AudioCodecDelegate: AnyObject {
/// Tells the receiver to set a formatDescription.
func audioCodec(_ codec: AudioCodec, didSet formatDescription: CMFormatDescription?)
func audioCodec(_ codec: AudioCodec, didSet outputFormat: AVAudioFormat)
/// Tells the receiver to output an encoded or decoded sampleBuffer.
func audioCodec(_ codec: AudioCodec, didOutput sample: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime)
func audioCodec(_ codec: AudioCodec, didOutput audioBuffer: AVAudioBuffer, presentationTimeStamp: CMTime)
/// Tells the receiver that an error occurred.
func audioCodec(_ codec: AudioCodec, errorOccurred error: AudioCodec.Error)
}
// MARK: -
@@ -17,268 +19,109 @@ public protocol AudioCodecDelegate: AnyObject {
*/
public class AudioCodec {
/// The AudioCodec error domain codes.
enum Error: Swift.Error {
case setPropertyError(id: AudioConverterPropertyID, status: OSStatus)
public enum Error: Swift.Error {
case failedToConvert(error: NSError)
}
/// The default minimum bitrate for an AudioCodec; the value is 8000.
public static let minimumBitrate: UInt32 = 8 * 1000
/// The default channels for an AudioCodec; a value of 0 means it follows the input source.
public static let defaultChannels: UInt32 = 0
/// The default sampleRate for an AudioCodec; a value of 0 means it follows the input source.
public static let defaultSampleRate: Double = 0
/// The default maximum buffers for an AudioCodec.
public static let defaultMaximumBuffers: Int = 1
private static let numSamples: Int = 1024
/// Specifies the output format.
public var destination: AudioCodecFormat = .aac
/// Specifies the delegate.
public weak var delegate: AudioCodecDelegate?
/// Indicates whether this instance is running to process (true) or not (false).
public private(set) var isRunning: Atomic<Bool> = .init(false)
/// Specifies the settings for audio codec.
public var settings: AudioCodecSettings = .default {
didSet {
if settings.bitRate != oldValue.bitRate {
lockQueue.async {
if let format = self._inDestinationFormat {
self.setBitrateUntilNoErr(self.settings.bitRate * format.mChannelsPerFrame)
}
}
}
}
}
var sampleRate: Double = AudioCodec.defaultSampleRate
var actualBitrate: UInt32 = AudioCodecSettings.default.bitRate {
didSet {
logger.info(actualBitrate)
}
}
var channels: UInt32 = AudioCodec.defaultChannels
var formatDescription: CMFormatDescription? {
didSet {
guard !CMFormatDescriptionEqual(formatDescription, otherFormatDescription: oldValue) else {
return
}
logger.info(formatDescription.debugDescription)
delegate?.audioCodec(self, didSet: formatDescription)
}
}
var lockQueue = DispatchQueue(label: "com.haishinkit.HaishinKit.AudioConverter.lock")
var inSourceFormat: AudioStreamBasicDescription? {
didSet {
guard let inSourceFormat = inSourceFormat, inSourceFormat != oldValue else {
return
}
_converter = nil
formatDescription = nil
_inDestinationFormat = nil
logger.info("\(String(describing: inSourceFormat))")
let nonInterleaved = inSourceFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved != 0
maximumBuffers = nonInterleaved ? Int(inSourceFormat.mChannelsPerFrame) : AudioCodec.defaultMaximumBuffers
currentAudioBuffer = AudioCodecBuffer(inSourceFormat, numSamples: AudioCodec.numSamples)
settings.apply(audioConverter, oldValue: oldValue)
}
}
var effects: Set<AudioEffect> = []
private let numSamples = AudioCodec.numSamples
private var maximumBuffers: Int = AudioCodec.defaultMaximumBuffers
private var currentAudioBuffer = AudioCodecBuffer(AudioStreamBasicDescription(mSampleRate: 0, mFormatID: 0, mFormatFlags: 0, mBytesPerPacket: 0, mFramesPerPacket: 0, mBytesPerFrame: 0, mChannelsPerFrame: 1, mBitsPerChannel: 0, mReserved: 0))
private var _inDestinationFormat: AudioStreamBasicDescription?
private var inDestinationFormat: AudioStreamBasicDescription {
get {
if _inDestinationFormat == nil {
_inDestinationFormat = destination.audioStreamBasicDescription(inSourceFormat, sampleRate: sampleRate, channels: channels)
CMAudioFormatDescriptionCreate(
allocator: kCFAllocatorDefault,
asbd: &_inDestinationFormat!,
layoutSize: 0,
layout: nil,
magicCookieSize: 0,
magicCookie: nil,
extensions: nil,
formatDescriptionOut: &formatDescription
)
var lockQueue = DispatchQueue(label: "com.haishinkit.HaishinKit.AudioCodec.lock")
var inSourceFormat: AudioStreamBasicDescription? {
didSet {
guard var inSourceFormat, inSourceFormat != oldValue else {
return
}
return _inDestinationFormat!
}
set {
_inDestinationFormat = newValue
audioBuffer = .init(&inSourceFormat)
audioConverter = makeAudioConverter(&inSourceFormat)
}
}
private var audioConverter: AVAudioConverter?
private var audioBuffer: AudioCodecBuffer?
private var audioStreamPacketDescription = AudioStreamPacketDescription(mStartOffset: 0, mVariableFramesInPacket: 0, mDataByteSize: 0)
private let inputDataProc: AudioConverterComplexInputDataProc = {(_: AudioConverterRef, ioNumberDataPackets: UnsafeMutablePointer<UInt32>, ioData: UnsafeMutablePointer<AudioBufferList>, outDataPacketDescription: UnsafeMutablePointer<UnsafeMutablePointer<AudioStreamPacketDescription>?>?, inUserData: UnsafeMutableRawPointer?) in
Unmanaged<AudioCodec>.fromOpaque(inUserData!).takeUnretainedValue().onInputDataForAudioConverter(
ioNumberDataPackets,
ioData: ioData,
outDataPacketDescription: outDataPacketDescription
)
}
private var _converter: AudioConverterRef?
private var converter: AudioConverterRef {
var status: OSStatus = noErr
if _converter == nil {
var inClassDescriptions = destination.inClassDescriptions
status = AudioConverterNewSpecific(
&inSourceFormat!,
&inDestinationFormat,
UInt32(inClassDescriptions.count),
&inClassDescriptions,
&_converter
)
setBitrateUntilNoErr(settings.bitRate * inDestinationFormat.mChannelsPerFrame)
}
if status != noErr {
logger.warn("\(status)")
}
return _converter!
}
/// Encodes byte data.
public func encodeBytes(_ bytes: UnsafeMutableRawPointer?, count: Int, presentationTimeStamp: CMTime) {
guard isRunning.value else {
currentAudioBuffer.clear()
/// Appends a CMSampleBuffer.
public func appendSampleBuffer(_ sampleBuffer: CMSampleBuffer, offset: Int = 0) {
guard CMSampleBufferDataIsReady(sampleBuffer), isRunning.value, let audioBuffer, let audioConverter, let buffer = makeOutputBuffer() else {
return
}
currentAudioBuffer.write(bytes, count: count, presentationTimeStamp: presentationTimeStamp)
convert(numSamples * Int(destination.bytesPerFrame), presentationTimeStamp: presentationTimeStamp)
}
/// Encodes a CMSampleBuffer.
public func encodeSampleBuffer(_ sampleBuffer: CMSampleBuffer, offset: Int = 0) {
guard let format = sampleBuffer.formatDescription, CMSampleBufferDataIsReady(sampleBuffer) else {
currentAudioBuffer.clear()
return
}
inSourceFormat = format.streamBasicDescription?.pointee
guard isRunning.value else {
return
}
do {
let numSamples = try currentAudioBuffer.write(sampleBuffer, offset: offset)
if currentAudioBuffer.isReady {
for effect in effects {
effect.execute(currentAudioBuffer.input, format: inSourceFormat)
}
convert(currentAudioBuffer.maxLength, presentationTimeStamp: currentAudioBuffer.presentationTimeStamp)
let numSamples = audioBuffer.appendSampleBuffer(sampleBuffer, offset: offset)
if audioBuffer.isReady {
for effect in effects {
effect.execute(audioBuffer.current, presentationTimeStamp: audioBuffer.presentationTimeStamp)
}
if offset + numSamples < sampleBuffer.numSamples {
encodeSampleBuffer(sampleBuffer, offset: offset + numSamples)
var error: NSError?
audioConverter.convert(to: buffer, error: &error) { _, status in
status.pointee = .haveData
return audioBuffer.current
}
} catch {
logger.error(error)
}
}
@inline(__always)
private func convert(_ dataBytesSize: Int, presentationTimeStamp: CMTime) {
var finished = false
repeat {
var ioOutputDataPacketSize: UInt32 = destination.packetSize
let maximumBuffers = destination.maximumBuffers((channels == 0) ? inSourceFormat?.mChannelsPerFrame ?? 1 : channels)
let outOutputData: UnsafeMutableAudioBufferListPointer = AudioBufferList.allocate(maximumBuffers: maximumBuffers)
for i in 0..<maximumBuffers {
outOutputData[i].mNumberChannels = inDestinationFormat.mChannelsPerFrame
outOutputData[i].mDataByteSize = UInt32(dataBytesSize)
outOutputData[i].mData = UnsafeMutableRawPointer.allocate(byteCount: dataBytesSize, alignment: 0)
}
let status = AudioConverterFillComplexBuffer(
converter,
inputDataProc,
Unmanaged.passUnretained(self).toOpaque(),
&ioOutputDataPacketSize,
outOutputData.unsafeMutablePointer,
nil
)
switch status {
// kAudioConverterErr_InvalidInputSize: perhaps a mistake, but needed to support the macOS built-in mic (#61).
case noErr, kAudioConverterErr_InvalidInputSize:
delegate?.audioCodec(self, didOutput: outOutputData, presentationTimeStamp: presentationTimeStamp)
case -1:
if destination == .pcm {
delegate?.audioCodec(self, didOutput: outOutputData, presentationTimeStamp: presentationTimeStamp)
}
finished = true
default:
finished = true
}
for i in 0..<outOutputData.count {
if let mData = outOutputData[i].mData {
free(mData)
}
}
free(outOutputData.unsafeMutablePointer)
} while !finished
}
func invalidate() {
lockQueue.async {
self.inSourceFormat = nil
self._inDestinationFormat = nil
if let converter: AudioConverterRef = self._converter {
AudioConverterDispose(converter)
}
self._converter = nil
}
}
func onInputDataForAudioConverter(
_ ioNumberDataPackets: UnsafeMutablePointer<UInt32>,
ioData: UnsafeMutablePointer<AudioBufferList>,
outDataPacketDescription: UnsafeMutablePointer<UnsafeMutablePointer<AudioStreamPacketDescription>?>?) -> OSStatus {
guard currentAudioBuffer.isReady else {
ioNumberDataPackets.pointee = 0
return -1
}
memcpy(ioData, currentAudioBuffer.input.unsafePointer, currentAudioBuffer.listSize)
if destination == .pcm {
ioNumberDataPackets.pointee = 1
} else {
ioNumberDataPackets.pointee = UInt32(numSamples)
}
if destination == .pcm && outDataPacketDescription != nil {
audioStreamPacketDescription.mDataByteSize = currentAudioBuffer.input.unsafePointer.pointee.mBuffers.mDataByteSize
outDataPacketDescription?.pointee = withUnsafeMutablePointer(to: &audioStreamPacketDescription) { $0 }
}
currentAudioBuffer.clear()
return noErr
}
private func setBitrateUntilNoErr(_ bitrate: UInt32) {
do {
try setProperty(id: kAudioConverterEncodeBitRate, data: bitrate * inDestinationFormat.mChannelsPerFrame)
actualBitrate = bitrate
} catch {
if Self.minimumBitrate < bitrate {
setBitrateUntilNoErr(bitrate - Self.minimumBitrate)
if let error {
delegate?.audioCodec(self, errorOccurred: .failedToConvert(error: error))
} else {
actualBitrate = Self.minimumBitrate
delegate?.audioCodec(self, didOutput: buffer, presentationTimeStamp: audioBuffer.presentationTimeStamp)
}
audioBuffer.next()
}
if offset + numSamples < sampleBuffer.numSamples {
appendSampleBuffer(sampleBuffer, offset: offset + numSamples)
}
}
private func setProperty<T>(id: AudioConverterPropertyID, data: T) throws {
guard let converter: AudioConverterRef = _converter else {
func appendAudioBuffer(_ audioBuffer: AVAudioBuffer, presentationTimeStamp: CMTime) {
guard isRunning.value, let audioConverter, let buffer = makeOutputBuffer() else {
return
}
let size = UInt32(MemoryLayout<T>.size)
var buffer = data
let status = AudioConverterSetProperty(converter, id, size, &buffer)
guard status == 0 else {
throw Error.setPropertyError(id: id, status: status)
var error: NSError?
audioConverter.convert(to: buffer, error: &error) { _, status in
status.pointee = .haveData
return audioBuffer
}
if let error {
delegate?.audioCodec(self, errorOccurred: .failedToConvert(error: error))
} else {
delegate?.audioCodec(self, didOutput: buffer, presentationTimeStamp: presentationTimeStamp)
}
}
func makeInputBuffer() -> AVAudioBuffer? {
guard let inputFormat = audioConverter?.inputFormat else {
return nil
}
switch inSourceFormat?.mFormatID {
case kAudioFormatLinearPCM:
return AVAudioPCMBuffer(pcmFormat: inputFormat, frameCapacity: 1024)
default:
return AVAudioCompressedBuffer(format: inputFormat, packetCapacity: 1, maximumPacketSize: 1024)
}
}
private func makeOutputBuffer() -> AVAudioBuffer? {
guard let outputFormat = audioConverter?.outputFormat else {
return nil
}
return destination.makeAudioBuffer(outputFormat)
}
private func makeAudioConverter(_ inSourceFormat: inout AudioStreamBasicDescription) -> AVAudioConverter? {
guard
let inputFormat = AVAudioFormat(streamDescription: &inSourceFormat),
let outputFormat = destination.makeAudioFormat(inSourceFormat) else {
return nil
}
defer {
delegate?.audioCodec(self, didSet: outputFormat)
}
let converter = AVAudioConverter(from: inputFormat, to: outputFormat)
settings.apply(converter, oldValue: nil)
return converter
}
}
@@ -292,14 +135,9 @@ extension AudioCodec: Running {
public func stopRunning() {
lockQueue.async {
if let converter: AudioConverterRef = self._converter {
AudioConverterDispose(converter)
self._converter = nil
}
self.currentAudioBuffer.clear()
self.inSourceFormat = nil
self.formatDescription = nil
self._inDestinationFormat = nil
self.audioConverter = nil
self.audioBuffer = nil
self.isRunning.mutate { $0 = false }
}
}
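Downstream, the delegate surface moves from CMFormatDescription? and UnsafeMutableAudioBufferListPointer to AVAudioFormat and AVAudioBuffer, as the protocol above shows. A hypothetical conformance sketch follows; the sink name and its behavior are made up for illustration, and it assumes the HaishinKit module is imported.

import AVFoundation
import HaishinKit

// Hypothetical sink demonstrating the new AudioCodecDelegate shape.
final class AACSink: AudioCodecDelegate {
    func audioCodec(_ codec: AudioCodec, didSet outputFormat: AVAudioFormat) {
        // Called once per negotiated output format.
        print("output format:", outputFormat)
    }

    func audioCodec(_ codec: AudioCodec, didOutput audioBuffer: AVAudioBuffer, presentationTimeStamp: CMTime) {
        // AAC output arrives as AVAudioCompressedBuffer; PCM as AVAudioPCMBuffer.
        guard let aacBuffer = audioBuffer as? AVAudioCompressedBuffer else {
            return
        }
        let frame = Data(bytes: aacBuffer.data, count: Int(aacBuffer.byteLength))
        _ = frame // mux or persist the raw AAC frame here
    }

    func audioCodec(_ codec: AudioCodec, errorOccurred error: AudioCodec.Error) {
        print("conversion failed:", error)
    }
}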

View File

@@ -7,114 +7,135 @@ final class AudioCodecBuffer {
case noBlockBuffer
}
static let numSamples = 1024
let input: UnsafeMutableAudioBufferListPointer
static let numSamples: UInt32 = 1024
static let maxBuffers: Int = 6
var isReady: Bool {
numSamples == index
}
var maxLength: Int {
numSamples * bytesPerFrame * numberChannels * maximumBuffers
var current: AVAudioPCMBuffer {
return buffers[cursor]
}
let listSize: Int
private var index = 0
private var buffers: [Data]
private var numSamples: Int
private let bytesPerFrame: Int
private let maximumBuffers: Int
private let numberChannels: Int
private let bufferList: UnsafeMutableAudioBufferListPointer
private(set) var presentationTimeStamp: CMTime = .invalid
private var index: Int = 0
private var numSamples: Int
private var format: AVAudioFormat
private var buffers: [AVAudioPCMBuffer] = []
private var cursor: Int = 0
private var workingBuffer: AVAudioPCMBuffer
private var maxBuffers: Int = AudioCodecBuffer.maxBuffers
deinit {
input.unsafeMutablePointer.deallocate()
bufferList.unsafeMutablePointer.deallocate()
}
init(_ inSourceFormat: AudioStreamBasicDescription, numSamples: Int = AudioCodecBuffer.numSamples) {
self.numSamples = numSamples
let nonInterleaved = inSourceFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved != 0
bytesPerFrame = Int(inSourceFormat.mBytesPerFrame)
maximumBuffers = nonInterleaved ? Int(inSourceFormat.mChannelsPerFrame) : 1
listSize = AudioBufferList.sizeInBytes(maximumBuffers: maximumBuffers)
input = AudioBufferList.allocate(maximumBuffers: maximumBuffers)
bufferList = AudioBufferList.allocate(maximumBuffers: maximumBuffers)
numberChannels = nonInterleaved ? 1 : Int(inSourceFormat.mChannelsPerFrame)
let dataByteSize = numSamples * bytesPerFrame
buffers = .init(repeating: .init(repeating: 0, count: numSamples * bytesPerFrame), count: maximumBuffers)
input.unsafeMutablePointer.pointee.mNumberBuffers = UInt32(maximumBuffers)
for i in 0..<maximumBuffers {
input[i].mNumberChannels = UInt32(numberChannels)
buffers[i].withUnsafeMutableBytes { pointer in
input[i].mData = pointer.baseAddress
init?(_ inSourceFormat: inout AudioStreamBasicDescription, numSamples: UInt32 = AudioCodecBuffer.numSamples) {
guard
inSourceFormat.mFormatID == kAudioFormatLinearPCM,
let format = AVAudioFormat(streamDescription: &inSourceFormat),
let workingBuffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: numSamples) else {
return nil
}
for _ in 0..<maxBuffers {
guard let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: numSamples) else {
return nil
}
input[i].mDataByteSize = UInt32(dataByteSize)
buffer.frameLength = numSamples
self.buffers.append(buffer)
}
self.format = format
self.workingBuffer = workingBuffer
self.numSamples = Int(numSamples)
}
func write(_ bytes: UnsafeMutableRawPointer?, count: Int, presentationTimeStamp: CMTime) {
numSamples = count
index = count
input.unsafeMutablePointer.pointee.mBuffers.mNumberChannels = 1
input.unsafeMutablePointer.pointee.mBuffers.mData = bytes
input.unsafeMutablePointer.pointee.mBuffers.mDataByteSize = UInt32(count)
}
func write(_ sampleBuffer: CMSampleBuffer, offset: Int) throws -> Int {
guard !isReady else {
throw Error.isReady
func appendSampleBuffer(_ sampleBuffer: CMSampleBuffer, offset: Int) -> Int {
if isReady {
return -1
}
if presentationTimeStamp == .invalid {
let offsetTimeStamp: CMTime = offset == 0 ? .zero : CMTime(value: CMTimeValue(offset), timescale: sampleBuffer.presentationTimeStamp.timescale)
presentationTimeStamp = CMTimeAdd(sampleBuffer.presentationTimeStamp, offsetTimeStamp)
}
var blockBuffer: CMBlockBuffer?
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
sampleBuffer,
bufferListSizeNeededOut: nil,
bufferListOut: bufferList.unsafeMutablePointer,
bufferListSize: listSize,
blockBufferAllocator: kCFAllocatorDefault,
blockBufferMemoryAllocator: kCFAllocatorDefault,
flags: 0,
blockBufferOut: &blockBuffer
)
guard blockBuffer != nil else {
throw Error.noBlockBuffer
}
let numSamples = min(self.numSamples - index, sampleBuffer.numSamples - offset)
for i in 0..<maximumBuffers {
guard let data = bufferList[i].mData else {
continue
if offset == 0 {
if workingBuffer.frameLength < sampleBuffer.numSamples {
if let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: AVAudioFrameCount(sampleBuffer.numSamples)) {
self.workingBuffer = buffer
}
}
buffers[i].replaceSubrange(
index * bytesPerFrame..<index * bytesPerFrame + numSamples * bytesPerFrame,
with: data.advanced(by: offset * bytesPerFrame),
count: numSamples * bytesPerFrame
workingBuffer.frameLength = AVAudioFrameCount(sampleBuffer.numSamples)
CMSampleBufferCopyPCMDataIntoAudioBufferList(
sampleBuffer,
at: 0,
frameCount: Int32(sampleBuffer.numSamples),
into: workingBuffer.mutableAudioBufferList
)
}
let numSamples = min(self.numSamples - index, Int(sampleBuffer.numSamples) - offset)
if format.isInterleaved {
let channelCount = Int(format.channelCount)
switch format.commonFormat {
case .pcmFormatInt16:
memcpy(current.int16ChannelData?[0].advanced(by: index), workingBuffer.int16ChannelData?[0].advanced(by: offset), numSamples * 2 * channelCount)
case .pcmFormatInt32:
memcpy(current.int32ChannelData?[0].advanced(by: index), workingBuffer.int32ChannelData?[0].advanced(by: offset), numSamples * 4 * channelCount)
case .pcmFormatFloat32:
memcpy(current.floatChannelData?[0].advanced(by: index), workingBuffer.floatChannelData?[0].advanced(by: offset), numSamples * 4 * channelCount)
default:
break
}
} else {
for i in 0..<current.stride {
switch format.commonFormat {
case .pcmFormatInt16:
memcpy(current.int16ChannelData?[i].advanced(by: index), workingBuffer.int16ChannelData?[i].advanced(by: offset), numSamples * 2)
case .pcmFormatInt32:
memcpy(current.int32ChannelData?[i].advanced(by: index), workingBuffer.int32ChannelData?[i].advanced(by: offset), numSamples * 4)
case .pcmFormatFloat32:
memcpy(current.floatChannelData?[i].advanced(by: index), workingBuffer.floatChannelData?[i].advanced(by: offset), numSamples * 4)
default:
break
}
}
}
index += numSamples
return numSamples
}
func muted() {
for i in 0..<maximumBuffers {
buffers[i].resetBytes(in: 0...)
if format.isInterleaved {
let channelCount = Int(format.channelCount)
switch format.commonFormat {
case .pcmFormatInt16:
current.int16ChannelData?[0].assign(repeating: 0, count: numSamples * channelCount)
case .pcmFormatInt32:
current.int32ChannelData?[0].assign(repeating: 0, count: numSamples * channelCount)
case .pcmFormatFloat32:
current.floatChannelData?[0].assign(repeating: 0, count: numSamples * channelCount)
default:
break
}
} else {
for i in 0..<current.stride {
switch format.commonFormat {
case .pcmFormatInt16:
current.int16ChannelData?[i].assign(repeating: 0, count: numSamples)
case .pcmFormatInt32:
current.int32ChannelData?[i].assign(repeating: 0, count: numSamples)
case .pcmFormatFloat32:
current.floatChannelData?[i].assign(repeating: 0, count: numSamples)
default:
break
}
}
}
}
func clear() {
func next() {
presentationTimeStamp = .invalid
index = 0
cursor += 1
if cursor == buffers.count {
cursor = 0
}
}
}
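The ring above holds up to six fixed-size PCM chunks; appendSampleBuffer returns how many input frames it consumed so callers can re-append the remainder. For example, with numSamples = 1024 and back-to-back inputs of 920 and 921 frames, the first chunk fills after taking 1024 - 920 = 104 frames of the second input, leaving 817 frames for the next chunk (this is the arithmetic the new tests below assert). A sketch of the drain loop a caller drives, mirroring the helper in AudioCodecBufferTests:

// Sketch: drain one CMSampleBuffer into the ring, chunk by chunk.
func append(_ buffer: AudioCodecBuffer, _ sampleBuffer: CMSampleBuffer, offset: Int = 0) {
    let consumed = buffer.appendSampleBuffer(sampleBuffer, offset: offset)
    if buffer.isReady {
        // 1024 frames accumulated: hand buffer.current to the converter,
        // then advance the ring cursor.
        buffer.next()
    }
    if offset + consumed < sampleBuffer.numSamples {
        append(buffer, sampleBuffer, offset: offset + consumed)
    }
}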

View File

@@ -1,4 +1,5 @@
import AudioToolbox
import AVFoundation
/// The type of format that the AudioCodec supports.
public enum AudioCodecFormat {
@@ -70,22 +71,6 @@ public enum AudioCodecFormat {
}
}
var inClassDescriptions: [AudioClassDescription] {
switch self {
case .aac:
#if os(iOS)
return [
AudioClassDescription(mType: kAudioEncoderComponentType, mSubType: kAudioFormatMPEG4AAC, mManufacturer: kAppleSoftwareAudioCodecManufacturer),
AudioClassDescription(mType: kAudioEncoderComponentType, mSubType: kAudioFormatMPEG4AAC, mManufacturer: kAppleHardwareAudioCodecManufacturer)
]
#else
return []
#endif
case .pcm:
return []
}
}
func maximumBuffers(_ channel: UInt32) -> Int {
switch self {
case .aac:
@@ -95,21 +80,30 @@ public enum AudioCodecFormat {
}
}
func audioStreamBasicDescription(_ inSourceFormat: AudioStreamBasicDescription?, sampleRate: Double, channels: UInt32) -> AudioStreamBasicDescription? {
guard let inSourceFormat = inSourceFormat else {
func makeAudioBuffer(_ format: AVAudioFormat) -> AVAudioBuffer? {
switch self {
case .aac:
return AVAudioCompressedBuffer(format: format, packetCapacity: 1, maximumPacketSize: 1024)
case .pcm:
return AVAudioPCMBuffer(pcmFormat: format, frameCapacity: 1024)
}
}
func makeAudioFormat(_ inSourceFormat: AudioStreamBasicDescription?) -> AVAudioFormat? {
guard let inSourceFormat else {
return nil
}
let destinationChannels: UInt32 = (channels == 0) ? inSourceFormat.mChannelsPerFrame : channels
return AudioStreamBasicDescription(
mSampleRate: sampleRate == 0 ? inSourceFormat.mSampleRate : sampleRate,
var streamDescription = AudioStreamBasicDescription(
mSampleRate: inSourceFormat.mSampleRate,
mFormatID: formatID,
mFormatFlags: formatFlags,
mBytesPerPacket: bytesPerPacket,
mFramesPerPacket: framesPerPacket,
mBytesPerFrame: bytesPerFrame,
mChannelsPerFrame: destinationChannels,
mChannelsPerFrame: inSourceFormat.mChannelsPerFrame,
mBitsPerChannel: bitsPerChannel,
mReserved: 0
)
return AVAudioFormat(streamDescription: &streamDescription)
}
}

View File

@@ -1,3 +1,4 @@
import AVFAudio
import Foundation
/// The AudioCodecSettings class specifying audio compression settings.
@@ -6,10 +7,25 @@ public struct AudioCodecSettings: Codable {
public static let `default` = AudioCodecSettings()
/// Specifies the bitRate of audio output.
public var bitRate: UInt32 = 32 * 1000
public var bitRate: Int
/// Creates a new AudioCodecSettings instance.
public init(bitRate: UInt32 = 32 * 1000) {
public init(bitRate: Int = 32 * 1000) {
self.bitRate = bitRate
}
func apply(_ converter: AVAudioConverter?, oldValue: AudioCodecSettings?) {
guard let converter else {
return
}
if bitRate != oldValue?.bitRate {
let minAvailableBitRate = converter.applicableEncodeBitRates?.min(by: { a, b in
return a.intValue < b.intValue
})?.intValue ?? bitRate
let maxAvailableBitRate = converter.applicableEncodeBitRates?.max(by: { a, b in
return a.intValue < b.intValue
})?.intValue ?? bitRate
converter.bitRate = min(maxAvailableBitRate, max(minAvailableBitRate, bitRate))
}
}
}
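Bitrate handling changes too: instead of retrying AudioConverterSetProperty until noErr, apply(_:oldValue:) clamps the requested value into the converter's applicableEncodeBitRates range. A small sketch of the same clamping outside the library; the converter instance and the 64 kbps request are assumptions.

// Assume `converter` is an AVAudioConverter encoding to AAC.
let requested = 64_000
let rates = converter.applicableEncodeBitRates?.map(\.intValue) ?? [requested]
// Clamp into [min, max] of what the encoder actually supports.
converter.bitRate = min(rates.max() ?? requested, max(rates.min() ?? requested, requested))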

View File

@@ -210,27 +210,27 @@ public class TSWriter: Running {
extension TSWriter: AudioCodecDelegate {
// MARK: AudioCodecDelegate
public func audioCodec(_ codec: AudioCodec, didSet formatDescription: CMFormatDescription?) {
guard let formatDescription else {
return
}
public func audioCodec(_ codec: AudioCodec, errorOccurred error: AudioCodec.Error) {
}
public func audioCodec(_ codec: AudioCodec, didSet outputFormat: AVAudioFormat) {
var data = ESSpecificData()
data.streamType = ESType.adtsAac.rawValue
data.elementaryPID = TSWriter.defaultAudioPID
PMT.elementaryStreamSpecificData.append(data)
audioContinuityCounter = 0
audioConfig = AudioSpecificConfig(formatDescription: formatDescription)
audioConfig = AudioSpecificConfig(formatDescription: outputFormat.formatDescription)
}
public func audioCodec(_ codec: AudioCodec, didOutput sample: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime) {
guard !sample.isEmpty && 0 < sample[0].mDataByteSize else {
public func audioCodec(_ codec: AudioCodec, didOutput audioBuffer: AVAudioBuffer, presentationTimeStamp: CMTime) {
guard let audioBuffer = audioBuffer as? AVAudioCompressedBuffer else {
return
}
writeSampleBuffer(
TSWriter.defaultAudioPID,
streamID: 192,
bytes: sample[0].mData?.assumingMemoryBound(to: UInt8.self),
count: sample[0].mDataByteSize,
bytes: audioBuffer.data.assumingMemoryBound(to: UInt8.self),
count: audioBuffer.byteLength,
presentationTimeStamp: presentationTimeStamp,
decodeTimeStamp: .invalid,
randomAccessIndicator: true

View File

@@ -4,6 +4,6 @@ import Foundation
/// An object that applies an audio effect.
open class AudioEffect: NSObject {
/// Applies an audio effect to the given buffer.
open func execute(_ buffer: UnsafeMutableAudioBufferListPointer?, format: AudioStreamBasicDescription?) {
open func execute(_ buffer: AVAudioBuffer, presentationTimeStamp: CMTime) {
}
}
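Custom effects now receive an AVAudioBuffer rather than a raw AudioBufferList pointer. A hypothetical subclass under the new signature; it assumes deinterleaved float PCM input and simply halves the level in place.

import AVFoundation
import HaishinKit

// Hypothetical effect: halves the level of a float PCM buffer in place.
final class HalfGainEffect: AudioEffect {
    override func execute(_ buffer: AVAudioBuffer, presentationTimeStamp: CMTime) {
        guard let pcmBuffer = buffer as? AVAudioPCMBuffer,
              let channels = pcmBuffer.floatChannelData else {
            return
        }
        for channel in 0..<Int(pcmBuffer.format.channelCount) {
            for frame in 0..<Int(pcmBuffer.frameLength) {
                channels[channel][frame] *= 0.5
            }
        }
    }
}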

View File

@@ -37,7 +37,6 @@ final class IOAudioUnit: NSObject, IOUnit {
defer {
mixer.session.commitConfiguration()
}
codec.invalidate()
guard let device else {
try capture.attachDevice(nil, audioUnit: self)
return
@@ -62,7 +61,8 @@ final class IOAudioUnit: NSObject, IOUnit {
return
}
mixer?.recorder.appendSampleBuffer(sampleBuffer, mediaType: .audio)
codec.encodeSampleBuffer(sampleBuffer)
codec.inSourceFormat = sampleBuffer.formatDescription?.streamBasicDescription?.pointee
codec.appendSampleBuffer(sampleBuffer)
}
}
@@ -114,11 +114,14 @@ extension IOAudioUnit: AVCaptureAudioDataOutputSampleBufferDelegate {
extension IOAudioUnit: AudioCodecDelegate {
// MARK: AudioCodecDelegate
func audioCodec(_ codec: AudioCodec, didSet formatDescription: CMFormatDescription?) {
guard let formatDescription = formatDescription, let audioEngine = audioEngine else {
func audioCodec(_ codec: AudioCodec, errorOccurred error: AudioCodec.Error) {
}
func audioCodec(_ codec: AudioCodec, didSet outputFormat: AVAudioFormat) {
guard let audioEngine = audioEngine else {
return
}
audioFormat = AVAudioFormat(cmAudioFormatDescription: formatDescription)
audioFormat = AVAudioFormat(cmAudioFormatDescription: outputFormat.formatDescription)
nstry({
if let playerNode = self.mixer?.mediaLink.playerNode, let audioFormat = self.audioFormat {
audioEngine.connect(playerNode, to: audioEngine.mainMixerNode, format: audioFormat)
@@ -133,26 +136,13 @@ extension IOAudioUnit: AudioCodecDelegate {
}
}
func audioCodec(_ codec: AudioCodec, didOutput sample: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime) {
guard !sample.isEmpty, sample[0].mDataByteSize != 0 else {
func audioCodec(_ codec: AudioCodec, didOutput audioBuffer: AVAudioBuffer, presentationTimeStamp: CMTime) {
guard let audioBuffer = audioBuffer as? AVAudioPCMBuffer else {
return
}
guard
let audioFormat = audioFormat,
let buffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: sample[0].mDataByteSize / 4) else {
return
}
buffer.frameLength = buffer.frameCapacity
let bufferList = UnsafeMutableAudioBufferListPointer(buffer.mutableAudioBufferList)
for i in 0..<bufferList.count {
guard let mData = sample[i].mData else { continue }
memcpy(bufferList[i].mData, mData, Int(sample[i].mDataByteSize))
bufferList[i].mDataByteSize = sample[i].mDataByteSize
bufferList[i].mNumberChannels = 1
}
if let mixer = mixer {
mixer.delegate?.mixer(mixer, didOutput: buffer, presentationTimeStamp: presentationTimeStamp)
mixer.delegate?.mixer(mixer, didOutput: audioBuffer, presentationTimeStamp: presentationTimeStamp)
}
mixer?.mediaLink.enqueueAudio(buffer)
mixer?.mediaLink.enqueueAudio(audioBuffer)
}
}

View File

@@ -314,7 +314,7 @@ open class RTMPConnection: EventDispatcher {
var outputBufferSize: Int = 0
for stream in streams {
// in bytes.
outputBufferSize += Int(stream.mixer.videoIO.codec.settings.bitRate + stream.mixer.audioIO.codec.settings.bitRate) / 8
outputBufferSize += (Int(stream.mixer.videoIO.codec.settings.bitRate) + stream.mixer.audioIO.codec.settings.bitRate) / 8
}
if socket.outputBufferSize < outputBufferSize {
socket.outputBufferSize = outputBufferSize
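As a worked example of the sizing above, with the bitRate fields now plain Int: the socket buffer reserves one second of combined output per stream. The video target below is an assumption; the audio value is the AudioCodecSettings default.

// Illustrative targets; bitRate values are bits per second.
let videoBitRate = 1_000_000                                // assumed video setting
let audioBitRate = 32_000                                   // AudioCodecSettings default
let outputBufferSize = (videoBitRate + audioBitRate) / 8    // 129_000 bytes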

View File

@@ -588,25 +588,35 @@ final class RTMPAudioMessage: RTMPMessage {
stream.mixer.audioIO.codec.destination = .pcm
stream.mixer.audioIO.codec.inSourceFormat = config?.audioStreamBasicDescription()
case .raw?:
enqueueSampleBuffer(stream, type: type)
if stream.mixer.audioIO.codec.inSourceFormat == nil {
stream.mixer.audioIO.codec.destination = .pcm
stream.mixer.audioIO.codec.inSourceFormat = makeAudioStreamBasicDescription()
}
if let audioBuffer = makeAudioBuffer(stream) {
stream.mixer.audioIO.codec.appendAudioBuffer(audioBuffer, presentationTimeStamp: CMTime(seconds: stream.audioTimestamp / 1000, preferredTimescale: 1000))
}
case .none:
break
}
}
private func enqueueSampleBuffer(_ stream: RTMPStream, type: RTMPChunkType) {
if stream.mixer.audioIO.codec.inSourceFormat == nil {
stream.mixer.audioIO.codec.destination = .pcm
stream.mixer.audioIO.codec.inSourceFormat = codec.audioStreamBasicDescription(soundRate, size: soundSize, type: soundType)
}
payload.withUnsafeMutableBytes { (buffer: UnsafeMutableRawBufferPointer) -> Void in
stream.mixer.audioIO.codec.encodeBytes(
buffer.baseAddress?.advanced(by: codec.headerSize),
count: payload.count - codec.headerSize,
presentationTimeStamp: CMTime(seconds: stream.audioTimestamp / 1000, preferredTimescale: 1000)
)
private func makeAudioBuffer(_ stream: RTMPStream) -> AVAudioBuffer? {
return payload.withUnsafeMutableBytes { (buffer: UnsafeMutableRawBufferPointer) -> AVAudioBuffer? in
guard let baseAddress = buffer.baseAddress, let buffer = stream.mixer.audioIO.codec.makeInputBuffer() as? AVAudioCompressedBuffer else {
return nil
}
let byteCount = payload.count - codec.headerSize
buffer.packetDescriptions?.pointee = AudioStreamPacketDescription(mStartOffset: 0, mVariableFramesInPacket: 0, mDataByteSize: UInt32(byteCount))
buffer.packetCount = 1
buffer.byteLength = UInt32(byteCount)
buffer.data.copyMemory(from: baseAddress.advanced(by: codec.headerSize), byteCount: byteCount)
return buffer
}
}
private func makeAudioStreamBasicDescription() -> AudioStreamBasicDescription? {
return nil
}
}
// MARK: -
@@ -646,7 +656,7 @@ final class RTMPVideoMessage: RTMPMessage {
sampleBuffer.isNotSync = !(payload[0] >> 4 == FLVFrameType.key.rawValue)
stream.mixer.mediaLink.enqueueVideo(sampleBuffer)
}
if stream.mixer.mediaLink.isPaused && stream.mixer.audioIO.codec.formatDescription == nil {
if stream.mixer.mediaLink.isPaused && stream.mixer.audioIO.codec.inSourceFormat == nil {
stream.mixer.mediaLink.isPaused = false
}
default:

View File

@@ -25,22 +25,22 @@ final class RTMPMuxer {
extension RTMPMuxer: AudioCodecDelegate {
// MARK: AudioCodecDelegate
func audioCodec(_ codec: AudioCodec, didSet formatDescription: CMFormatDescription?) {
guard let formatDescription = formatDescription else {
return
}
func audioCodec(_ codec: AudioCodec, errorOccurred error: AudioCodec.Error) {
}
func audioCodec(_ codec: AudioCodec, didSet outputFormat: AVAudioFormat) {
var buffer = Data([RTMPMuxer.aac, FLVAACPacketType.seq.rawValue])
buffer.append(contentsOf: AudioSpecificConfig(formatDescription: formatDescription).bytes)
buffer.append(contentsOf: AudioSpecificConfig(formatDescription: outputFormat.formatDescription).bytes)
delegate?.muxer(self, didOutputAudio: buffer, withTimestamp: 0)
}
func audioCodec(_ codec: AudioCodec, didOutput sample: UnsafeMutableAudioBufferListPointer, presentationTimeStamp: CMTime) {
func audioCodec(_ codec: AudioCodec, didOutput audioBuffer: AVAudioBuffer, presentationTimeStamp: CMTime) {
let delta = (audioTimeStamp == CMTime.zero ? 0 : presentationTimeStamp.seconds - audioTimeStamp.seconds) * 1000
guard let bytes = sample[0].mData, 0 < sample[0].mDataByteSize && 0 <= delta else {
guard let audioBuffer = audioBuffer as? AVAudioCompressedBuffer, 0 <= delta else {
return
}
var buffer = Data([RTMPMuxer.aac, FLVAACPacketType.raw.rawValue])
buffer.append(bytes.assumingMemoryBound(to: UInt8.self), count: Int(sample[0].mDataByteSize))
buffer.append(audioBuffer.data.assumingMemoryBound(to: UInt8.self), count: Int(audioBuffer.byteLength))
delegate?.muxer(self, didOutputAudio: buffer, withTimestamp: delta)
audioTimeStamp = presentationTimeStamp
}
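For reference, the two-byte prefix follows the FLV audio tag layout: the first byte packs sound format, rate, size, and channel type (RTMPMuxer.aac), and the second byte is the AAC packet type, 0 for a sequence header carrying the AudioSpecificConfig and 1 for a raw frame. A trivial sketch of that framing; the helper name is made up.

// FLV AAC audio message: [header byte][AACPacketType][payload].
func makeAACMessage(header: UInt8, packetType: UInt8, payload: Data) -> Data {
    var message = Data([header, packetType])
    message.append(payload)
    return message
}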

View File

@@ -0,0 +1,108 @@
import Foundation
import XCTest
import AVFoundation
@testable import HaishinKit
final class AudioCodecBufferTests: XCTestCase {
func testMain() {
guard
let sampleBuffer = SinWaveUtil.createCMSampleBuffer(44100, numSamples: 256),
var asbd = sampleBuffer.formatDescription?.audioStreamBasicDescription else {
XCTFail()
return
}
let buffer = AudioCodecBuffer(&asbd, numSamples: 1024)
for _ in 0..<1024/256 {
_ = buffer?.appendSampleBuffer(sampleBuffer, offset: 0)
}
XCTAssertTrue(buffer?.isReady == true)
let sampleBufferData = (try? sampleBuffer.dataBuffer?.dataBytes()) ?? Data()
var expectedData = Data()
expectedData.append(sampleBufferData)
expectedData.append(sampleBufferData)
expectedData.append(sampleBufferData)
expectedData.append(sampleBufferData)
if let pointer = buffer?.current.int16ChannelData?[0] {
let data = Data(bytes: pointer, count: 1024 * 2)
XCTAssertEqual(expectedData, data)
}
}
func testMain2() {
guard
let sampleBuffer_1 = SinWaveUtil.createCMSampleBuffer(44100, numSamples: 920),
let sampleBuffer_2 = SinWaveUtil.createCMSampleBuffer(44100, numSamples: 921),
var asbd = sampleBuffer_1.formatDescription?.audioStreamBasicDescription,
let buffer = AudioCodecBuffer(&asbd, numSamples: 1024) else {
XCTFail()
return
}
let sampleBuffer_1Data = (try? sampleBuffer_1.dataBuffer?.dataBytes()) ?? Data()
let sampleBuffer_2Data = (try? sampleBuffer_2.dataBuffer?.dataBytes()) ?? Data()
var numBuffer = buffer.appendSampleBuffer(sampleBuffer_1, offset: 0)
numBuffer = buffer.appendSampleBuffer(sampleBuffer_2, offset: 0)
XCTAssertTrue(buffer.isReady)
if let pointer = buffer.current.int16ChannelData?[0] {
let data = Data(bytes: pointer, count: 1024 * 2)
var expectedData = Data()
expectedData.append(sampleBuffer_1Data)
expectedData.append(sampleBuffer_2Data.subdata(in: 0..<numBuffer * 2))
XCTAssertEqual(expectedData.bytes, data.bytes)
} else {
XCTFail()
}
buffer.next()
XCTAssertFalse(buffer.isReady)
XCTAssertEqual(numBuffer, 104)
var expectedData = Data()
expectedData.append(sampleBuffer_2Data.subdata(in: numBuffer * 2..<sampleBuffer_2Data.count))
numBuffer = buffer.appendSampleBuffer(sampleBuffer_2, offset: numBuffer)
if let pointer = buffer.current.int16ChannelData?[0] {
let data = Data(bytes: pointer, count: expectedData.count)
XCTAssertEqual(expectedData.bytes, data.bytes)
} else {
XCTFail()
}
}
func testMain3() {
guard
let sampleBuffer_1 = SinWaveUtil.createCMSampleBuffer(44100, numSamples: 920),
let sampleBuffer_2 = SinWaveUtil.createCMSampleBuffer(44100, numSamples: 921),
var asbd = sampleBuffer_1.formatDescription?.audioStreamBasicDescription,
let buffer = AudioCodecBuffer(&asbd, numSamples: 1024) else {
XCTFail()
return
}
let sampleBuffer_2Data = (try? sampleBuffer_2.dataBuffer?.dataBytes()) ?? Data()
appendSampleBuffer(buffer, sampleBuffer: sampleBuffer_1, offset: 0)
appendSampleBuffer(buffer, sampleBuffer: sampleBuffer_2, offset: 0)
var expectedData = Data()
expectedData.append(sampleBuffer_2Data.subdata(in: 104 * 2..<sampleBuffer_2Data.count))
if let pointer = buffer.current.int16ChannelData?[0] {
let data = Data(bytes: pointer, count: expectedData.count)
XCTAssertEqual(expectedData.bytes, data.bytes)
} else {
XCTFail()
}
}
private func appendSampleBuffer(_ buffer: AudioCodecBuffer, sampleBuffer: CMSampleBuffer, offset: Int = 0) {
let numSamples = buffer.appendSampleBuffer(sampleBuffer, offset: offset)
if buffer.isReady {
buffer.next()
}
if offset + numSamples < sampleBuffer.numSamples {
appendSampleBuffer(buffer, sampleBuffer: sampleBuffer, offset: offset + numSamples)
}
}
}

View File

@@ -9,8 +9,8 @@ final class AudioCodecTests: XCTestCase {
let encoder = AudioCodec()
encoder.startRunning()
for _ in 0..<10 {
if let sampleBuffer: CMSampleBuffer = SinWaveUtil.createCMSampleBuffer(44100, numSamples: 1024) {
encoder.encodeSampleBuffer(sampleBuffer)
if let sampleBuffer = SinWaveUtil.createCMSampleBuffer(44100, numSamples: 1024) {
encoder.appendSampleBuffer(sampleBuffer)
}
}
}
@@ -19,8 +19,8 @@ final class AudioCodecTests: XCTestCase {
let encoder = AudioCodec()
encoder.startRunning()
for _ in 0..<10 {
if let sampleBuffer: CMSampleBuffer = SinWaveUtil.createCMSampleBuffer(48000.0, numSamples: 1024) {
encoder.encodeSampleBuffer(sampleBuffer)
if let sampleBuffer = SinWaveUtil.createCMSampleBuffer(48000.0, numSamples: 1024) {
encoder.appendSampleBuffer(sampleBuffer)
}
}
}
@@ -29,8 +29,8 @@ final class AudioCodecTests: XCTestCase {
let encoder = AudioCodec()
encoder.startRunning()
for _ in 0..<10 {
if let sampleBuffer: CMSampleBuffer = SinWaveUtil.createCMSampleBuffer(24000.0, numSamples: 1024) {
encoder.encodeSampleBuffer(sampleBuffer)
if let sampleBuffer = SinWaveUtil.createCMSampleBuffer(24000.0, numSamples: 1024) {
encoder.appendSampleBuffer(sampleBuffer)
}
}
}
@@ -39,8 +39,8 @@ final class AudioCodecTests: XCTestCase {
let encoder = AudioCodec()
encoder.startRunning()
for _ in 0..<10 {
if let sampleBuffer: CMSampleBuffer = SinWaveUtil.createCMSampleBuffer(16000.0, numSamples: 1024) {
encoder.encodeSampleBuffer(sampleBuffer)
if let sampleBuffer = SinWaveUtil.createCMSampleBuffer(16000.0, numSamples: 1024) {
encoder.appendSampleBuffer(sampleBuffer)
}
}
}
@@ -49,8 +49,8 @@ final class AudioCodecTests: XCTestCase {
let encoder = AudioCodec()
encoder.startRunning()
for _ in 0..<10 {
if let sampleBuffer: CMSampleBuffer = SinWaveUtil.createCMSampleBuffer(8000.0, numSamples: 256) {
encoder.encodeSampleBuffer(sampleBuffer)
if let sampleBuffer = SinWaveUtil.createCMSampleBuffer(8000.0, numSamples: 256) {
encoder.appendSampleBuffer(sampleBuffer)
}
}
}
@@ -59,8 +59,8 @@ final class AudioCodecTests: XCTestCase {
let encoder = AudioCodec()
encoder.startRunning()
for _ in 0..<10 {
if let sampleBuffer: CMSampleBuffer = SinWaveUtil.createCMSampleBuffer(8000.0, numSamples: 960) {
encoder.encodeSampleBuffer(sampleBuffer)
if let sampleBuffer = SinWaveUtil.createCMSampleBuffer(8000.0, numSamples: 960) {
encoder.appendSampleBuffer(sampleBuffer)
}
}
}
@@ -69,8 +69,8 @@ final class AudioCodecTests: XCTestCase {
let encoder = AudioCodec()
encoder.startRunning()
for _ in 0..<10 {
if let sampleBuffer: CMSampleBuffer = SinWaveUtil.createCMSampleBuffer(44100.0, numSamples: 1224) {
encoder.encodeSampleBuffer(sampleBuffer)
if let sampleBuffer = SinWaveUtil.createCMSampleBuffer(44100.0, numSamples: 1224) {
encoder.appendSampleBuffer(sampleBuffer)
}
}
}
@@ -80,8 +80,8 @@ final class AudioCodecTests: XCTestCase {
let encoder = AudioCodec()
encoder.startRunning()
for numSample in numSamples {
if let sampleBuffer: CMSampleBuffer = SinWaveUtil.createCMSampleBuffer(44100.0, numSamples: numSample) {
encoder.encodeSampleBuffer(sampleBuffer)
if let sampleBuffer = SinWaveUtil.createCMSampleBuffer(44100.0, numSamples: numSample) {
encoder.appendSampleBuffer(sampleBuffer)
}
}
}