Copied from: https://github.com/LevyGG/iOS-H.264-hareware-encode-and-decode/blob/master/VTDemoOniPad/H264HwEncoderImpl.h
#import <Foundation/Foundation.h>
@import AVFoundation;
@protocol H264HwEncoderImplDelegate <NSObject>
- (void)gotSpsPps:(NSData*)sps pps:(NSData*)pps;
- (void)gotEncodedData:(NSData*)data isKeyFrame:(BOOL)isKeyFrame;
@end
@interface H264HwEncoderImpl : NSObject
- (void) initWithConfiguration;
- (void) initEncode:(int)width height:(int)height;
- (void) encode:(CMSampleBufferRef)sampleBuffer;
- (void) End;
@property (copy, nonatomic) NSString *error; // copy, not weak: a weakly-held string would be released as soon as nothing else retains it
@property (weak, nonatomic) id<H264HwEncoderImplDelegate> delegate;
@end
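
For orientation, a minimal caller might be wired up as below. This is an illustrative sketch, not part of the original repo: the class name, resolution, and capture plumbing are placeholders.

@interface CaptureController : NSObject <AVCaptureVideoDataOutputSampleBufferDelegate, H264HwEncoderImplDelegate>
@property (strong, nonatomic) H264HwEncoderImpl *encoder;
@end

@implementation CaptureController
- (void)startEncoding
{
    self.encoder = [[H264HwEncoderImpl alloc] init];
    [self.encoder initWithConfiguration];
    self.encoder.delegate = self;
    [self.encoder initEncode:1280 height:720]; // should match the capture resolution
}
// AVCaptureVideoDataOutputSampleBufferDelegate: hand every captured frame to the encoder.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    [self.encoder encode:sampleBuffer];
}
- (void)gotSpsPps:(NSData *)sps pps:(NSData *)pps { /* cache or transmit the parameter sets */ }
- (void)gotEncodedData:(NSData *)data isKeyFrame:(BOOL)isKeyFrame { /* ship or store the NAL unit */ }
@end
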
#import "H264HwEncoderImpl.h"
@import VideoToolbox;
@import AVFoundation;
@implementation H264HwEncoderImpl
{
VTCompressionSessionRef EncodingSession;
dispatch_queue_t aQueue;
CMFormatDescriptionRef format;
CMSampleTimingInfo * timingInfo;
BOOL initialized;
int frameCount;
NSData *sps;
NSData *pps;
}
@synthesize error;
- (void) initWithConfiguration
{
EncodingSession = nil;
initialized = YES;
// Note: a global queue is concurrent, so the dispatch_sync calls below only block the
// caller; they do not serialize encodes arriving from different threads.
aQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
frameCount = 0;
sps = nil;
pps = nil;
}
// VTCompressionOutputCallback, registered in VTCompressionSessionCreate; invoked once per encoded frame
void didCompressH264(void *outputCallbackRefCon, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags,
CMSampleBufferRef sampleBuffer )
{
NSLog(@"didCompressH264 called with status %d infoFlags %d", (int)status, (int)infoFlags);
if (status != 0) return;
if (!CMSampleBufferDataIsReady(sampleBuffer))
{
NSLog(@"didCompressH264 data is not ready ");
return;
}
H264HwEncoderImpl* encoder = (__bridge H264HwEncoderImpl*)outputCallbackRefCon;
// Check for a key frame first: a sample without the kCMSampleAttachmentKey_NotSync attachment is a sync (key) frame
CFDictionaryRef attachments = (CFDictionaryRef)CFArrayGetValueAtIndex(CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true), 0);
bool keyframe = !CFDictionaryContainsKey(attachments, kCMSampleAttachmentKey_NotSync);
if (keyframe)
{
CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
// CFDictionaryRef extensionDict = CMFormatDescriptionGetExtensions(format);
// Get the extensions
// From the extensions get the dictionary with key "SampleDescriptionExtensionAtoms"
// From the dict, get the value for the key "avcC"
size_t sparameterSetSize, sparameterSetCount;
const uint8_t *sparameterSet;
OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 0, &sparameterSet, &sparameterSetSize, &sparameterSetCount, 0 );
if (statusCode == noErr)
{
// Found sps and now check for pps
size_t pparameterSetSize, pparameterSetCount;
const uint8_t *pparameterSet;
OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pparameterSet, &pparameterSetSize, &pparameterSetCount, 0 );
if (statusCode == noErr)
{
// Found pps
encoder->sps = [NSData dataWithBytes:sparameterSet length:sparameterSetSize];
encoder->pps = [NSData dataWithBytes:pparameterSet length:pparameterSetSize];
if (encoder->_delegate)
{
[encoder->_delegate gotSpsPps:encoder->sps pps:encoder->pps];
}
}
}
}
CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
size_t length, totalLength;
char *dataPointer;
OSStatus statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, 0, &length, &totalLength, &dataPointer);
if (statusCodeRet == noErr) {
size_t bufferOffset = 0;
static const int AVCCHeaderLength = 4;
while (bufferOffset + AVCCHeaderLength < totalLength) { // avoids size_t underflow when totalLength < AVCCHeaderLength
// Read the NAL unit length
uint32_t NALUnitLength = 0;
memcpy(&NALUnitLength, dataPointer + bufferOffset, AVCCHeaderLength);
// Convert the length value from Big-endian to Little-endian
NALUnitLength = CFSwapInt32BigToHost(NALUnitLength);
NSData* data = [[NSData alloc] initWithBytes:(dataPointer + bufferOffset + AVCCHeaderLength) length:NALUnitLength];
[encoder->_delegate gotEncodedData:data isKeyFrame:keyframe];
// Move to the next NAL unit in the block buffer
bufferOffset += AVCCHeaderLength + NALUnitLength;
}
}
}
- (void) initEncode:(int)width height:(int)height // called only once
{
dispatch_sync(aQueue, ^{
CFMutableDictionaryRef sessionAttributes = CFDictionaryCreateMutable(
NULL,
0,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
// Note: kVTCompressionPropertyKey_* keys are compression-session properties; putting them in
// this dictionary (the encoderSpecification argument of VTCompressionSessionCreate) has no
// effect, which likely explains why the original author saw them "sometimes not work". They
// belong in VTSessionSetProperty calls once the session exists (see the sketch after initEncode).
// Bit rate (original note: only sometimes takes effect; when unset, output size follows the video dimensions):
// int fixedBitrate = 2000 * 1024; // 2000 * 1024 -> assume 2 Mbit/s
// CFNumberRef bitrateNum = CFNumberCreate(NULL, kCFNumberSInt32Type, &fixedBitrate);
// CFDictionarySetValue(sessionAttributes, kVTCompressionPropertyKey_AverageBitRate, bitrateNum);
// CFRelease(bitrateNum);
// Quality (original note: with a timescale of 1 in CMTimeMake(int64_t value, int32_t timescale), changing this shows no visible effect):
// float fixedQuality = 1.0;
// CFNumberRef qualityNum = CFNumberCreate(NULL, kCFNumberFloat32Type, &fixedQuality);
// CFDictionarySetValue(sessionAttributes, kVTCompressionPropertyKey_Quality, qualityNum);
// CFRelease(qualityNum);
// Data rate limits (original note: seemed to have no effect). Beyond the placement problem,
// kVTCompressionPropertyKey_DataRateLimits expects a CFArray of byte-count/duration pairs,
// not a single CFNumber:
// int DataRateLimits = 2;
// CFNumberRef DataRateLimitsNum = CFNumberCreate(NULL, kCFNumberSInt8Type, &DataRateLimits);
// CFDictionarySetValue(sessionAttributes, kVTCompressionPropertyKey_DataRateLimits, DataRateLimitsNum);
// CFRelease(DataRateLimitsNum);
// Create the compression session
OSStatus status = VTCompressionSessionCreate(NULL, width, height, kCMVideoCodecType_H264, sessionAttributes, NULL, NULL, didCompressH264, (__bridge void *)(self), &EncodingSession);
CFRelease(sessionAttributes); // we created it, so we release it; the session keeps what it needs
NSLog(@"H264: VTCompressionSessionCreate %d", (int)status);
if (status != 0)
{
NSLog(@"H264: Unable to create a H264 session");
error = @"H264: Unable to create a H264 session";
return ;
}
// Set session properties (original note: setting these also seemed to have no effect)
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_ProfileLevel, kVTProfileLevel_H264_High_5_2);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_AllowFrameReordering, kCFBooleanFalse);
// Start the encoder (PrepareToEncodeFrames is an optional warm-up; otherwise allocation happens on the first frame)
VTCompressionSessionPrepareToEncodeFrames(EncodingSession);
});
}
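// The commented-out keys in initEncode are compression-session properties, so on a live
// session they are applied with VTSessionSetProperty. The sketch below is not from the
// original source; the numbers are placeholders:
- (void)applyExampleProperties
{
    int bitrate = 2000 * 1024; // ~2 Mbit/s average
    CFNumberRef bitrateRef = CFNumberCreate(NULL, kCFNumberSInt32Type, &bitrate);
    VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_AverageBitRate, bitrateRef);
    CFRelease(bitrateRef);
    int fps = 30; // a hint for rate control, not a hard limit
    CFNumberRef fpsRef = CFNumberCreate(NULL, kCFNumberSInt32Type, &fps);
    VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_ExpectedFrameRate, fpsRef);
    CFRelease(fpsRef);
    int keyFrameInterval = 60; // emit a key frame at least every 60 frames
    CFNumberRef intervalRef = CFNumberCreate(NULL, kCFNumberSInt32Type, &keyFrameInterval);
    VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, intervalRef);
    CFRelease(intervalRef);
}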
// Reached from the owner's AVCaptureVideoDataOutputSampleBufferDelegate callback
- (void) encode:(CMSampleBufferRef)sampleBuffer // called frequently, once per captured frame
{
dispatch_sync(aQueue, ^{
if (EncodingSession == NULL) return; // session creation failed or End was already called
frameCount++;
// Get the CV Image buffer
CVImageBufferRef imageBuffer = (CVImageBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
// CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
// Create properties (original note: the larger this value, the blurrier the picture)
CMTime presentationTimeStamp = CMTimeMake(frameCount, 1);
// CMTime duration = CMTimeMake(1, DURATION);
VTEncodeInfoFlags flags;
// Pass it to the encoder
OSStatus statusCode = VTCompressionSessionEncodeFrame(EncodingSession,
imageBuffer,
presentationTimeStamp,
kCMTimeInvalid,
NULL, NULL, &flags);
// Check for error
if (statusCode != noErr) {
NSLog(@"H264: VTCompressionSessionEncodeFrame failed with %d", (int)statusCode);
error = @"H264: VTCompressionSessionEncodeFrame failed ";
// End the session; the guard above keeps later calls from touching the dead session
VTCompressionSessionInvalidate(EncodingSession);
CFRelease(EncodingSession);
EncodingSession = NULL;
return;
}
NSLog(@"H264: VTCompressionSessionEncodeFrame Success");
});
}
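// Variant sketch, not in the original: take timing from the capture buffer instead of the
// 1 Hz frame counter above. Real presentation timestamps keep the encoder's rate control
// accurate and avoid the blurriness the counter-based timestamps were blamed for.
- (void)encodeWithBufferTiming:(CMSampleBufferRef)sampleBuffer
{
    dispatch_sync(aQueue, ^{
        if (EncodingSession == NULL) return;
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        CMTime duration = CMSampleBufferGetDuration(sampleBuffer);
        VTEncodeInfoFlags flags;
        VTCompressionSessionEncodeFrame(EncodingSession, imageBuffer, pts, duration, NULL, NULL, &flags);
    });
}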
- (void) End
{
if (EncodingSession == NULL) return; // nothing to tear down
// Flush any frames still in flight
VTCompressionSessionCompleteFrames(EncodingSession, kCMTimeInvalid);
// End the session
VTCompressionSessionInvalidate(EncodingSession);
CFRelease(EncodingSession);
EncodingSession = NULL;
error = nil;
}
@end
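
The delegate receives bare NAL unit payloads: the AVCC length prefixes are stripped in the callback, and no 00 00 00 01 start codes are added. A consumer that writes a playable Annex-B .h264 elementary stream could look like the following sketch (StreamWriter and its file handling are illustrative, not part of the original repo):

@interface StreamWriter : NSObject <H264HwEncoderImplDelegate>
@property (strong, nonatomic) NSFileHandle *fileHandle;
@end

@implementation StreamWriter
- (void)gotSpsPps:(NSData *)sps pps:(NSData *)pps
{
    // Annex-B requires a start code before every NAL unit, including SPS and PPS.
    const uint8_t startCode[4] = {0x00, 0x00, 0x00, 0x01};
    NSData *start = [NSData dataWithBytes:startCode length:4];
    [self.fileHandle writeData:start];
    [self.fileHandle writeData:sps];
    [self.fileHandle writeData:start];
    [self.fileHandle writeData:pps];
}
- (void)gotEncodedData:(NSData *)data isKeyFrame:(BOOL)isKeyFrame
{
    const uint8_t startCode[4] = {0x00, 0x00, 0x00, 0x01};
    [self.fileHandle writeData:[NSData dataWithBytes:startCode length:4]];
    [self.fileHandle writeData:data];
}
@end

A file written this way (start code + SPS, start code + PPS, then start-code-prefixed slices) forms a valid Annex-B stream and plays in common tools such as ffplay.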