Initial commit: SimVision tvOS streaming app
Features:
- VOD library with movie grouping and version detection
- TV show library with season/episode organization
- TMDB integration for trending shows and recently aired episodes
- Recent releases section with TMDB release-date sorting
- Watch history tracking with continue watching
- Playlist caching (12-hour TTL) for offline support
- M3U playlist parsing with XStream API support
- Authentication with credential storage

Technical:
- SwiftUI for tvOS
- Actor-based services for thread safety
- Persistent caching for playlists, TMDB data, and watch history
- KSPlayer integration for video playback

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
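The "actor-based services" and 12-hour playlist cache called out above might look like the following minimal sketch; the PlaylistCache type and its API are illustrative assumptions, not the app's actual code:

import Foundation

// Minimal sketch of an actor-based playlist cache with a 12-hour TTL (assumed design).
actor PlaylistCache {
    private let ttl: TimeInterval = 12 * 60 * 60
    private var entries: [URL: (fetchedAt: Date, data: Data)] = [:]

    func playlistData(for url: URL, fetch: () async throws -> Data) async rethrows -> Data {
        if let entry = entries[url], Date().timeIntervalSince(entry.fetchedAt) < ttl {
            return entry.data // still fresh: serve from cache (enables offline use)
        }
        let data = try await fetch()
        entries[url] = (Date(), data)
        return data
    }
}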
550
KSPlayer-main/Sources/KSPlayer/MEPlayer/AVFFmpegExtension.swift
Normal file
@@ -0,0 +1,550 @@
import CoreMedia
import FFmpegKit
import Libavcodec
import Libavfilter
import Libavformat

func toDictionary(_ native: OpaquePointer?) -> [String: String] {
    var dict = [String: String]()
    if let native {
        var prev: UnsafeMutablePointer<AVDictionaryEntry>?
        while let tag = av_dict_get(native, "", prev, AV_DICT_IGNORE_SUFFIX) {
            dict[String(cString: tag.pointee.key)] = String(cString: tag.pointee.value)
            prev = tag
        }
    }
    return dict
}

extension UnsafeMutablePointer where Pointee == AVCodecContext {
    func getFormat() {
        pointee.get_format = { ctx, fmt -> AVPixelFormat in
            guard let fmt, let ctx else {
                return AV_PIX_FMT_NONE
            }
            var i = 0
            while fmt[i] != AV_PIX_FMT_NONE {
                if fmt[i] == AV_PIX_FMT_VIDEOTOOLBOX {
                    let deviceCtx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VIDEOTOOLBOX)
                    if deviceCtx == nil {
                        break
                    }
                    // Having hw_device_ctx is enough; hw_frames_ctx is not needed.
                    ctx.pointee.hw_device_ctx = deviceCtx
                    // var framesCtx = av_hwframe_ctx_alloc(deviceCtx)
                    // if let framesCtx {
                    //     let framesCtxData = UnsafeMutableRawPointer(framesCtx.pointee.data)
                    //         .bindMemory(to: AVHWFramesContext.self, capacity: 1)
                    //     framesCtxData.pointee.format = AV_PIX_FMT_VIDEOTOOLBOX
                    //     framesCtxData.pointee.sw_format = ctx.pointee.pix_fmt.bestPixelFormat
                    //     framesCtxData.pointee.width = ctx.pointee.width
                    //     framesCtxData.pointee.height = ctx.pointee.height
                    // }
                    // if av_hwframe_ctx_init(framesCtx) != 0 {
                    //     av_buffer_unref(&framesCtx)
                    //     break
                    // }
                    // ctx.pointee.hw_frames_ctx = framesCtx
                    return fmt[i]
                }
                i += 1
            }
            return fmt[0]
        }
    }
}

extension AVCodecContext {
    func parseASSEvents() -> Int {
        var subtitleASSEvents = 10
        if subtitle_header_size > 0, let events = String(data: Data(bytes: subtitle_header, count: Int(subtitle_header_size)), encoding: .ascii), let eventsRange = events.range(of: "[Events]") {
            var range = eventsRange.upperBound ..< events.endIndex
            if let eventsRange = events.range(of: "Format:", options: String.CompareOptions(rawValue: 0), range: range, locale: nil) {
                range = eventsRange.upperBound ..< events.endIndex
                if let eventsRange = events.rangeOfCharacter(from: CharacterSet.newlines, options: String.CompareOptions(rawValue: 0), range: range) {
                    range = range.lowerBound ..< eventsRange.upperBound
                    let format = events[range]
                    let fields = format.components(separatedBy: ",")
                    let text = fields.last
                    if let text, text.trimmingCharacters(in: .whitespacesAndNewlines) == "Text" {
                        subtitleASSEvents = fields.count
                    }
                }
            }
        }
        return subtitleASSEvents
    }
}

extension AVCodecParameters {
    mutating func createContext(options: KSOptions?) throws -> UnsafeMutablePointer<AVCodecContext> {
        var codecContextOption = avcodec_alloc_context3(nil)
        guard let codecContext = codecContextOption else {
            throw NSError(errorCode: .codecContextCreate)
        }
        var result = avcodec_parameters_to_context(codecContext, &self)
        guard result == 0 else {
            avcodec_free_context(&codecContextOption)
            throw NSError(errorCode: .codecContextSetParam, avErrorCode: result)
        }
        if codec_type == AVMEDIA_TYPE_VIDEO, options?.hardwareDecode ?? false {
            codecContext.getFormat()
        }
        guard let codec = avcodec_find_decoder(codecContext.pointee.codec_id) else {
            avcodec_free_context(&codecContextOption)
            throw NSError(errorCode: .codecContextFindDecoder, avErrorCode: result)
        }
        codecContext.pointee.codec_id = codec.pointee.id
        codecContext.pointee.flags2 |= AV_CODEC_FLAG2_FAST
        if options?.codecLowDelay == true {
            codecContext.pointee.flags |= AV_CODEC_FLAG_LOW_DELAY
        }
        var avOptions = options?.decoderOptions.avOptions
        if let options {
            var lowres = options.lowres
            if lowres > codec.pointee.max_lowres {
                lowres = codec.pointee.max_lowres
            }
            codecContext.pointee.lowres = Int32(lowres)
            if lowres > 0 {
                av_dict_set_int(&avOptions, "lowres", Int64(lowres), 0)
            }
        }
        result = avcodec_open2(codecContext, codec, &avOptions)
        av_dict_free(&avOptions)
        guard result == 0 else {
            avcodec_free_context(&codecContextOption)
            throw NSError(errorCode: .codesContextOpen, avErrorCode: result)
        }
        return codecContext
    }
}

/**
 Clients who specify AVVideoColorPropertiesKey must specify a color primary, transfer function, and Y'CbCr matrix.
 Most clients will want to specify HD, which consists of:

   AVVideoColorPrimaries_ITU_R_709_2
   AVVideoTransferFunction_ITU_R_709_2
   AVVideoYCbCrMatrix_ITU_R_709_2

 If you require SD colorimetry use:

   AVVideoColorPrimaries_SMPTE_C
   AVVideoTransferFunction_ITU_R_709_2
   AVVideoYCbCrMatrix_ITU_R_601_4

 If you require wide gamut HD colorimetry, you can use:

   AVVideoColorPrimaries_P3_D65
   AVVideoTransferFunction_ITU_R_709_2
   AVVideoYCbCrMatrix_ITU_R_709_2

 If you require 10-bit wide gamut HD colorimetry, you can use:

   AVVideoColorPrimaries_P3_D65
   AVVideoTransferFunction_ITU_R_2100_HLG
   AVVideoYCbCrMatrix_ITU_R_709_2
 */
extension AVColorPrimaries {
    var colorPrimaries: CFString? {
        switch self {
        case AVCOL_PRI_BT470BG:
            return kCVImageBufferColorPrimaries_EBU_3213
        case AVCOL_PRI_SMPTE170M:
            return kCVImageBufferColorPrimaries_SMPTE_C
        case AVCOL_PRI_BT709:
            return kCVImageBufferColorPrimaries_ITU_R_709_2
        case AVCOL_PRI_BT2020:
            return kCVImageBufferColorPrimaries_ITU_R_2020
        default:
            return CVColorPrimariesGetStringForIntegerCodePoint(Int32(rawValue))?.takeUnretainedValue()
        }
    }
}

extension AVColorTransferCharacteristic {
    var transferFunction: CFString? {
        switch self {
        case AVCOL_TRC_SMPTE2084:
            return kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ
        case AVCOL_TRC_BT2020_10, AVCOL_TRC_BT2020_12:
            return kCVImageBufferTransferFunction_ITU_R_2020
        case AVCOL_TRC_BT709:
            return kCVImageBufferTransferFunction_ITU_R_709_2
        case AVCOL_TRC_SMPTE240M:
            return kCVImageBufferTransferFunction_SMPTE_240M_1995
        case AVCOL_TRC_LINEAR:
            return kCVImageBufferTransferFunction_Linear
        case AVCOL_TRC_SMPTE428:
            return kCVImageBufferTransferFunction_SMPTE_ST_428_1
        case AVCOL_TRC_ARIB_STD_B67:
            return kCVImageBufferTransferFunction_ITU_R_2100_HLG
        case AVCOL_TRC_GAMMA22, AVCOL_TRC_GAMMA28:
            return kCVImageBufferTransferFunction_UseGamma
        default:
            return CVTransferFunctionGetStringForIntegerCodePoint(Int32(rawValue))?.takeUnretainedValue()
        }
    }
}

extension AVColorSpace {
    var ycbcrMatrix: CFString? {
        switch self {
        case AVCOL_SPC_BT709:
            return kCVImageBufferYCbCrMatrix_ITU_R_709_2
        case AVCOL_SPC_BT470BG, AVCOL_SPC_SMPTE170M:
            return kCVImageBufferYCbCrMatrix_ITU_R_601_4
        case AVCOL_SPC_SMPTE240M:
            return kCVImageBufferYCbCrMatrix_SMPTE_240M_1995
        case AVCOL_SPC_BT2020_CL, AVCOL_SPC_BT2020_NCL:
            return kCVImageBufferYCbCrMatrix_ITU_R_2020
        default:
            return CVYCbCrMatrixGetStringForIntegerCodePoint(Int32(rawValue))?.takeUnretainedValue()
        }
    }
}
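
// Illustrative sketch (not part of the original file): the three mappings above are
// typically attached to a decoded CVPixelBuffer so that downstream renderers pick the
// right colorimetry. The helper name is an assumption for illustration only.
func setColorAttachments(on pixelBuffer: CVPixelBuffer,
                         primaries: AVColorPrimaries,
                         transfer: AVColorTransferCharacteristic,
                         colorSpace: AVColorSpace) {
    if let primaries = primaries.colorPrimaries {
        CVBufferSetAttachment(pixelBuffer, kCVImageBufferColorPrimariesKey, primaries, .shouldPropagate)
    }
    if let transfer = transfer.transferFunction {
        CVBufferSetAttachment(pixelBuffer, kCVImageBufferTransferFunctionKey, transfer, .shouldPropagate)
    }
    if let matrix = colorSpace.ycbcrMatrix {
        CVBufferSetAttachment(pixelBuffer, kCVImageBufferYCbCrMatrixKey, matrix, .shouldPropagate)
    }
}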

extension AVChromaLocation {
    var chroma: CFString? {
        switch self {
        case AVCHROMA_LOC_LEFT:
            return kCVImageBufferChromaLocation_Left
        case AVCHROMA_LOC_CENTER:
            return kCVImageBufferChromaLocation_Center
        case AVCHROMA_LOC_TOP:
            return kCVImageBufferChromaLocation_Top
        case AVCHROMA_LOC_BOTTOM:
            return kCVImageBufferChromaLocation_Bottom
        case AVCHROMA_LOC_TOPLEFT:
            return kCVImageBufferChromaLocation_TopLeft
        case AVCHROMA_LOC_BOTTOMLEFT:
            return kCVImageBufferChromaLocation_BottomLeft
        default:
            return nil
        }
    }
}

extension AVPixelFormat {
    var bitDepth: Int32 {
        let descriptor = av_pix_fmt_desc_get(self)
        return descriptor?.pointee.comp.0.depth ?? 8
    }

    var planeCount: UInt8 {
        if let desc = av_pix_fmt_desc_get(self) {
            switch desc.pointee.nb_components {
            case 3:
                return UInt8(desc.pointee.comp.2.plane + 1)
            case 2:
                return UInt8(desc.pointee.comp.1.plane + 1)
            default:
                return UInt8(desc.pointee.comp.0.plane + 1)
            }
        } else {
            return 1
        }
    }

    var leftShift: UInt8 {
        if [AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV444P10LE].contains(self) {
            return 6
        } else {
            return 0
        }
    }

    // videotoolbox_best_pixel_format
    var bestPixelFormat: AVPixelFormat {
        if let desc = av_pix_fmt_desc_get(self) {
            if desc.pointee.flags & UInt64(AV_PIX_FMT_FLAG_ALPHA) != 0 {
                return AV_PIX_FMT_AYUV64LE
            }
            let depth = desc.pointee.comp.0.depth
            if depth > 10 {
                return desc.pointee.log2_chroma_w == 0 ? AV_PIX_FMT_P416LE : AV_PIX_FMT_P216LE
            }
            if desc.pointee.log2_chroma_w == 0 {
                return depth <= 8 ? AV_PIX_FMT_NV24 : AV_PIX_FMT_P410LE
            }
            if desc.pointee.log2_chroma_h == 0 {
                return depth <= 8 ? AV_PIX_FMT_NV16 : AV_PIX_FMT_P210LE
            }
            return depth <= 8 ? AV_PIX_FMT_NV12 : AV_PIX_FMT_P010LE
        } else {
            return AV_PIX_FMT_NV12
        }
    }

    // swiftlint:disable cyclomatic_complexity
    // avfoundation.m
    func osType(fullRange: Bool = false) -> OSType? {
        switch self {
        case AV_PIX_FMT_MONOBLACK: return kCVPixelFormatType_1Monochrome
        // case AV_PIX_FMT_PAL8: return kCVPixelFormatType_32RGBA
        case AV_PIX_FMT_GRAY8: return kCVPixelFormatType_OneComponent8
        case AV_PIX_FMT_RGB555BE: return kCVPixelFormatType_16BE555
        case AV_PIX_FMT_RGB555LE: return kCVPixelFormatType_16LE555
        case AV_PIX_FMT_RGB565BE: return kCVPixelFormatType_16BE565
        case AV_PIX_FMT_RGB565LE: return kCVPixelFormatType_16LE565
        // CVPixelBufferPool cannot support 24BGR.
        // case AV_PIX_FMT_BGR24: return kCVPixelFormatType_24BGR
        case AV_PIX_FMT_RGB24: return kCVPixelFormatType_24RGB
        case AV_PIX_FMT_0RGB: return kCVPixelFormatType_32ARGB
        case AV_PIX_FMT_ARGB: return kCVPixelFormatType_32ARGB
        case AV_PIX_FMT_BGR0: return kCVPixelFormatType_32BGRA
        case AV_PIX_FMT_BGRA: return kCVPixelFormatType_32BGRA
        case AV_PIX_FMT_0BGR: return kCVPixelFormatType_32ABGR
        case AV_PIX_FMT_RGB0: return kCVPixelFormatType_32RGBA
        case AV_PIX_FMT_RGBA: return kCVPixelFormatType_32RGBA
        case AV_PIX_FMT_BGR48BE, AV_PIX_FMT_BGR48LE: return kCVPixelFormatType_48RGB
        case AV_PIX_FMT_NV12: return fullRange ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange
        // AVSampleBufferDisplayLayer cannot display kCVPixelFormatType_420YpCbCr8PlanarFullRange, so kCVPixelFormatType_420YpCbCr8BiPlanarFullRange is used instead.
        case AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P: return fullRange ? kCVPixelFormatType_420YpCbCr8BiPlanarFullRange : kCVPixelFormatType_420YpCbCr8Planar
        case AV_PIX_FMT_P010BE, AV_PIX_FMT_P010LE, AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE: return fullRange ? kCVPixelFormatType_420YpCbCr10BiPlanarFullRange : kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange
        case AV_PIX_FMT_UYVY422: return kCVPixelFormatType_422YpCbCr8
        case AV_PIX_FMT_YUYV422: return kCVPixelFormatType_422YpCbCr8_yuvs
        case AV_PIX_FMT_NV16: return fullRange ? kCVPixelFormatType_422YpCbCr8BiPlanarFullRange : kCVPixelFormatType_422YpCbCr8BiPlanarVideoRange
        case AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P: return fullRange ? kCVPixelFormatType_422YpCbCr8BiPlanarFullRange : kCVPixelFormatType_422YpCbCr8BiPlanarVideoRange
        case AV_PIX_FMT_Y210BE, AV_PIX_FMT_Y210LE: return kCVPixelFormatType_422YpCbCr10
        case AV_PIX_FMT_P210BE, AV_PIX_FMT_P210LE, AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE: return fullRange ? kCVPixelFormatType_422YpCbCr10BiPlanarFullRange : kCVPixelFormatType_422YpCbCr10BiPlanarVideoRange
        case AV_PIX_FMT_P216BE, AV_PIX_FMT_P216LE, AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE: return kCVPixelFormatType_422YpCbCr16BiPlanarVideoRange
        case AV_PIX_FMT_NV24, AV_PIX_FMT_YUV444P: return fullRange ? kCVPixelFormatType_444YpCbCr8BiPlanarFullRange : kCVPixelFormatType_444YpCbCr8BiPlanarVideoRange
        case AV_PIX_FMT_YUVA444P: return kCVPixelFormatType_4444YpCbCrA8R
        case AV_PIX_FMT_P410BE, AV_PIX_FMT_P410LE, AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE: return fullRange ? kCVPixelFormatType_444YpCbCr10BiPlanarFullRange : kCVPixelFormatType_444YpCbCr10BiPlanarVideoRange
        case AV_PIX_FMT_P416BE, AV_PIX_FMT_P416LE: return kCVPixelFormatType_444YpCbCr16BiPlanarVideoRange
        case AV_PIX_FMT_AYUV64BE, AV_PIX_FMT_AYUV64LE: return kCVPixelFormatType_4444AYpCbCr16
        case AV_PIX_FMT_YUVA444P16BE, AV_PIX_FMT_YUVA444P16LE: return kCVPixelFormatType_4444AYpCbCr16
        default:
            return nil
        }
    }
    // swiftlint:enable cyclomatic_complexity
}
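
// Illustrative sketch (an assumption, not in the original source): how osType(fullRange:)
// can drive CVPixelBuffer allocation for a software-decoded frame.
func makePixelBuffer(format: AVPixelFormat, width: Int, height: Int, fullRange: Bool) -> CVPixelBuffer? {
    guard let osType = format.osType(fullRange: fullRange) else { return nil }
    var pixelBuffer: CVPixelBuffer?
    // IOSurface backing keeps the buffer usable by Metal and AVSampleBufferDisplayLayer.
    let attributes = [kCVPixelBufferIOSurfacePropertiesKey: NSDictionary()] as CFDictionary
    CVPixelBufferCreate(kCFAllocatorDefault, width, height, osType, attributes, &pixelBuffer)
    return pixelBuffer
}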

extension AVCodecID {
    var mediaSubType: CMFormatDescription.MediaSubType {
        switch self {
        case AV_CODEC_ID_H263:
            return .h263
        case AV_CODEC_ID_H264:
            return .h264
        case AV_CODEC_ID_HEVC:
            return .hevc
        case AV_CODEC_ID_MPEG1VIDEO:
            return .mpeg1Video
        case AV_CODEC_ID_MPEG2VIDEO:
            return .mpeg2Video
        case AV_CODEC_ID_MPEG4:
            return .mpeg4Video
        case AV_CODEC_ID_VP9:
            return CMFormatDescription.MediaSubType(rawValue: kCMVideoCodecType_VP9)
        case AV_CODEC_ID_AAC:
            return .mpeg4AAC
        case AV_CODEC_ID_AC3:
            return .ac3
        case AV_CODEC_ID_ADPCM_IMA_QT:
            return .appleIMA4
        case AV_CODEC_ID_ALAC:
            return .appleLossless
        case AV_CODEC_ID_AMR_NB:
            return .amr
        case AV_CODEC_ID_EAC3:
            return .enhancedAC3
        case AV_CODEC_ID_GSM_MS:
            return .microsoftGSM
        case AV_CODEC_ID_ILBC:
            return .iLBC
        case AV_CODEC_ID_MP1:
            return .mpegLayer1
        case AV_CODEC_ID_MP2:
            return .mpegLayer2
        case AV_CODEC_ID_MP3:
            return .mpegLayer3
        case AV_CODEC_ID_PCM_ALAW:
            return .aLaw
        case AV_CODEC_ID_PCM_MULAW:
            return .uLaw
        case AV_CODEC_ID_QDMC:
            return .qDesign
        case AV_CODEC_ID_QDM2:
            return .qDesign2
        default:
            return CMFormatDescription.MediaSubType(rawValue: 0)
        }
    }
}

extension AVRational {
    var size: CGSize {
        num > 0 && den > 0 ? CGSize(width: Int(num), height: Int(den)) : CGSize(width: 1, height: 1)
    }
}

extension AVBufferSrcParameters: Equatable {
    public static func == (lhs: AVBufferSrcParameters, rhs: AVBufferSrcParameters) -> Bool {
        lhs.format == rhs.format && lhs.width == rhs.width && lhs.height == rhs.height && lhs.sample_aspect_ratio == rhs.sample_aspect_ratio && lhs.sample_rate == rhs.sample_rate && lhs.ch_layout == rhs.ch_layout
    }

    var arg: String {
        if sample_rate > 0 {
            let fmt = String(cString: av_get_sample_fmt_name(AVSampleFormat(rawValue: format)))
            return "sample_rate=\(sample_rate):sample_fmt=\(fmt):time_base=\(time_base.num)/\(time_base.den):channels=\(ch_layout.nb_channels):channel_layout=\(ch_layout.description)"
        } else {
            return "video_size=\(width)x\(height):pix_fmt=\(format):time_base=\(time_base.num)/\(time_base.den):pixel_aspect=\(sample_aspect_ratio.num)/\(sample_aspect_ratio.den)"
        }
    }
}

extension AVChannelLayout: Equatable {
    public static func == (lhs: AVChannelLayout, rhs: AVChannelLayout) -> Bool {
        var lhs = lhs
        var rhs = rhs
        return av_channel_layout_compare(&lhs, &rhs) == 0
    }
}

extension AVChannelLayout: CustomStringConvertible {
    static let defaultValue = AVChannelLayout(order: AV_CHANNEL_ORDER_NATIVE, nb_channels: 2, u: AVChannelLayout.__Unnamed_union_u(mask: swift_AV_CH_LAYOUT_STEREO), opaque: nil)
    var layoutTag: AudioChannelLayoutTag? {
        KSLog("[audio] FFmpeg AVChannelLayout: \(self) order: \(order) mask: \(u.mask)")
        let tag = layoutMapTuple.first { _, mask in
            u.mask == mask
        }?.tag
        if let tag {
            return tag
        } else {
            KSLog("[audio] cannot find AudioChannelLayoutTag for FFmpeg channelLayout: \(self) order: \(order) mask: \(u.mask)")
            return nil
        }
    }

    public var description: String {
        var channelLayout = self
        var str = [Int8](repeating: 0, count: 64)
        _ = av_channel_layout_describe(&channelLayout, &str, str.count)
        return String(cString: str)
    }
}

extension AVRational: Equatable {
    public static func == (lhs: AVRational, rhs: AVRational) -> Bool {
        lhs.num == rhs.num && lhs.den == rhs.den
    }
}

public struct AVError: Error, Equatable {
    public var code: Int32
    public var message: String

    init(code: Int32) {
        self.code = code
        message = String(avErrorCode: code)
    }
}

public extension Dictionary where Key == String {
    var avOptions: OpaquePointer? {
        var avOptions: OpaquePointer?
        forEach { key, value in
            if let i = value as? Int64 {
                av_dict_set_int(&avOptions, key, i, 0)
            } else if let i = value as? Int {
                av_dict_set_int(&avOptions, key, Int64(i), 0)
            } else if let string = value as? String {
                av_dict_set(&avOptions, key, string, 0)
            } else if let dic = value as? Dictionary {
                let string = dic.map { "\($0.0)=\($0.1)" }.joined(separator: "\r\n")
                av_dict_set(&avOptions, key, string, 0)
            } else if let array = value as? [String] {
                let string = array.joined(separator: "+")
                av_dict_set(&avOptions, key, string, 0)
            }
        }
        return avOptions
    }
}
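
// Usage sketch (illustrative keys and values): avOptions builds an FFmpeg AVDictionary
// that calls such as avcodec_open2 / avformat_open_input consume; the caller owns the
// dictionary and must free it afterwards.
func demoAVOptions() {
    let options: [String: Any] = ["timeout": 10_000_000, "user_agent": "SimVision"]
    var avOptions = options.avOptions
    // ... pass &avOptions to an FFmpeg open call here ...
    av_dict_free(&avOptions)
}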

extension String {
    init(avErrorCode code: Int32) {
        let buf = UnsafeMutablePointer<Int8>.allocate(capacity: Int(AV_ERROR_MAX_STRING_SIZE))
        buf.initialize(repeating: 0, count: Int(AV_ERROR_MAX_STRING_SIZE))
        defer { buf.deallocate() }
        self = String(cString: av_make_error_string(buf, Int(AV_ERROR_MAX_STRING_SIZE), code))
    }
}

public extension NSError {
    convenience init(errorCode: KSPlayerErrorCode, avErrorCode: Int32) {
        let underlyingError = AVError(code: avErrorCode)
        self.init(errorCode: errorCode, userInfo: [NSUnderlyingErrorKey: underlyingError])
    }
}

public extension AVError {
    /// Resource temporarily unavailable
    static let tryAgain = AVError(code: swift_AVERROR(EAGAIN))
    /// Invalid argument
    static let invalidArgument = AVError(code: swift_AVERROR(EINVAL))
    /// Cannot allocate memory
    static let outOfMemory = AVError(code: swift_AVERROR(ENOMEM))
    /// The value is out of range
    static let outOfRange = AVError(code: swift_AVERROR(ERANGE))
    /// The value is not valid
    static let invalidValue = AVError(code: swift_AVERROR(EINVAL))
    /// Function not implemented
    static let noSystem = AVError(code: swift_AVERROR(ENOSYS))

    /// Bitstream filter not found
    static let bitstreamFilterNotFound = AVError(code: swift_AVERROR_BSF_NOT_FOUND)
    /// Internal bug, also see `bug2`
    static let bug = AVError(code: swift_AVERROR_BUG)
    /// Buffer too small
    static let bufferTooSmall = AVError(code: swift_AVERROR_BUFFER_TOO_SMALL)
    /// Decoder not found
    static let decoderNotFound = AVError(code: swift_AVERROR_DECODER_NOT_FOUND)
    /// Demuxer not found
    static let demuxerNotFound = AVError(code: swift_AVERROR_DEMUXER_NOT_FOUND)
    /// Encoder not found
    static let encoderNotFound = AVError(code: swift_AVERROR_ENCODER_NOT_FOUND)
    /// End of file
    static let eof = AVError(code: swift_AVERROR_EOF)
    /// Immediate exit was requested; the called function should not be restarted
    static let exit = AVError(code: swift_AVERROR_EXIT)
    /// Generic error in an external library
    static let external = AVError(code: swift_AVERROR_EXTERNAL)
    /// Filter not found
    static let filterNotFound = AVError(code: swift_AVERROR_FILTER_NOT_FOUND)
    /// Invalid data found when processing input
    static let invalidData = AVError(code: swift_AVERROR_INVALIDDATA)
    /// Muxer not found
    static let muxerNotFound = AVError(code: swift_AVERROR_MUXER_NOT_FOUND)
    /// Option not found
    static let optionNotFound = AVError(code: swift_AVERROR_OPTION_NOT_FOUND)
    /// Not yet implemented in FFmpeg, patches welcome
    static let patchWelcome = AVError(code: swift_AVERROR_PATCHWELCOME)
    /// Protocol not found
    static let protocolNotFound = AVError(code: swift_AVERROR_PROTOCOL_NOT_FOUND)
    /// Stream not found
    static let streamNotFound = AVError(code: swift_AVERROR_STREAM_NOT_FOUND)
    /// This is semantically identical to `bug`. It has been introduced in Libav after our `bug` and
    /// with a modified value.
    static let bug2 = AVError(code: swift_AVERROR_BUG2)
    /// Unknown error, typically from an external library
    static let unknown = AVError(code: swift_AVERROR_UNKNOWN)
    /// Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
    static let experimental = AVError(code: swift_AVERROR_EXPERIMENTAL)
    /// Input changed between calls. Reconfiguration is required. (can be OR-ed with `outputChanged`)
    static let inputChanged = AVError(code: swift_AVERROR_INPUT_CHANGED)
    /// Output changed between calls. Reconfiguration is required. (can be OR-ed with `inputChanged`)
    static let outputChanged = AVError(code: swift_AVERROR_OUTPUT_CHANGED)

    /* HTTP & RTSP errors */
    static let httpBadRequest = AVError(code: swift_AVERROR_HTTP_BAD_REQUEST)
    static let httpUnauthorized = AVError(code: swift_AVERROR_HTTP_UNAUTHORIZED)
    static let httpForbidden = AVError(code: swift_AVERROR_HTTP_FORBIDDEN)
    static let httpNotFound = AVError(code: swift_AVERROR_HTTP_NOT_FOUND)
    static let httpOther4xx = AVError(code: swift_AVERROR_HTTP_OTHER_4XX)
    static let httpServerError = AVError(code: swift_AVERROR_HTTP_SERVER_ERROR)
}
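
// Illustrative sketch (helper name is an assumption): demuxing loops commonly branch on
// these constants, e.g. treating EAGAIN as "call again later" rather than a hard failure.
func isRecoverable(_ error: AVError) -> Bool {
    error == .tryAgain || error == .inputChanged || error == .outputChanged
}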
333
KSPlayer-main/Sources/KSPlayer/MEPlayer/AVFoundationExtension.swift
Normal file
@@ -0,0 +1,333 @@
//
//  AVFoundationExtension.swift
//
//
//  Created by kintan on 2023/1/9.
//

import AVFoundation
import CoreMedia
import FFmpegKit
import Libavutil

extension OSType {
    var bitDepth: Int32 {
        switch self {
        case kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange, kCVPixelFormatType_422YpCbCr10BiPlanarVideoRange, kCVPixelFormatType_444YpCbCr10BiPlanarVideoRange, kCVPixelFormatType_420YpCbCr10BiPlanarFullRange, kCVPixelFormatType_422YpCbCr10BiPlanarFullRange, kCVPixelFormatType_444YpCbCr10BiPlanarFullRange:
            return 10
        default:
            return 8
        }
    }
}

extension CVPixelBufferPool {
    static func create(width: Int32, height: Int32, bytesPerRowAlignment: Int32, pixelFormatType: OSType, bufferCount: Int = 24) -> CVPixelBufferPool? {
        let sourcePixelBufferOptions: NSMutableDictionary = [
            kCVPixelBufferPixelFormatTypeKey: pixelFormatType,
            kCVPixelBufferWidthKey: width,
            kCVPixelBufferHeightKey: height,
            kCVPixelBufferBytesPerRowAlignmentKey: bytesPerRowAlignment.alignment(value: 64),
            kCVPixelBufferMetalCompatibilityKey: true,
            kCVPixelBufferIOSurfacePropertiesKey: NSDictionary(),
        ]
        var outputPool: CVPixelBufferPool?
        let pixelBufferPoolOptions: NSDictionary = [kCVPixelBufferPoolMinimumBufferCountKey: bufferCount]
        CVPixelBufferPoolCreate(kCFAllocatorDefault, pixelBufferPoolOptions, sourcePixelBufferOptions, &outputPool)
        return outputPool
    }
}

extension AudioUnit {
    var channelLayout: UnsafeMutablePointer<AudioChannelLayout> {
        var size = UInt32(0)
        AudioUnitGetPropertyInfo(self, kAudioUnitProperty_AudioChannelLayout, kAudioUnitScope_Output, 0, &size, nil)
        let data = UnsafeMutableRawPointer.allocate(byteCount: Int(size), alignment: MemoryLayout<Int8>.alignment)
        AudioUnitGetProperty(self, kAudioUnitProperty_AudioChannelLayout, kAudioUnitScope_Output, 0, data, &size)
        let layout = data.bindMemory(to: AudioChannelLayout.self, capacity: 1)
        let tag = layout.pointee.mChannelLayoutTag
        KSLog("[audio] unit tag: \(tag)")
        if tag == kAudioChannelLayoutTag_UseChannelDescriptions {
            KSLog("[audio] unit channelDescriptions: \(layout.channelDescriptions)")
            return layout
        }
        if tag == kAudioChannelLayoutTag_UseChannelBitmap {
            return layout.pointee.mChannelBitmap.channelLayout
        } else {
            let layout = tag.channelLayout
            KSLog("[audio] unit channelDescriptions: \(layout.channelDescriptions)")
            return layout
        }
    }
}

extension AudioChannelLayoutTag {
    var channelLayout: UnsafeMutablePointer<AudioChannelLayout> {
        var tag = self
        var size = UInt32(0)
        AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForTag, UInt32(MemoryLayout<AudioChannelLayoutTag>.size), &tag, &size)
        let data = UnsafeMutableRawPointer.allocate(byteCount: Int(size), alignment: MemoryLayout<Int8>.alignment)
        AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForTag, UInt32(MemoryLayout<AudioChannelLayoutTag>.size), &tag, &size, data)
        let newLayout = data.bindMemory(to: AudioChannelLayout.self, capacity: 1)
        newLayout.pointee.mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions
        return newLayout
    }
}

extension AudioChannelBitmap {
    var channelLayout: UnsafeMutablePointer<AudioChannelLayout> {
        var mChannelBitmap = self
        var size = UInt32(0)
        AudioFormatGetPropertyInfo(kAudioFormatProperty_ChannelLayoutForBitmap, UInt32(MemoryLayout<AudioChannelBitmap>.size), &mChannelBitmap, &size)
        let data = UnsafeMutableRawPointer.allocate(byteCount: Int(size), alignment: MemoryLayout<Int8>.alignment)
        AudioFormatGetProperty(kAudioFormatProperty_ChannelLayoutForBitmap, UInt32(MemoryLayout<AudioChannelBitmap>.size), &mChannelBitmap, &size, data)
        let newLayout = data.bindMemory(to: AudioChannelLayout.self, capacity: 1)
        newLayout.pointee.mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions
        return newLayout
    }
}

extension UnsafePointer<AudioChannelLayout> {
    var channelDescriptions: [AudioChannelDescription] {
        UnsafeMutablePointer(mutating: self).channelDescriptions
    }
}

extension UnsafeMutablePointer<AudioChannelLayout> {
    var channelDescriptions: [AudioChannelDescription] {
        let n = pointee.mNumberChannelDescriptions
        return withUnsafeMutablePointer(to: &pointee.mChannelDescriptions) { start in
            let buffers = UnsafeBufferPointer<AudioChannelDescription>(start: start, count: Int(n))
            return (0 ..< Int(n)).map {
                buffers[$0]
            }
        }
    }
}

extension AudioChannelLayout: CustomStringConvertible {
    public var description: String {
        "AudioChannelLayoutTag: \(mChannelLayoutTag), mNumberChannelDescriptions: \(mNumberChannelDescriptions)"
    }
}

extension AVAudioChannelLayout {
    func channelLayout() -> AVChannelLayout {
        KSLog("[audio] channelLayout: \(layout.pointee.description)")
        var mask: UInt64?
        if layoutTag == kAudioChannelLayoutTag_UseChannelDescriptions {
            var newMask = UInt64(0)
            for description in layout.channelDescriptions {
                let label = description.mChannelLabel
                KSLog("[audio] label: \(label)")
                let channel = label.avChannel.rawValue
                KSLog("[audio] avChannel: \(channel)")
                if channel >= 0 {
                    newMask |= 1 << channel
                }
            }
            mask = newMask
        } else {
            mask = layoutMapTuple.first { tag, _ in
                tag == layoutTag
            }?.mask
        }
        var outChannel = AVChannelLayout()
        if let mask {
            // AV_CHANNEL_ORDER_CUSTOM cannot be used here.
            av_channel_layout_from_mask(&outChannel, mask)
        } else {
            av_channel_layout_default(&outChannel, Int32(channelCount))
        }
        KSLog("[audio] out mask: \(outChannel.u.mask) nb_channels: \(outChannel.nb_channels)")
        return outChannel
    }

    public var channelDescriptions: String {
        "tag: \(layoutTag), channelDescriptions: \(layout.channelDescriptions)"
    }
}

extension AVAudioFormat {
    var sampleFormat: AVSampleFormat {
        switch commonFormat {
        case .pcmFormatFloat32:
            return isInterleaved ? AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_FLTP
        case .pcmFormatFloat64:
            return isInterleaved ? AV_SAMPLE_FMT_DBL : AV_SAMPLE_FMT_DBLP
        case .pcmFormatInt16:
            return isInterleaved ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_S16P
        case .pcmFormatInt32:
            return isInterleaved ? AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S32P
        case .otherFormat:
            return isInterleaved ? AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_FLTP
        @unknown default:
            return isInterleaved ? AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_FLTP
        }
    }

    var sampleSize: UInt32 {
        switch commonFormat {
        case .pcmFormatFloat32:
            return isInterleaved ? channelCount * 4 : 4
        case .pcmFormatFloat64:
            return isInterleaved ? channelCount * 8 : 8
        case .pcmFormatInt16:
            return isInterleaved ? channelCount * 2 : 2
        case .pcmFormatInt32:
            return isInterleaved ? channelCount * 4 : 4
        case .otherFormat:
            return channelCount * 4
        @unknown default:
            return channelCount * 4
        }
    }

    func isChannelEqual(_ object: AVAudioFormat) -> Bool {
        sampleRate == object.sampleRate && channelCount == object.channelCount && commonFormat == object.commonFormat && isInterleaved == object.isInterleaved
    }
}

let layoutMapTuple =
    [(tag: kAudioChannelLayoutTag_Mono, mask: swift_AV_CH_LAYOUT_MONO),
     (tag: kAudioChannelLayoutTag_Stereo, mask: swift_AV_CH_LAYOUT_STEREO),
     (tag: kAudioChannelLayoutTag_WAVE_2_1, mask: swift_AV_CH_LAYOUT_2POINT1),
     (tag: kAudioChannelLayoutTag_ITU_2_1, mask: swift_AV_CH_LAYOUT_2_1),
     (tag: kAudioChannelLayoutTag_MPEG_3_0_A, mask: swift_AV_CH_LAYOUT_SURROUND),
     (tag: kAudioChannelLayoutTag_DVD_10, mask: swift_AV_CH_LAYOUT_3POINT1),
     (tag: kAudioChannelLayoutTag_Logic_4_0_A, mask: swift_AV_CH_LAYOUT_4POINT0),
     (tag: kAudioChannelLayoutTag_Logic_Quadraphonic, mask: swift_AV_CH_LAYOUT_2_2),
     (tag: kAudioChannelLayoutTag_WAVE_4_0_B, mask: swift_AV_CH_LAYOUT_QUAD),
     (tag: kAudioChannelLayoutTag_DVD_11, mask: swift_AV_CH_LAYOUT_4POINT1),
     (tag: kAudioChannelLayoutTag_Logic_5_0_A, mask: swift_AV_CH_LAYOUT_5POINT0),
     (tag: kAudioChannelLayoutTag_WAVE_5_0_B, mask: swift_AV_CH_LAYOUT_5POINT0_BACK),
     (tag: kAudioChannelLayoutTag_Logic_5_1_A, mask: swift_AV_CH_LAYOUT_5POINT1),
     (tag: kAudioChannelLayoutTag_WAVE_5_1_B, mask: swift_AV_CH_LAYOUT_5POINT1_BACK),
     (tag: kAudioChannelLayoutTag_Logic_6_0_A, mask: swift_AV_CH_LAYOUT_6POINT0),
     (tag: kAudioChannelLayoutTag_DTS_6_0_A, mask: swift_AV_CH_LAYOUT_6POINT0_FRONT),
     (tag: kAudioChannelLayoutTag_DTS_6_0_C, mask: swift_AV_CH_LAYOUT_HEXAGONAL),
     (tag: kAudioChannelLayoutTag_Logic_6_1_C, mask: swift_AV_CH_LAYOUT_6POINT1),
     (tag: kAudioChannelLayoutTag_DTS_6_1_A, mask: swift_AV_CH_LAYOUT_6POINT1_FRONT),
     (tag: kAudioChannelLayoutTag_DTS_6_1_C, mask: swift_AV_CH_LAYOUT_6POINT1_BACK),
     (tag: kAudioChannelLayoutTag_AAC_7_0, mask: swift_AV_CH_LAYOUT_7POINT0),
     (tag: kAudioChannelLayoutTag_Logic_7_1_A, mask: swift_AV_CH_LAYOUT_7POINT1),
     (tag: kAudioChannelLayoutTag_Logic_7_1_SDDS_A, mask: swift_AV_CH_LAYOUT_7POINT1_WIDE),
     (tag: kAudioChannelLayoutTag_AAC_Octagonal, mask: swift_AV_CH_LAYOUT_OCTAGONAL),
     // (tag: kAudioChannelLayoutTag_Logic_Atmos_5_1_2, mask: swift_AV_CH_LAYOUT_7POINT1_WIDE_BACK),
    ]
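
// Illustrative sketch (not in the original file): the reverse lookup over layoutMapTuple,
// mapping an FFmpeg channel mask back to a Core Audio layout tag.
func layoutTag(forChannelMask mask: UInt64) -> AudioChannelLayoutTag? {
    layoutMapTuple.first { _, layoutMask in layoutMask == mask }?.tag
}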

// Some channel abbreviations used below:
// Lss - left side surround
// Rss - right side surround
// Leos - Left edge of screen
// Reos - Right edge of screen
// Lbs - Left back surround
// Rbs - Right back surround
// Lt - left matrix total. for matrix encoded stereo.
// Rt - right matrix total. for matrix encoded stereo.

extension AudioChannelLabel {
    var avChannel: AVChannel {
        switch self {
        case kAudioChannelLabel_Left:
            // L - left
            return AV_CHAN_FRONT_LEFT
        case kAudioChannelLabel_Right:
            // R - right
            return AV_CHAN_FRONT_RIGHT
        case kAudioChannelLabel_Center:
            // C - center
            return AV_CHAN_FRONT_CENTER
        case kAudioChannelLabel_LFEScreen:
            // Lfe
            return AV_CHAN_LOW_FREQUENCY
        case kAudioChannelLabel_LeftSurround:
            // Ls - left surround
            return AV_CHAN_SIDE_LEFT
        case kAudioChannelLabel_RightSurround:
            // Rs - right surround
            return AV_CHAN_SIDE_RIGHT
        case kAudioChannelLabel_LeftCenter:
            // Lc - left center
            return AV_CHAN_FRONT_LEFT_OF_CENTER
        case kAudioChannelLabel_RightCenter:
            // Rc - right center
            return AV_CHAN_FRONT_RIGHT_OF_CENTER
        case kAudioChannelLabel_CenterSurround:
            // Cs - center surround "Back Center" or plain "Rear Surround"
            return AV_CHAN_BACK_CENTER
        case kAudioChannelLabel_LeftSurroundDirect:
            // Lsd - left surround direct
            return AV_CHAN_SURROUND_DIRECT_LEFT
        case kAudioChannelLabel_RightSurroundDirect:
            // Rsd - right surround direct
            return AV_CHAN_SURROUND_DIRECT_RIGHT
        case kAudioChannelLabel_TopCenterSurround:
            // Ts - top surround
            return AV_CHAN_TOP_CENTER
        case kAudioChannelLabel_VerticalHeightLeft:
            // Vhl - vertical height left Top Front Left
            return AV_CHAN_TOP_FRONT_LEFT
        case kAudioChannelLabel_VerticalHeightCenter:
            // Vhc - vertical height center Top Front Center
            return AV_CHAN_TOP_FRONT_CENTER
        case kAudioChannelLabel_VerticalHeightRight:
            // Vhr - vertical height right Top Front right
            return AV_CHAN_TOP_FRONT_RIGHT
        case kAudioChannelLabel_TopBackLeft:
            // Ltr - left top rear
            return AV_CHAN_TOP_BACK_LEFT
        case kAudioChannelLabel_TopBackCenter:
            // Ctr - center top rear
            return AV_CHAN_TOP_BACK_CENTER
        case kAudioChannelLabel_TopBackRight:
            // Rtr - right top rear
            return AV_CHAN_TOP_BACK_RIGHT
        case kAudioChannelLabel_RearSurroundLeft:
            // Rls - rear left surround
            return AV_CHAN_BACK_LEFT
        case kAudioChannelLabel_RearSurroundRight:
            // Rrs - rear right surround
            return AV_CHAN_BACK_RIGHT
        case kAudioChannelLabel_LeftWide:
            // Lw - left wide
            return AV_CHAN_WIDE_LEFT
        case kAudioChannelLabel_RightWide:
            // Rw - right wide
            return AV_CHAN_WIDE_RIGHT
        case kAudioChannelLabel_LFE2:
            // LFE2
            return AV_CHAN_LOW_FREQUENCY_2
        case kAudioChannelLabel_Mono:
            // C - center
            return AV_CHAN_FRONT_CENTER
        case kAudioChannelLabel_LeftTopMiddle:
            // Ltm - left top middle
            return AV_CHAN_NONE
        case kAudioChannelLabel_RightTopMiddle:
            // Rtm - right top middle
            return AV_CHAN_NONE
        case kAudioChannelLabel_LeftTopSurround:
            // Lts - Left top surround
            return AV_CHAN_TOP_SIDE_LEFT
        case kAudioChannelLabel_RightTopSurround:
            // Rts - Right top surround
            return AV_CHAN_TOP_SIDE_RIGHT
        case kAudioChannelLabel_LeftBottom:
            // Lb - left bottom
            return AV_CHAN_BOTTOM_FRONT_LEFT
        case kAudioChannelLabel_RightBottom:
            // Rb - Right bottom
            return AV_CHAN_BOTTOM_FRONT_RIGHT
        case kAudioChannelLabel_CenterBottom:
            // Cb - Center bottom
            return AV_CHAN_BOTTOM_FRONT_CENTER
        case kAudioChannelLabel_HeadphonesLeft:
            return AV_CHAN_STEREO_LEFT
        case kAudioChannelLabel_HeadphonesRight:
            return AV_CHAN_STEREO_RIGHT
        default:
            return AV_CHAN_NONE
        }
    }
}
338
KSPlayer-main/Sources/KSPlayer/MEPlayer/AudioEnginePlayer.swift
Normal file
@@ -0,0 +1,338 @@
//
//  AudioEnginePlayer.swift
//  KSPlayer
//
//  Created by kintan on 2018/3/11.
//

import AVFoundation
import CoreAudio

public protocol AudioOutput: FrameOutput {
    var playbackRate: Float { get set }
    var volume: Float { get set }
    var isMuted: Bool { get set }
    init()
    func prepare(audioFormat: AVAudioFormat)
}

public protocol AudioDynamicsProcessor {
    var audioUnitForDynamicsProcessor: AudioUnit { get }
}

public extension AudioDynamicsProcessor {
    var attackTime: Float {
        get {
            var value = AudioUnitParameterValue(1.0)
            AudioUnitGetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_AttackTime, kAudioUnitScope_Global, 0, &value)
            return value
        }
        set {
            AudioUnitSetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_AttackTime, kAudioUnitScope_Global, 0, AudioUnitParameterValue(newValue), 0)
        }
    }

    var releaseTime: Float {
        get {
            var value = AudioUnitParameterValue(1.0)
            AudioUnitGetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_ReleaseTime, kAudioUnitScope_Global, 0, &value)
            return value
        }
        set {
            AudioUnitSetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_ReleaseTime, kAudioUnitScope_Global, 0, AudioUnitParameterValue(newValue), 0)
        }
    }

    var threshold: Float {
        get {
            var value = AudioUnitParameterValue(1.0)
            AudioUnitGetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_Threshold, kAudioUnitScope_Global, 0, &value)
            return value
        }
        set {
            AudioUnitSetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_Threshold, kAudioUnitScope_Global, 0, AudioUnitParameterValue(newValue), 0)
        }
    }

    var expansionRatio: Float {
        get {
            var value = AudioUnitParameterValue(1.0)
            AudioUnitGetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_ExpansionRatio, kAudioUnitScope_Global, 0, &value)
            return value
        }
        set {
            AudioUnitSetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_ExpansionRatio, kAudioUnitScope_Global, 0, AudioUnitParameterValue(newValue), 0)
        }
    }

    var overallGain: Float {
        get {
            var value = AudioUnitParameterValue(1.0)
            AudioUnitGetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_OverallGain, kAudioUnitScope_Global, 0, &value)
            return value
        }
        set {
            AudioUnitSetParameter(audioUnitForDynamicsProcessor, kDynamicsProcessorParam_OverallGain, kAudioUnitScope_Global, 0, AudioUnitParameterValue(newValue), 0)
        }
    }
}

public final class AudioEngineDynamicsPlayer: AudioEnginePlayer, AudioDynamicsProcessor {
    private let dynamicsProcessor = AVAudioUnitEffect(audioComponentDescription:
        AudioComponentDescription(componentType: kAudioUnitType_Effect,
                                  componentSubType: kAudioUnitSubType_DynamicsProcessor,
                                  componentManufacturer: kAudioUnitManufacturer_Apple,
                                  componentFlags: 0,
                                  componentFlagsMask: 0))
    public var audioUnitForDynamicsProcessor: AudioUnit {
        dynamicsProcessor.audioUnit
    }

    override func audioNodes() -> [AVAudioNode] {
        var nodes: [AVAudioNode] = [dynamicsProcessor]
        nodes.append(contentsOf: super.audioNodes())
        return nodes
    }

    public required init() {
        super.init()
        engine.attach(dynamicsProcessor)
    }
}
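
// Usage sketch (illustrative values, not in the original source): the dynamics processor
// exposed above can act as a simple "night mode" compressor.
//     let player = AudioEngineDynamicsPlayer()
//     player.threshold = -30   // dB: start compressing earlier
//     player.overallGain = 6   // dB: make up the lost loudness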

public class AudioEnginePlayer: AudioOutput {
    public let engine = AVAudioEngine()
    private var sourceNode: AVAudioSourceNode?
    private var sourceNodeAudioFormat: AVAudioFormat?

    // private let reverb = AVAudioUnitReverb()
    // private let nbandEQ = AVAudioUnitEQ()
    // private let distortion = AVAudioUnitDistortion()
    // private let delay = AVAudioUnitDelay()
    private let timePitch = AVAudioUnitTimePitch()
    private var sampleSize = UInt32(MemoryLayout<Float>.size)
    private var currentRenderReadOffset = UInt32(0)
    private var outputLatency = TimeInterval(0)
    public weak var renderSource: OutputRenderSourceDelegate?
    private var currentRender: AudioFrame? {
        didSet {
            if currentRender == nil {
                currentRenderReadOffset = 0
            }
        }
    }

    public var playbackRate: Float {
        get {
            timePitch.rate
        }
        set {
            timePitch.rate = min(32, max(1 / 32, newValue))
        }
    }

    public var volume: Float {
        get {
            sourceNode?.volume ?? 1
        }
        set {
            sourceNode?.volume = newValue
        }
    }

    public var isMuted: Bool {
        get {
            engine.mainMixerNode.outputVolume == 0.0
        }
        set {
            engine.mainMixerNode.outputVolume = newValue ? 0.0 : 1.0
        }
    }

    public required init() {
        engine.attach(timePitch)
        if let audioUnit = engine.outputNode.audioUnit {
            addRenderNotify(audioUnit: audioUnit)
        }
        #if !os(macOS)
        outputLatency = AVAudioSession.sharedInstance().outputLatency
        #endif
    }

    public func prepare(audioFormat: AVAudioFormat) {
        if sourceNodeAudioFormat == audioFormat {
            return
        }
        sourceNodeAudioFormat = audioFormat
        #if !os(macOS)
        try? AVAudioSession.sharedInstance().setPreferredOutputNumberOfChannels(Int(audioFormat.channelCount))
        KSLog("[audio] set preferredOutputNumberOfChannels: \(audioFormat.channelCount)")
        #endif
        KSLog("[audio] outputFormat AudioFormat: \(audioFormat)")
        if let channelLayout = audioFormat.channelLayout {
            KSLog("[audio] outputFormat channelLayout \(channelLayout.channelDescriptions)")
        }
        let isRunning = engine.isRunning
        engine.stop()
        engine.reset()
        sourceNode = AVAudioSourceNode(format: audioFormat) { [weak self] _, timestamp, frameCount, audioBufferList in
            if timestamp.pointee.mSampleTime == 0 {
                return noErr
            }
            self?.audioPlayerShouldInputData(ioData: UnsafeMutableAudioBufferListPointer(audioBufferList), numberOfFrames: frameCount)
            return noErr
        }
        guard let sourceNode else {
            return
        }
        KSLog("[audio] new sourceNode inputFormat: \(sourceNode.inputFormat(forBus: 0))")
        sampleSize = audioFormat.sampleSize
        engine.attach(sourceNode)
        var nodes: [AVAudioNode] = [sourceNode]
        nodes.append(contentsOf: audioNodes())
        if audioFormat.channelCount > 2 {
            nodes.append(engine.outputNode)
        }
        // The format must be passed in; otherwise multi-track audio output misbehaves.
        engine.connect(nodes: nodes, format: audioFormat)
        engine.prepare()
        if isRunning {
            try? engine.start()
            // Calling start immediately after switching from multi-channel to stereo does not take effect; it must be dispatched asynchronously to the main thread.
            DispatchQueue.main.async { [weak self] in
                self?.play()
            }
        }
    }

    func audioNodes() -> [AVAudioNode] {
        [timePitch, engine.mainMixerNode]
    }

    public func play() {
        if !engine.isRunning {
            do {
                try engine.start()
            } catch {
                KSLog(error)
            }
        }
    }

    public func pause() {
        if engine.isRunning {
            engine.pause()
        }
    }

    public func flush() {
        currentRender = nil
        #if !os(macOS)
        // This must run on the main thread; on the audio thread it produces interruption glitches.
        outputLatency = AVAudioSession.sharedInstance().outputLatency
        #endif
    }

    private func addRenderNotify(audioUnit: AudioUnit) {
        AudioUnitAddRenderNotify(audioUnit, { refCon, ioActionFlags, inTimeStamp, _, _, _ in
            let `self` = Unmanaged<AudioEnginePlayer>.fromOpaque(refCon).takeUnretainedValue()
            autoreleasepool {
                if ioActionFlags.pointee.contains(.unitRenderAction_PostRender) {
                    self.audioPlayerDidRenderSample(sampleTimestamp: inTimeStamp.pointee)
                }
            }
            return noErr
        }, Unmanaged.passUnretained(self).toOpaque())
    }

    // private func addRenderCallback(audioUnit: AudioUnit, streamDescription: UnsafePointer<AudioStreamBasicDescription>) {
    //     _ = AudioUnitSetProperty(audioUnit,
    //                              kAudioUnitProperty_StreamFormat,
    //                              kAudioUnitScope_Input,
    //                              0,
    //                              streamDescription,
    //                              UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
    //     var inputCallbackStruct = AURenderCallbackStruct()
    //     inputCallbackStruct.inputProcRefCon = Unmanaged.passUnretained(self).toOpaque()
    //     inputCallbackStruct.inputProc = { refCon, _, _, _, inNumberFrames, ioData in
    //         guard let ioData else {
    //             return noErr
    //         }
    //         let `self` = Unmanaged<AudioEnginePlayer>.fromOpaque(refCon).takeUnretainedValue()
    //         self.audioPlayerShouldInputData(ioData: UnsafeMutableAudioBufferListPointer(ioData), numberOfFrames: inNumberFrames)
    //         return noErr
    //     }
    //     _ = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &inputCallbackStruct, UInt32(MemoryLayout<AURenderCallbackStruct>.size))
    // }

    private func audioPlayerShouldInputData(ioData: UnsafeMutableAudioBufferListPointer, numberOfFrames: UInt32) {
        var ioDataWriteOffset = 0
        var numberOfSamples = numberOfFrames
        while numberOfSamples > 0 {
            if currentRender == nil {
                currentRender = renderSource?.getAudioOutputRender()
            }
            guard let currentRender else {
                break
            }
            let residueLinesize = currentRender.numberOfSamples - currentRenderReadOffset
            guard residueLinesize > 0 else {
                self.currentRender = nil
                continue
            }
            if sourceNodeAudioFormat != currentRender.audioFormat {
                runOnMainThread { [weak self] in
                    guard let self else {
                        return
                    }
                    self.prepare(audioFormat: currentRender.audioFormat)
                }
                return
            }
            let framesToCopy = min(numberOfSamples, residueLinesize)
            let bytesToCopy = Int(framesToCopy * sampleSize)
            let offset = Int(currentRenderReadOffset * sampleSize)
            for i in 0 ..< min(ioData.count, currentRender.data.count) {
                if let source = currentRender.data[i], let destination = ioData[i].mData {
                    (destination + ioDataWriteOffset).copyMemory(from: source + offset, byteCount: bytesToCopy)
                }
            }
            numberOfSamples -= framesToCopy
            ioDataWriteOffset += bytesToCopy
            currentRenderReadOffset += framesToCopy
        }
        let sizeCopied = (numberOfFrames - numberOfSamples) * sampleSize
        for i in 0 ..< ioData.count {
            let sizeLeft = Int(ioData[i].mDataByteSize - sizeCopied)
            if sizeLeft > 0 {
                memset(ioData[i].mData! + Int(sizeCopied), 0, sizeLeft)
            }
        }
    }

    private func audioPlayerDidRenderSample(sampleTimestamp _: AudioTimeStamp) {
        if let currentRender {
            let currentPreparePosition = currentRender.timestamp + currentRender.duration * Int64(currentRenderReadOffset) / Int64(currentRender.numberOfSamples)
            if currentPreparePosition > 0 {
                var time = currentRender.timebase.cmtime(for: currentPreparePosition)
                if outputLatency != 0 {
                    /// AVSampleBufferAudioRenderer does not need to account for outputLatency; every other audio output does.
                    /// Without Bluetooth, outputLatency is about 0.015s; with Bluetooth headphones it is about 0.176s.
                    time = time - CMTime(seconds: outputLatency, preferredTimescale: time.timescale)
                }
                renderSource?.setAudio(time: time, position: currentRender.position)
            }
        }
    }
}

extension AVAudioEngine {
    func connect(nodes: [AVAudioNode], format: AVAudioFormat?) {
        if nodes.count < 2 {
            return
        }
        for i in 0 ..< nodes.count - 1 {
            connect(nodes[i], to: nodes[i + 1], format: format)
        }
    }
}
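
// Usage sketch (illustrative, not in the original source): prepare(audioFormat:) uses this
// helper to build the chain sourceNode -> timePitch -> mainMixerNode (-> outputNode when
// the format has more than 2 channels), i.e. pairwise connections equivalent to:
//     engine.connect(nodes: [sourceNode, timePitch, engine.mainMixerNode], format: audioFormat)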
303
KSPlayer-main/Sources/KSPlayer/MEPlayer/AudioGraphPlayer.swift
Normal file
@@ -0,0 +1,303 @@
|
||||
//
|
||||
// AudioGraphPlayer.swift
|
||||
// KSPlayer
|
||||
//
|
||||
// Created by kintan on 2018/3/16.
|
||||
//
|
||||
|
||||
import AudioToolbox
|
||||
import AVFAudio
|
||||
import CoreAudio
|
||||
|
||||
public final class AudioGraphPlayer: AudioOutput, AudioDynamicsProcessor {
|
||||
public private(set) var audioUnitForDynamicsProcessor: AudioUnit
|
||||
private let graph: AUGraph
|
||||
private var audioUnitForMixer: AudioUnit!
|
||||
private var audioUnitForTimePitch: AudioUnit!
|
||||
private var audioUnitForOutput: AudioUnit!
|
||||
private var currentRenderReadOffset = UInt32(0)
|
||||
private var sourceNodeAudioFormat: AVAudioFormat?
|
||||
private var sampleSize = UInt32(MemoryLayout<Float>.size)
|
||||
#if os(macOS)
|
||||
private var volumeBeforeMute: Float = 0.0
|
||||
#endif
|
||||
private var outputLatency = TimeInterval(0)
|
||||
public weak var renderSource: OutputRenderSourceDelegate?
|
||||
private var currentRender: AudioFrame? {
|
||||
didSet {
|
||||
if currentRender == nil {
|
||||
currentRenderReadOffset = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func play() {
|
||||
AUGraphStart(graph)
|
||||
}
|
||||
|
||||
public func pause() {
|
||||
AUGraphStop(graph)
|
||||
}
|
||||
|
||||
public var playbackRate: Float {
|
||||
get {
|
||||
var playbackRate = AudioUnitParameterValue(0.0)
|
||||
AudioUnitGetParameter(audioUnitForTimePitch, kNewTimePitchParam_Rate, kAudioUnitScope_Global, 0, &playbackRate)
|
||||
return playbackRate
|
||||
}
|
||||
set {
|
||||
AudioUnitSetParameter(audioUnitForTimePitch, kNewTimePitchParam_Rate, kAudioUnitScope_Global, 0, newValue, 0)
|
||||
}
|
||||
}
|
||||
|
||||
public var volume: Float {
|
||||
get {
|
||||
var volume = AudioUnitParameterValue(0.0)
|
||||
#if os(macOS)
|
||||
let inID = kStereoMixerParam_Volume
|
||||
#else
|
||||
let inID = kMultiChannelMixerParam_Volume
|
||||
#endif
|
||||
AudioUnitGetParameter(audioUnitForMixer, inID, kAudioUnitScope_Input, 0, &volume)
|
||||
            return volume
        }
        set {
            #if os(macOS)
            let inID = kStereoMixerParam_Volume
            #else
            let inID = kMultiChannelMixerParam_Volume
            #endif
            AudioUnitSetParameter(audioUnitForMixer, inID, kAudioUnitScope_Input, 0, newValue, 0)
        }
    }

    public var isMuted: Bool {
        get {
            var value = AudioUnitParameterValue(1.0)
            #if os(macOS)
            AudioUnitGetParameter(audioUnitForMixer, kStereoMixerParam_Volume, kAudioUnitScope_Input, 0, &value)
            #else
            AudioUnitGetParameter(audioUnitForMixer, kMultiChannelMixerParam_Enable, kAudioUnitScope_Input, 0, &value)
            #endif
            return value == 0
        }
        set {
            let value = newValue ? 0 : 1
            #if os(macOS)
            if value == 0 {
                volumeBeforeMute = volume
            }
            AudioUnitSetParameter(audioUnitForMixer, kStereoMixerParam_Volume, kAudioUnitScope_Input, 0, min(Float(value), volumeBeforeMute), 0)
            #else
            AudioUnitSetParameter(audioUnitForMixer, kMultiChannelMixerParam_Enable, kAudioUnitScope_Input, 0, AudioUnitParameterValue(value), 0)
            #endif
        }
    }

    public init() {
        var newGraph: AUGraph!
        NewAUGraph(&newGraph)
        graph = newGraph
        var descriptionForTimePitch = AudioComponentDescription()
        descriptionForTimePitch.componentType = kAudioUnitType_FormatConverter
        descriptionForTimePitch.componentSubType = kAudioUnitSubType_NewTimePitch
        descriptionForTimePitch.componentManufacturer = kAudioUnitManufacturer_Apple
        var descriptionForDynamicsProcessor = AudioComponentDescription()
        descriptionForDynamicsProcessor.componentType = kAudioUnitType_Effect
        descriptionForDynamicsProcessor.componentManufacturer = kAudioUnitManufacturer_Apple
        descriptionForDynamicsProcessor.componentSubType = kAudioUnitSubType_DynamicsProcessor
        var descriptionForMixer = AudioComponentDescription()
        descriptionForMixer.componentType = kAudioUnitType_Mixer
        descriptionForMixer.componentManufacturer = kAudioUnitManufacturer_Apple
        #if os(macOS)
        descriptionForMixer.componentSubType = kAudioUnitSubType_StereoMixer
        #else
        descriptionForMixer.componentSubType = kAudioUnitSubType_MultiChannelMixer
        #endif
        var descriptionForOutput = AudioComponentDescription()
        descriptionForOutput.componentType = kAudioUnitType_Output
        descriptionForOutput.componentManufacturer = kAudioUnitManufacturer_Apple
        #if os(macOS)
        descriptionForOutput.componentSubType = kAudioUnitSubType_DefaultOutput
        #else
        descriptionForOutput.componentSubType = kAudioUnitSubType_RemoteIO
        #endif
        var nodeForTimePitch = AUNode()
        var nodeForDynamicsProcessor = AUNode()
        var nodeForMixer = AUNode()
        var nodeForOutput = AUNode()
        AUGraphAddNode(graph, &descriptionForTimePitch, &nodeForTimePitch)
        AUGraphAddNode(graph, &descriptionForMixer, &nodeForMixer)
        AUGraphAddNode(graph, &descriptionForDynamicsProcessor, &nodeForDynamicsProcessor)
        AUGraphAddNode(graph, &descriptionForOutput, &nodeForOutput)
        AUGraphOpen(graph)
        AUGraphConnectNodeInput(graph, nodeForTimePitch, 0, nodeForDynamicsProcessor, 0)
        AUGraphConnectNodeInput(graph, nodeForDynamicsProcessor, 0, nodeForMixer, 0)
        AUGraphConnectNodeInput(graph, nodeForMixer, 0, nodeForOutput, 0)
        AUGraphNodeInfo(graph, nodeForTimePitch, &descriptionForTimePitch, &audioUnitForTimePitch)
        var audioUnitForDynamicsProcessor: AudioUnit?
        AUGraphNodeInfo(graph, nodeForDynamicsProcessor, &descriptionForDynamicsProcessor, &audioUnitForDynamicsProcessor)
        self.audioUnitForDynamicsProcessor = audioUnitForDynamicsProcessor!
        AUGraphNodeInfo(graph, nodeForMixer, &descriptionForMixer, &audioUnitForMixer)
        AUGraphNodeInfo(graph, nodeForOutput, &descriptionForOutput, &audioUnitForOutput)
        addRenderNotify(audioUnit: audioUnitForOutput)
        var value = UInt32(1)
        AudioUnitSetProperty(audioUnitForTimePitch,
                             kAudioOutputUnitProperty_EnableIO,
                             kAudioUnitScope_Output, 0,
                             &value,
                             UInt32(MemoryLayout<UInt32>.size))
        #if !os(macOS)
        outputLatency = AVAudioSession.sharedInstance().outputLatency
        #endif
    }

    public func prepare(audioFormat: AVAudioFormat) {
        if sourceNodeAudioFormat == audioFormat {
            return
        }
        sourceNodeAudioFormat = audioFormat
        #if !os(macOS)
        try? AVAudioSession.sharedInstance().setPreferredOutputNumberOfChannels(Int(audioFormat.channelCount))
        KSLog("[audio] set preferredOutputNumberOfChannels: \(audioFormat.channelCount)")
        #endif
        sampleSize = audioFormat.sampleSize
        var audioStreamBasicDescription = audioFormat.formatDescription.audioStreamBasicDescription
        let audioStreamBasicDescriptionSize = UInt32(MemoryLayout<AudioStreamBasicDescription>.size)
        let channelLayout = audioFormat.channelLayout?.layout
        for unit in [audioUnitForTimePitch, audioUnitForDynamicsProcessor, audioUnitForMixer, audioUnitForOutput] {
            guard let unit else { continue }
            AudioUnitSetProperty(unit,
                                 kAudioUnitProperty_StreamFormat,
                                 kAudioUnitScope_Input, 0,
                                 &audioStreamBasicDescription,
                                 audioStreamBasicDescriptionSize)
            AudioUnitSetProperty(unit,
                                 kAudioUnitProperty_AudioChannelLayout,
                                 kAudioUnitScope_Input, 0,
                                 channelLayout,
                                 UInt32(MemoryLayout<AudioChannelLayout>.size))
            if unit != audioUnitForOutput {
                AudioUnitSetProperty(unit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Output, 0,
                                     &audioStreamBasicDescription,
                                     audioStreamBasicDescriptionSize)
                AudioUnitSetProperty(unit,
                                     kAudioUnitProperty_AudioChannelLayout,
                                     kAudioUnitScope_Output, 0,
                                     channelLayout,
                                     UInt32(MemoryLayout<AudioChannelLayout>.size))
            }
            if unit == audioUnitForTimePitch {
                var inputCallbackStruct = renderCallbackStruct()
                AudioUnitSetProperty(unit,
                                     kAudioUnitProperty_SetRenderCallback,
                                     kAudioUnitScope_Input, 0,
                                     &inputCallbackStruct,
                                     UInt32(MemoryLayout<AURenderCallbackStruct>.size))
            }
        }
        AUGraphInitialize(graph)
    }

    public func flush() {
        currentRender = nil
        #if !os(macOS)
        outputLatency = AVAudioSession.sharedInstance().outputLatency
        #endif
    }

    deinit {
        AUGraphStop(graph)
        AUGraphUninitialize(graph)
        AUGraphClose(graph)
        DisposeAUGraph(graph)
    }
}
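
// Editor's sketch (not part of the commit): the initializer above wires
// timePitch -> dynamicsProcessor -> mixer -> output. The same
// NewAUGraph/AUGraphAddNode/AUGraphConnectNodeInput pattern, reduced to a
// single output node, looks like this; error codes are ignored the same way
// the production code ignores them.
private func makeMinimalAUGraph() -> AUGraph? {
    var graph: AUGraph?
    guard NewAUGraph(&graph) == noErr, let graph else { return nil }
    var output = AudioComponentDescription()
    output.componentType = kAudioUnitType_Output
    output.componentManufacturer = kAudioUnitManufacturer_Apple
    #if os(macOS)
    output.componentSubType = kAudioUnitSubType_DefaultOutput
    #else
    output.componentSubType = kAudioUnitSubType_RemoteIO
    #endif
    var node = AUNode()
    AUGraphAddNode(graph, &output, &node) // register the node with the graph
    AUGraphOpen(graph) // instantiate the underlying audio units
    AUGraphInitialize(graph) // after this, AUGraphStart(graph) begins rendering
    return graph
}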

extension AudioGraphPlayer {
    private func renderCallbackStruct() -> AURenderCallbackStruct {
        var inputCallbackStruct = AURenderCallbackStruct()
        inputCallbackStruct.inputProcRefCon = Unmanaged.passUnretained(self).toOpaque()
        inputCallbackStruct.inputProc = { refCon, _, _, _, inNumberFrames, ioData in
            guard let ioData else {
                return noErr
            }
            let `self` = Unmanaged<AudioGraphPlayer>.fromOpaque(refCon).takeUnretainedValue()
            self.audioPlayerShouldInputData(ioData: UnsafeMutableAudioBufferListPointer(ioData), numberOfFrames: inNumberFrames)
            return noErr
        }
        return inputCallbackStruct
    }

    private func addRenderNotify(audioUnit: AudioUnit) {
        AudioUnitAddRenderNotify(audioUnit, { refCon, ioActionFlags, inTimeStamp, _, _, _ in
            let `self` = Unmanaged<AudioGraphPlayer>.fromOpaque(refCon).takeUnretainedValue()
            autoreleasepool {
                if ioActionFlags.pointee.contains(.unitRenderAction_PostRender) {
                    self.audioPlayerDidRenderSample(sampleTimestamp: inTimeStamp.pointee)
                }
            }
            return noErr
        }, Unmanaged.passUnretained(self).toOpaque())
    }

    private func audioPlayerShouldInputData(ioData: UnsafeMutableAudioBufferListPointer, numberOfFrames: UInt32) {
        var ioDataWriteOffset = 0
        var numberOfSamples = numberOfFrames
        while numberOfSamples > 0 {
            if currentRender == nil {
                currentRender = renderSource?.getAudioOutputRender()
            }
            guard let currentRender else {
                break
            }
            let residueLinesize = currentRender.numberOfSamples - currentRenderReadOffset
            guard residueLinesize > 0 else {
                self.currentRender = nil
                continue
            }
            if sourceNodeAudioFormat != currentRender.audioFormat {
                runOnMainThread { [weak self] in
                    guard let self else {
                        return
                    }
                    self.prepare(audioFormat: currentRender.audioFormat)
                }
                return
            }
            let framesToCopy = min(numberOfSamples, residueLinesize)
            let bytesToCopy = Int(framesToCopy * sampleSize)
            let offset = Int(currentRenderReadOffset * sampleSize)
            for i in 0 ..< min(ioData.count, currentRender.data.count) {
                if let source = currentRender.data[i], let destination = ioData[i].mData {
                    (destination + ioDataWriteOffset).copyMemory(from: source + offset, byteCount: bytesToCopy)
                }
            }
            numberOfSamples -= framesToCopy
            ioDataWriteOffset += bytesToCopy
            currentRenderReadOffset += framesToCopy
        }
        let sizeCopied = (numberOfFrames - numberOfSamples) * sampleSize
        for i in 0 ..< ioData.count {
            let sizeLeft = Int(ioData[i].mDataByteSize - sizeCopied)
            if sizeLeft > 0 {
                memset(ioData[i].mData! + Int(sizeCopied), 0, sizeLeft)
            }
        }
    }

    private func audioPlayerDidRenderSample(sampleTimestamp _: AudioTimeStamp) {
        if let currentRender {
            let currentPreparePosition = currentRender.timestamp + currentRender.duration * Int64(currentRenderReadOffset) / Int64(currentRender.numberOfSamples)
            if currentPreparePosition > 0 {
                var time = currentRender.timebase.cmtime(for: currentPreparePosition)
                if outputLatency != 0 {
                    time = time - CMTime(seconds: outputLatency, preferredTimescale: time.timescale)
                }
                renderSource?.setAudio(time: time, position: currentRender.position)
            }
        }
    }
}
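
// Editor's note (illustrative arithmetic, not part of the commit): with 1024
// frames requested, a pending render holding 600 unread frames, and
// sampleSize == 4, the loop above copies framesToCopy = min(1024, 600) = 600
// frames (2400 bytes per buffer), pulls the next render for the remaining 424
// frames, and the trailing memset zero-fills whatever could not be filled so
// the hardware never plays stale buffer contents.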
@@ -0,0 +1,143 @@
//
//  AudioRendererPlayer.swift
//  KSPlayer
//
//  Created by kintan on 2022/12/2.
//

import AVFoundation
import Foundation

public class AudioRendererPlayer: AudioOutput {
    public var playbackRate: Float = 1 {
        didSet {
            if !isPaused {
                synchronizer.rate = playbackRate
            }
        }
    }

    public var volume: Float {
        get {
            renderer.volume
        }
        set {
            renderer.volume = newValue
        }
    }

    public var isMuted: Bool {
        get {
            renderer.isMuted
        }
        set {
            renderer.isMuted = newValue
        }
    }

    public weak var renderSource: OutputRenderSourceDelegate?
    private var periodicTimeObserver: Any?
    private let renderer = AVSampleBufferAudioRenderer()
    private let synchronizer = AVSampleBufferRenderSynchronizer()
    private let serializationQueue = DispatchQueue(label: "ks.player.serialization.queue")
    var isPaused: Bool {
        synchronizer.rate == 0
    }

    public required init() {
        synchronizer.addRenderer(renderer)
        if #available(macOS 11.3, iOS 14.5, tvOS 14.5, *) {
            synchronizer.delaysRateChangeUntilHasSufficientMediaData = false
        }
//        if #available(tvOS 15.0, iOS 15.0, macOS 12.0, *) {
//            renderer.allowedAudioSpatializationFormats = .monoStereoAndMultichannel
//        }
    }

    public func prepare(audioFormat: AVAudioFormat) {
        #if !os(macOS)
        try? AVAudioSession.sharedInstance().setPreferredOutputNumberOfChannels(Int(audioFormat.channelCount))
        KSLog("[audio] set preferredOutputNumberOfChannels: \(audioFormat.channelCount)")
        #endif
    }

    public func play() {
        let time: CMTime
        if #available(macOS 11.3, iOS 14.5, tvOS 14.5, *) {
            // If there is enough buffered media for a reliable start, reuse the synchronizer's
            // current time. After a seek the buffer has to be flushed first so that the
            // latest time can be picked up.
            if renderer.hasSufficientMediaDataForReliablePlaybackStart {
                time = synchronizer.currentTime()
            } else {
                if let currentRender = renderSource?.getAudioOutputRender() {
                    time = currentRender.cmtime
                } else {
                    time = .zero
                }
            }
        } else {
            if let currentRender = renderSource?.getAudioOutputRender() {
                time = currentRender.cmtime
            } else {
                time = .zero
            }
        }
        synchronizer.setRate(playbackRate, time: time)
        // Call this manually so the audio clock is updated right away.
        renderSource?.setAudio(time: time, position: -1)
        renderer.requestMediaDataWhenReady(on: serializationQueue) { [weak self] in
            guard let self else {
                return
            }
            self.request()
        }
        periodicTimeObserver = synchronizer.addPeriodicTimeObserver(forInterval: CMTime(seconds: 0.01), queue: .main) { [weak self] time in
            guard let self else {
                return
            }
            self.renderSource?.setAudio(time: time, position: -1)
        }
    }

    public func pause() {
        synchronizer.rate = 0
        renderer.stopRequestingMediaData()
        if let periodicTimeObserver {
            synchronizer.removeTimeObserver(periodicTimeObserver)
            self.periodicTimeObserver = nil
        }
    }

    public func flush() {
        renderer.flush()
    }

    private func request() {
        while renderer.isReadyForMoreMediaData, !isPaused {
            guard var render = renderSource?.getAudioOutputRender() else {
                break
            }
            var array = [render]
            let loopCount = Int32(render.audioFormat.sampleRate) / 20 / Int32(render.numberOfSamples) - 2
            if loopCount > 0 {
                for _ in 0 ..< loopCount {
                    if let render = renderSource?.getAudioOutputRender() {
                        array.append(render)
                    }
                }
            }
            if array.count > 1 {
                render = AudioFrame(array: array)
            }
            if let sampleBuffer = render.toCMSampleBuffer() {
                let channelCount = render.audioFormat.channelCount
                renderer.audioTimePitchAlgorithm = channelCount > 2 ? .spectral : .timeDomain
                renderer.enqueue(sampleBuffer)
                #if !os(macOS)
                if AVAudioSession.sharedInstance().preferredInputNumberOfChannels != channelCount {
                    try? AVAudioSession.sharedInstance().setPreferredOutputNumberOfChannels(Int(channelCount))
                }
                #endif
            }
        }
    }
}
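
// Editor's note (illustrative arithmetic, not part of the commit): the
// loopCount batching in request() targets roughly 20 enqueues per second.
// At 48_000 Hz with 1024-sample frames, 48000 / 20 / 1024 - 2 = 0, so frames
// are enqueued one at a time; at 8_000 Hz with 64-sample frames,
// 8000 / 20 / 64 - 2 = 4, so up to five small frames are merged into a single
// CMSampleBuffer before being handed to the renderer.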
197
KSPlayer-main/Sources/KSPlayer/MEPlayer/AudioUnitPlayer.swift
Normal file
@@ -0,0 +1,197 @@
//
//  AudioUnitPlayer.swift
//  KSPlayer
//
//  Created by kintan on 2018/3/16.
//

import AudioToolbox
import AVFAudio
import CoreAudio

public final class AudioUnitPlayer: AudioOutput {
    private var audioUnitForOutput: AudioUnit!
    private var currentRenderReadOffset = UInt32(0)
    private var sourceNodeAudioFormat: AVAudioFormat?
    private var sampleSize = UInt32(MemoryLayout<Float>.size)
    public weak var renderSource: OutputRenderSourceDelegate?
    private var currentRender: AudioFrame? {
        didSet {
            if currentRender == nil {
                currentRenderReadOffset = 0
            }
        }
    }

    private var isPlaying = false
    public func play() {
        if !isPlaying {
            isPlaying = true
            AudioOutputUnitStart(audioUnitForOutput)
        }
    }

    public func pause() {
        if isPlaying {
            isPlaying = false
            AudioOutputUnitStop(audioUnitForOutput)
        }
    }

    public var playbackRate: Float = 1
    public var volume: Float = 1
    public var isMuted: Bool = false
    private var outputLatency = TimeInterval(0)
    public init() {
        var descriptionForOutput = AudioComponentDescription()
        descriptionForOutput.componentType = kAudioUnitType_Output
        descriptionForOutput.componentManufacturer = kAudioUnitManufacturer_Apple
        #if os(macOS)
        descriptionForOutput.componentSubType = kAudioUnitSubType_HALOutput
        #else
        descriptionForOutput.componentSubType = kAudioUnitSubType_RemoteIO
        outputLatency = AVAudioSession.sharedInstance().outputLatency
        #endif
        let nodeForOutput = AudioComponentFindNext(nil, &descriptionForOutput)
        AudioComponentInstanceNew(nodeForOutput!, &audioUnitForOutput)
        var value = UInt32(1)
        AudioUnitSetProperty(audioUnitForOutput,
                             kAudioOutputUnitProperty_EnableIO,
                             kAudioUnitScope_Output, 0,
                             &value,
                             UInt32(MemoryLayout<UInt32>.size))
    }

    public func prepare(audioFormat: AVAudioFormat) {
        if sourceNodeAudioFormat == audioFormat {
            return
        }
        sourceNodeAudioFormat = audioFormat
        #if !os(macOS)
        try? AVAudioSession.sharedInstance().setPreferredOutputNumberOfChannels(Int(audioFormat.channelCount))
        KSLog("[audio] set preferredOutputNumberOfChannels: \(audioFormat.channelCount)")
        #endif
        sampleSize = audioFormat.sampleSize
        var audioStreamBasicDescription = audioFormat.formatDescription.audioStreamBasicDescription
        AudioUnitSetProperty(audioUnitForOutput,
                             kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Input, 0,
                             &audioStreamBasicDescription,
                             UInt32(MemoryLayout<AudioStreamBasicDescription>.size))
        let channelLayout = audioFormat.channelLayout?.layout
        AudioUnitSetProperty(audioUnitForOutput,
                             kAudioUnitProperty_AudioChannelLayout,
                             kAudioUnitScope_Input, 0,
                             channelLayout,
                             UInt32(MemoryLayout<AudioChannelLayout>.size))
        var inputCallbackStruct = renderCallbackStruct()
        AudioUnitSetProperty(audioUnitForOutput,
                             kAudioUnitProperty_SetRenderCallback,
                             kAudioUnitScope_Input, 0,
                             &inputCallbackStruct,
                             UInt32(MemoryLayout<AURenderCallbackStruct>.size))
        addRenderNotify(audioUnit: audioUnitForOutput)
        AudioUnitInitialize(audioUnitForOutput)
    }

    public func flush() {
        currentRender = nil
        #if !os(macOS)
        outputLatency = AVAudioSession.sharedInstance().outputLatency
        #endif
    }

    deinit {
        AudioUnitUninitialize(audioUnitForOutput)
    }
}

extension AudioUnitPlayer {
    private func renderCallbackStruct() -> AURenderCallbackStruct {
        var inputCallbackStruct = AURenderCallbackStruct()
        inputCallbackStruct.inputProcRefCon = Unmanaged.passUnretained(self).toOpaque()
        inputCallbackStruct.inputProc = { refCon, _, _, _, inNumberFrames, ioData in
            guard let ioData else {
                return noErr
            }
            let `self` = Unmanaged<AudioUnitPlayer>.fromOpaque(refCon).takeUnretainedValue()
            self.audioPlayerShouldInputData(ioData: UnsafeMutableAudioBufferListPointer(ioData), numberOfFrames: inNumberFrames)
            return noErr
        }
        return inputCallbackStruct
    }

    private func addRenderNotify(audioUnit: AudioUnit) {
        AudioUnitAddRenderNotify(audioUnit, { refCon, ioActionFlags, inTimeStamp, _, _, _ in
            let `self` = Unmanaged<AudioUnitPlayer>.fromOpaque(refCon).takeUnretainedValue()
            autoreleasepool {
                if ioActionFlags.pointee.contains(.unitRenderAction_PostRender) {
                    self.audioPlayerDidRenderSample(sampleTimestamp: inTimeStamp.pointee)
                }
            }
            return noErr
        }, Unmanaged.passUnretained(self).toOpaque())
    }

    private func audioPlayerShouldInputData(ioData: UnsafeMutableAudioBufferListPointer, numberOfFrames: UInt32) {
        var ioDataWriteOffset = 0
        var numberOfSamples = numberOfFrames
        while numberOfSamples > 0 {
            if currentRender == nil {
                currentRender = renderSource?.getAudioOutputRender()
            }
            guard let currentRender else {
                break
            }
            let residueLinesize = currentRender.numberOfSamples - currentRenderReadOffset
            guard residueLinesize > 0 else {
                self.currentRender = nil
                continue
            }
            if sourceNodeAudioFormat != currentRender.audioFormat {
                runOnMainThread { [weak self] in
                    guard let self else {
                        return
                    }
                    self.prepare(audioFormat: currentRender.audioFormat)
                }
                return
            }
            let framesToCopy = min(numberOfSamples, residueLinesize)
            let bytesToCopy = Int(framesToCopy * sampleSize)
            let offset = Int(currentRenderReadOffset * sampleSize)
            for i in 0 ..< min(ioData.count, currentRender.data.count) {
                if let source = currentRender.data[i], let destination = ioData[i].mData {
                    if isMuted {
                        memset(destination + ioDataWriteOffset, 0, bytesToCopy)
                    } else {
                        (destination + ioDataWriteOffset).copyMemory(from: source + offset, byteCount: bytesToCopy)
                    }
                }
            }
            numberOfSamples -= framesToCopy
            ioDataWriteOffset += bytesToCopy
            currentRenderReadOffset += framesToCopy
        }
        let sizeCopied = (numberOfFrames - numberOfSamples) * sampleSize
        for i in 0 ..< ioData.count {
            let sizeLeft = Int(ioData[i].mDataByteSize - sizeCopied)
            if sizeLeft > 0 {
                memset(ioData[i].mData! + Int(sizeCopied), 0, sizeLeft)
            }
        }
    }

    private func audioPlayerDidRenderSample(sampleTimestamp _: AudioTimeStamp) {
        if let currentRender {
            let currentPreparePosition = currentRender.timestamp + currentRender.duration * Int64(currentRenderReadOffset) / Int64(currentRender.numberOfSamples)
            if currentPreparePosition > 0 {
                var time = currentRender.timebase.cmtime(for: currentPreparePosition)
                if outputLatency != 0 {
                    time = time - CMTime(seconds: outputLatency, preferredTimescale: time.timescale)
                }
                renderSource?.setAudio(time: time, position: currentRender.position)
            }
        }
    }
}
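
// Editor's note: unlike AudioGraphPlayer there is no mixer unit in this
// pipeline, so isMuted is honored inside the render callback by memset-ing
// the destination buffers to silence. playbackRate is a plain stored
// property; as KSMEPlayer below shows, rate changes for this output are
// realized with an "atempo" audio filter rather than a time-pitch unit.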
183
KSPlayer-main/Sources/KSPlayer/MEPlayer/CircularBuffer.swift
Normal file
@@ -0,0 +1,183 @@
//
//  CircularBuffer.swift
//  KSPlayer
//
//  Created by kintan on 2018/3/9.
//

import Foundation

/// A blocking queue/ring buffer with a single producer and multiple consumers; the ring
/// buffer variant can additionally keep its items sorted by timestamp.
/// It is multi-consumer because a seek has to flush the queue; it may be worth revisiting
/// later whether this can become a single-consumer design.
public class CircularBuffer<Item: ObjectQueueItem> {
    private var _buffer = ContiguousArray<Item?>()
//    private let semaphore = DispatchSemaphore(value: 0)
    private let condition = NSCondition()
    private var headIndex = UInt(0)
    private var tailIndex = UInt(0)
    private let expanding: Bool
    private let sorted: Bool
    private var destroyed = false
    @inline(__always)
    private var _count: Int { Int(tailIndex &- headIndex) }
    @inline(__always)
    public var count: Int {
//        condition.lock()
//        defer { condition.unlock() }
        Int(tailIndex &- headIndex)
    }

    public internal(set) var fps: Float = 24
    public private(set) var maxCount: Int
    private var mask: UInt
    public init(initialCapacity: Int = 256, sorted: Bool = false, expanding: Bool = true) {
        self.expanding = expanding
        self.sorted = sorted
        let capacity = initialCapacity.nextPowerOf2()
        _buffer = ContiguousArray<Item?>(repeating: nil, count: Int(capacity))
        maxCount = Int(capacity)
        mask = UInt(maxCount - 1)
        assert(_buffer.count == capacity)
    }

    public func push(_ value: Item) {
        condition.lock()
        defer { condition.unlock() }
        if destroyed {
            return
        }
        if _buffer[Int(tailIndex & mask)] != nil {
            assertionFailure("value is not nil of headIndex: \(headIndex), tailIndex: \(tailIndex), bufferCount: \(_buffer.count), mask: \(mask)")
        }
        _buffer[Int(tailIndex & mask)] = value
        if sorted {
            // Insertion-sort the new item into place instead of calling sort(); much
            // cheaper when the buffer is already almost sorted.
            var index = tailIndex
            while index > headIndex {
                guard let item = _buffer[Int((index - 1) & mask)] else {
                    assertionFailure("value is nil of index: \((index - 1) & mask) headIndex: \(headIndex), tailIndex: \(tailIndex), bufferCount: \(_buffer.count), mask: \(mask)")
                    break
                }
                if item.timestamp <= _buffer[Int(index & mask)]!.timestamp {
                    break
                }
                _buffer.swapAt(Int((index - 1) & mask), Int(index & mask))
                index -= 1
            }
        }
        tailIndex &+= 1
        if _count >= maxCount {
            if expanding {
                // No more room left for another append so grow the buffer now.
                _doubleCapacity()
            } else {
                condition.wait()
            }
        } else {
            // Signal as soon as there is any data at all, since it might be the last
            // data to arrive.
            if _count == 1 {
                condition.signal()
            }
        }
    }

    public func pop(wait: Bool = false, where predicate: ((Item, Int) -> Bool)? = nil) -> Item? {
        condition.lock()
        defer { condition.unlock() }
        if destroyed {
            return nil
        }
        if headIndex == tailIndex {
            if wait {
                condition.wait()
                if destroyed || headIndex == tailIndex {
                    return nil
                }
            } else {
                return nil
            }
        }
        let index = Int(headIndex & mask)
        guard let item = _buffer[index] else {
            assertionFailure("value is nil of index: \(index) headIndex: \(headIndex), tailIndex: \(tailIndex), bufferCount: \(_buffer.count), mask: \(mask)")
            return nil
        }
        if let predicate, !predicate(item, _count) {
            return nil
        } else {
            headIndex &+= 1
            _buffer[index] = nil
            if _count == maxCount >> 1 {
                condition.signal()
            }
            return item
        }
    }

    public func search(where predicate: (Item) -> Bool) -> [Item] {
        condition.lock()
        defer { condition.unlock() }
        var i = headIndex
        var result = [Item]()
        while i < tailIndex {
            if let item = _buffer[Int(i & mask)] {
                if predicate(item) {
                    result.append(item)
                    _buffer[Int(i & mask)] = nil
                    headIndex = i + 1
                }
            } else {
                assertionFailure("value is nil of index: \(i) headIndex: \(headIndex), tailIndex: \(tailIndex), bufferCount: \(_buffer.count), mask: \(mask)")
                return result
            }
            i += 1
        }
        return result
    }

    public func flush() {
        condition.lock()
        defer { condition.unlock() }
        headIndex = 0
        tailIndex = 0
        _buffer.removeAll(keepingCapacity: !destroyed)
        _buffer.append(contentsOf: ContiguousArray<Item?>(repeating: nil, count: destroyed ? 1 : maxCount))
        condition.broadcast()
    }

    public func shutdown() {
        destroyed = true
        flush()
    }

    private func _doubleCapacity() {
        var newBacking: ContiguousArray<Item?> = []
        let newCapacity = maxCount << 1 // Double the storage.
        precondition(newCapacity > 0, "Can't double capacity of \(_buffer.count)")
        assert(newCapacity % 2 == 0)
        newBacking.reserveCapacity(newCapacity)
        let head = Int(headIndex & mask)
        newBacking.append(contentsOf: _buffer[head ..< maxCount])
        if head > 0 {
            newBacking.append(contentsOf: _buffer[0 ..< head])
        }
        let repetitionCount = newCapacity &- newBacking.count
        newBacking.append(contentsOf: repeatElement(nil, count: repetitionCount))
        headIndex = 0
        tailIndex = UInt(newBacking.count &- repetitionCount)
        _buffer = newBacking
        maxCount = newCapacity
        mask = UInt(maxCount - 1)
    }
}
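
// Editor's usage sketch (not part of the commit): a decoder thread pushes
// frames while the render thread pops only those that are due. The predicate
// runs under the queue's lock and, when it returns false, the item stays at
// the head for the next attempt:
//
//     let due = queue.pop { item, _ in item.timestamp <= deadline }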

extension FixedWidthInteger {
    /// Returns the next power of two.
    @inline(__always)
    func nextPowerOf2() -> Self {
        guard self != 0 else {
            return 1
        }
        return 1 << (Self.bitWidth - (self - 1).leadingZeroBitCount)
    }
}
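
// Editor's note (illustrative, not part of the commit): nextPowerOf2() maps
// 0 -> 1, 3 -> 4, 250 -> 256, 256 -> 256, 257 -> 512. Because the capacity is
// always a power of two, index & mask == index % capacity, which is why the
// ring buffer can address slots without a division.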
29
KSPlayer-main/Sources/KSPlayer/MEPlayer/EmbedDataSouce.swift
Normal file
@@ -0,0 +1,29 @@
//
//  EmbedDataSouce.swift
//  KSPlayer-7de52535
//
//  Created by kintan on 2018/8/7.
//
import Foundation
import Libavcodec
import Libavutil

extension FFmpegAssetTrack: SubtitleInfo {
    public var subtitleID: String {
        String(trackID)
    }
}

extension FFmpegAssetTrack: KSSubtitleProtocol {
    public func search(for time: TimeInterval) -> [SubtitlePart] {
        subtitle?.outputRenderQueue.search { item -> Bool in
            item.part == time
        }.map(\.part) ?? []
    }
}

extension KSMEPlayer: SubtitleDataSouce {
    public var infos: [any SubtitleInfo] {
        tracks(mediaType: .subtitle).compactMap { $0 as? (any SubtitleInfo) }
    }
}
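
// Editor's note: `item.part == time` above compares a SubtitlePart against a
// TimeInterval, which (judging by this usage) relies on a custom == overload
// that treats a part as equal to any timestamp falling inside its
// start...end range; it is not floating-point equality.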
277
KSPlayer-main/Sources/KSPlayer/MEPlayer/FFmpegAssetTrack.swift
Normal file
@@ -0,0 +1,277 @@
//
//  FFmpegAssetTrack.swift
//  KSPlayer
//
//  Created by kintan on 2023/2/12.
//

import AVFoundation
import FFmpegKit
import Libavformat

public class FFmpegAssetTrack: MediaPlayerTrack {
    public private(set) var trackID: Int32 = 0
    public let codecName: String
    public var name: String = ""
    public private(set) var languageCode: String?
    public var nominalFrameRate: Float = 0
    public private(set) var avgFrameRate = Timebase.defaultValue
    public private(set) var realFrameRate = Timebase.defaultValue
    public private(set) var bitRate: Int64 = 0
    public let mediaType: AVFoundation.AVMediaType
    public let formatName: String?
    public let bitDepth: Int32
    private var stream: UnsafeMutablePointer<AVStream>?
    var startTime = CMTime.zero
    var codecpar: AVCodecParameters
    var timebase: Timebase = .defaultValue
    let bitsPerRawSample: Int32
    // audio
    public let audioDescriptor: AudioDescriptor?
    // subtitle
    public let isImageSubtitle: Bool
    public var delay: TimeInterval = 0
    var subtitle: SyncPlayerItemTrack<SubtitleFrame>?
    // video
    public private(set) var rotation: Int16 = 0
    public var dovi: DOVIDecoderConfigurationRecord?
    public let fieldOrder: FFmpegFieldOrder
    public let formatDescription: CMFormatDescription?
    var closedCaptionsTrack: FFmpegAssetTrack?
    let isConvertNALSize: Bool
    var seekByBytes = false
    public var description: String {
        var description = codecName
        if let formatName {
            description += ", \(formatName)"
        }
        if bitsPerRawSample > 0 {
            description += "(\(bitsPerRawSample.kmFormatted) bit)"
        }
        if let audioDescriptor {
            description += ", \(audioDescriptor.sampleRate)Hz"
            description += ", \(audioDescriptor.channel.description)"
        }
        if let formatDescription {
            if mediaType == .video {
                let naturalSize = formatDescription.naturalSize
                description += ", \(Int(naturalSize.width))x\(Int(naturalSize.height))"
                description += String(format: ", %.2f fps", nominalFrameRate)
            }
        }
        if bitRate > 0 {
            description += ", \(bitRate.kmFormatted)bps"
        }
        if let language {
            description += "(\(language))"
        }
        return description
    }

    convenience init?(stream: UnsafeMutablePointer<AVStream>) {
        let codecpar = stream.pointee.codecpar.pointee
        self.init(codecpar: codecpar)
        self.stream = stream
        let metadata = toDictionary(stream.pointee.metadata)
        if let value = metadata["variant_bitrate"] ?? metadata["BPS"], let bitRate = Int64(value) {
            self.bitRate = bitRate
        }
        trackID = stream.pointee.index
        var timebase = Timebase(stream.pointee.time_base)
        if timebase.num <= 0 || timebase.den <= 0 {
            timebase = Timebase(num: 1, den: 1000)
        }
        if stream.pointee.start_time != Int64.min {
            startTime = timebase.cmtime(for: stream.pointee.start_time)
        }
        self.timebase = timebase
        avgFrameRate = Timebase(stream.pointee.avg_frame_rate)
        realFrameRate = Timebase(stream.pointee.r_frame_rate)
        if mediaType == .audio {
            var frameSize = codecpar.frame_size
            if frameSize < 1 {
                frameSize = timebase.den / timebase.num
            }
            nominalFrameRate = max(Float(codecpar.sample_rate / frameSize), 48)
        } else {
            if stream.pointee.duration > 0, stream.pointee.nb_frames > 0, stream.pointee.nb_frames != stream.pointee.duration {
                nominalFrameRate = Float(stream.pointee.nb_frames) * Float(timebase.den) / Float(stream.pointee.duration) * Float(timebase.num)
            } else if avgFrameRate.den > 0, avgFrameRate.num > 0 {
                nominalFrameRate = Float(avgFrameRate.num) / Float(avgFrameRate.den)
            } else {
                nominalFrameRate = 24
            }
        }

        if let value = metadata["language"], value != "und" {
            languageCode = value
        } else {
            languageCode = nil
        }
        if let value = metadata["title"] {
            name = value
        } else {
            name = languageCode ?? codecName
        }
        // AV_DISPOSITION_DEFAULT
        if mediaType == .subtitle {
            isEnabled = !isImageSubtitle || stream.pointee.disposition & AV_DISPOSITION_FORCED == AV_DISPOSITION_FORCED
            if stream.pointee.disposition & AV_DISPOSITION_HEARING_IMPAIRED == AV_DISPOSITION_HEARING_IMPAIRED {
                name += "(hearing impaired)"
            }
        }
//        var buf = [Int8](repeating: 0, count: 256)
//        avcodec_string(&buf, buf.count, codecpar, 0)
    }

    init?(codecpar: AVCodecParameters) {
        self.codecpar = codecpar
        bitRate = codecpar.bit_rate
        // codec_tag byte order is LSB first CMFormatDescription.MediaSubType(rawValue: codecpar.codec_tag.bigEndian)
        let codecType = codecpar.codec_id.mediaSubType
        var codecName = ""
        if let descriptor = avcodec_descriptor_get(codecpar.codec_id) {
            codecName += String(cString: descriptor.pointee.name)
            if let profile = descriptor.pointee.profiles {
                codecName += " (\(String(cString: profile.pointee.name)))"
            }
        } else {
            codecName = ""
        }
        self.codecName = codecName
        fieldOrder = FFmpegFieldOrder(rawValue: UInt8(codecpar.field_order.rawValue)) ?? .unknown
        var formatDescriptionOut: CMFormatDescription?
        if codecpar.codec_type == AVMEDIA_TYPE_AUDIO {
            mediaType = .audio
            audioDescriptor = AudioDescriptor(codecpar: codecpar)
            isConvertNALSize = false
            bitDepth = 0
            let layout = codecpar.ch_layout
            let channelsPerFrame = UInt32(layout.nb_channels)
            let sampleFormat = AVSampleFormat(codecpar.format)
            let bytesPerSample = UInt32(av_get_bytes_per_sample(sampleFormat))
            let formatFlags = ((sampleFormat == AV_SAMPLE_FMT_FLT || sampleFormat == AV_SAMPLE_FMT_DBL) ? kAudioFormatFlagIsFloat : sampleFormat == AV_SAMPLE_FMT_U8 ? 0 : kAudioFormatFlagIsSignedInteger) | kAudioFormatFlagIsPacked
            var audioStreamBasicDescription = AudioStreamBasicDescription(mSampleRate: Float64(codecpar.sample_rate), mFormatID: codecType.rawValue, mFormatFlags: formatFlags, mBytesPerPacket: bytesPerSample * channelsPerFrame, mFramesPerPacket: 1, mBytesPerFrame: bytesPerSample * channelsPerFrame, mChannelsPerFrame: channelsPerFrame, mBitsPerChannel: bytesPerSample * 8, mReserved: 0)
            _ = CMAudioFormatDescriptionCreate(allocator: kCFAllocatorDefault, asbd: &audioStreamBasicDescription, layoutSize: 0, layout: nil, magicCookieSize: 0, magicCookie: nil, extensions: nil, formatDescriptionOut: &formatDescriptionOut)
            if let name = av_get_sample_fmt_name(sampleFormat) {
                formatName = String(cString: name)
            } else {
                formatName = nil
            }
        } else if codecpar.codec_type == AVMEDIA_TYPE_VIDEO {
            audioDescriptor = nil
            mediaType = .video
            if codecpar.nb_coded_side_data > 0, let sideDatas = codecpar.coded_side_data {
                for i in 0 ..< codecpar.nb_coded_side_data {
                    let sideData = sideDatas[Int(i)]
                    if sideData.type == AV_PKT_DATA_DOVI_CONF {
                        dovi = sideData.data.withMemoryRebound(to: DOVIDecoderConfigurationRecord.self, capacity: 1) { $0 }.pointee
                    } else if sideData.type == AV_PKT_DATA_DISPLAYMATRIX {
                        let matrix = sideData.data.withMemoryRebound(to: Int32.self, capacity: 1) { $0 }
                        rotation = Int16(Int(-av_display_rotation_get(matrix)) % 360)
                    }
                }
            }
            let sar = codecpar.sample_aspect_ratio.size
            var extradataSize = Int32(0)
            var extradata = codecpar.extradata
            let atomsData: Data?
            if let extradata {
                extradataSize = codecpar.extradata_size
                if extradataSize >= 5, extradata[4] == 0xFE {
                    extradata[4] = 0xFF
                    isConvertNALSize = true
                } else {
                    isConvertNALSize = false
                }
                atomsData = Data(bytes: extradata, count: Int(extradataSize))
            } else {
                if codecType.rawValue == kCMVideoCodecType_VP9 {
                    // ff_videotoolbox_vpcc_extradata_create
                    var ioContext: UnsafeMutablePointer<AVIOContext>?
                    guard avio_open_dyn_buf(&ioContext) == 0 else {
                        return nil
                    }
                    ff_isom_write_vpcc(nil, ioContext, nil, 0, &self.codecpar)
                    extradataSize = avio_close_dyn_buf(ioContext, &extradata)
                    guard let extradata else {
                        return nil
                    }
                    var data = Data()
                    var array: [UInt8] = [1, 0, 0, 0]
                    data.append(&array, count: 4)
                    data.append(extradata, count: Int(extradataSize))
                    atomsData = data
                } else {
                    atomsData = nil
                }
                isConvertNALSize = false
            }
            let format = AVPixelFormat(rawValue: codecpar.format)
            bitDepth = format.bitDepth
            let fullRange = codecpar.color_range == AVCOL_RANGE_JPEG
            let dic: NSMutableDictionary = [
                kCVImageBufferChromaLocationBottomFieldKey: kCVImageBufferChromaLocation_Left,
                kCVImageBufferChromaLocationTopFieldKey: kCVImageBufferChromaLocation_Left,
                kCMFormatDescriptionExtension_Depth: format.bitDepth * Int32(format.planeCount),
                kCMFormatDescriptionExtension_FullRangeVideo: fullRange,
                codecType.rawValue == kCMVideoCodecType_HEVC ? "EnableHardwareAcceleratedVideoDecoder" : "RequireHardwareAcceleratedVideoDecoder": true,
            ]
            // kCMFormatDescriptionExtension_BitsPerComponent
            if let atomsData {
                dic[kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms] = [codecType.rawValue.avc: atomsData]
            }
            dic[kCVPixelBufferPixelFormatTypeKey] = format.osType(fullRange: fullRange)
            dic[kCVImageBufferPixelAspectRatioKey] = sar.aspectRatio
            dic[kCVImageBufferColorPrimariesKey] = codecpar.color_primaries.colorPrimaries as String?
            dic[kCVImageBufferTransferFunctionKey] = codecpar.color_trc.transferFunction as String?
            dic[kCVImageBufferYCbCrMatrixKey] = codecpar.color_space.ycbcrMatrix as String?
            // swiftlint:disable line_length
            _ = CMVideoFormatDescriptionCreate(allocator: kCFAllocatorDefault, codecType: codecType.rawValue, width: codecpar.width, height: codecpar.height, extensions: dic, formatDescriptionOut: &formatDescriptionOut)
            // swiftlint:enable line_length
            if let name = av_get_pix_fmt_name(format) {
                formatName = String(cString: name)
            } else {
                formatName = nil
            }
        } else if codecpar.codec_type == AVMEDIA_TYPE_SUBTITLE {
            mediaType = .subtitle
            audioDescriptor = nil
            formatName = nil
            bitDepth = 0
            isConvertNALSize = false
            _ = CMFormatDescriptionCreate(allocator: kCFAllocatorDefault, mediaType: kCMMediaType_Subtitle, mediaSubType: codecType.rawValue, extensions: nil, formatDescriptionOut: &formatDescriptionOut)
        } else {
            bitDepth = 0
            return nil
        }
        formatDescription = formatDescriptionOut
        bitsPerRawSample = codecpar.bits_per_raw_sample
        isImageSubtitle = [AV_CODEC_ID_DVD_SUBTITLE, AV_CODEC_ID_DVB_SUBTITLE, AV_CODEC_ID_DVB_TELETEXT, AV_CODEC_ID_HDMV_PGS_SUBTITLE].contains(codecpar.codec_id)
        trackID = 0
    }

    func createContext(options: KSOptions) throws -> UnsafeMutablePointer<AVCodecContext> {
        try codecpar.createContext(options: options)
    }

    public var isEnabled: Bool {
        get {
            stream?.pointee.discard == AVDISCARD_DEFAULT
        }
        set {
            var discard = newValue ? AVDISCARD_DEFAULT : AVDISCARD_ALL
            if mediaType == .subtitle, !isImageSubtitle {
                discard = AVDISCARD_DEFAULT
            }
            stream?.pointee.discard = discard
        }
    }
}

extension FFmpegAssetTrack {
    var pixelFormatType: OSType? {
        let format = AVPixelFormat(codecpar.format)
        return format.osType(fullRange: formatDescription?.fullRangeVideo ?? false)
    }
}
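
// Editor's note (illustrative arithmetic, not part of the commit): for an
// audio stream at 48_000 Hz with frame_size 1024, nominalFrameRate =
// max(48000 / 1024, 48) = max(46, 48) = 48; for a video stream with
// avg_frame_rate 30000/1001 it evaluates to roughly 29.97 fps, and 24 is the
// fallback when no rate can be derived from the stream.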
206
KSPlayer-main/Sources/KSPlayer/MEPlayer/FFmpegDecode.swift
Normal file
@@ -0,0 +1,206 @@
//
//  FFmpegDecode.swift
//  KSPlayer
//
//  Created by kintan on 2018/3/9.
//

import AVFoundation
import Foundation
import Libavcodec

class FFmpegDecode: DecodeProtocol {
    private let options: KSOptions
    private var coreFrame: UnsafeMutablePointer<AVFrame>? = av_frame_alloc()
    private var codecContext: UnsafeMutablePointer<AVCodecContext>?
    private var bestEffortTimestamp = Int64(0)
    private let frameChange: FrameChange
    private let filter: MEFilter
    private let seekByBytes: Bool
    required init(assetTrack: FFmpegAssetTrack, options: KSOptions) {
        self.options = options
        seekByBytes = assetTrack.seekByBytes
        do {
            codecContext = try assetTrack.createContext(options: options)
        } catch {
            KSLog(error as CustomStringConvertible)
        }
        codecContext?.pointee.time_base = assetTrack.timebase.rational
        filter = MEFilter(timebase: assetTrack.timebase, isAudio: assetTrack.mediaType == .audio, nominalFrameRate: assetTrack.nominalFrameRate, options: options)
        if assetTrack.mediaType == .video {
            frameChange = VideoSwresample(fps: assetTrack.nominalFrameRate, isDovi: assetTrack.dovi != nil)
        } else {
            frameChange = AudioSwresample(audioDescriptor: assetTrack.audioDescriptor!)
        }
    }

    func decodeFrame(from packet: Packet, completionHandler: @escaping (Result<MEFrame, Error>) -> Void) {
        guard let codecContext, avcodec_send_packet(codecContext, packet.corePacket) == 0 else {
            return
        }
        // properties only reflects FF_CODEC_PROPERTY_CLOSED_CAPTIONS after
        // avcodec_send_packet has been called.
        if packet.assetTrack.mediaType == .video {
            if Int32(codecContext.pointee.properties) & FF_CODEC_PROPERTY_CLOSED_CAPTIONS != 0, packet.assetTrack.closedCaptionsTrack == nil {
                var codecpar = AVCodecParameters()
                codecpar.codec_type = AVMEDIA_TYPE_SUBTITLE
                codecpar.codec_id = AV_CODEC_ID_EIA_608
                if let subtitleAssetTrack = FFmpegAssetTrack(codecpar: codecpar) {
                    subtitleAssetTrack.name = "Closed Captions"
                    subtitleAssetTrack.startTime = packet.assetTrack.startTime
                    subtitleAssetTrack.timebase = packet.assetTrack.timebase
                    let subtitle = SyncPlayerItemTrack<SubtitleFrame>(mediaType: .subtitle, frameCapacity: 255, options: options)
                    subtitleAssetTrack.subtitle = subtitle
                    packet.assetTrack.closedCaptionsTrack = subtitleAssetTrack
                    subtitle.decode()
                }
            }
        }
        while true {
            let result = avcodec_receive_frame(codecContext, coreFrame)
            if result == 0, let inputFrame = coreFrame {
                var displayData: MasteringDisplayMetadata?
                var contentData: ContentLightMetadata?
                var ambientViewingEnvironment: AmbientViewingEnvironment?
                // Side data is lost after filtering, so it has to be read here.
                if inputFrame.pointee.nb_side_data > 0 {
                    for i in 0 ..< inputFrame.pointee.nb_side_data {
                        if let sideData = inputFrame.pointee.side_data[Int(i)]?.pointee {
                            if sideData.type == AV_FRAME_DATA_A53_CC {
                                if let closedCaptionsTrack = packet.assetTrack.closedCaptionsTrack,
                                   let subtitle = closedCaptionsTrack.subtitle
                                {
                                    let closedCaptionsPacket = Packet()
                                    if let corePacket = packet.corePacket {
                                        closedCaptionsPacket.corePacket?.pointee.pts = corePacket.pointee.pts
                                        closedCaptionsPacket.corePacket?.pointee.dts = corePacket.pointee.dts
                                        closedCaptionsPacket.corePacket?.pointee.pos = corePacket.pointee.pos
                                        closedCaptionsPacket.corePacket?.pointee.time_base = corePacket.pointee.time_base
                                        closedCaptionsPacket.corePacket?.pointee.stream_index = corePacket.pointee.stream_index
                                    }
                                    closedCaptionsPacket.corePacket?.pointee.flags |= AV_PKT_FLAG_KEY
                                    closedCaptionsPacket.corePacket?.pointee.size = Int32(sideData.size)
                                    let buffer = av_buffer_ref(sideData.buf)
                                    closedCaptionsPacket.corePacket?.pointee.data = buffer?.pointee.data
                                    closedCaptionsPacket.corePacket?.pointee.buf = buffer
                                    closedCaptionsPacket.assetTrack = closedCaptionsTrack
                                    subtitle.putPacket(packet: closedCaptionsPacket)
                                }
                            } else if sideData.type == AV_FRAME_DATA_SEI_UNREGISTERED {
                                let size = sideData.size
                                if size > AV_UUID_LEN {
                                    let str = String(cString: sideData.data.advanced(by: Int(AV_UUID_LEN)))
                                    options.sei(string: str)
                                }
                            } else if sideData.type == AV_FRAME_DATA_DOVI_RPU_BUFFER {
                                let data = sideData.data.withMemoryRebound(to: [UInt8].self, capacity: 1) { $0 }
                            } else if sideData.type == AV_FRAME_DATA_DOVI_METADATA { // AVDOVIMetadata
                                let data = sideData.data.withMemoryRebound(to: AVDOVIMetadata.self, capacity: 1) { $0 }
                                let header = av_dovi_get_header(data)
                                let mapping = av_dovi_get_mapping(data)
                                let color = av_dovi_get_color(data)
                                // frame.corePixelBuffer?.transferFunction = kCVImageBufferTransferFunction_ITU_R_2020
                            } else if sideData.type == AV_FRAME_DATA_DYNAMIC_HDR_PLUS { // AVDynamicHDRPlus
                                let data = sideData.data.withMemoryRebound(to: AVDynamicHDRPlus.self, capacity: 1) { $0 }.pointee
                            } else if sideData.type == AV_FRAME_DATA_DYNAMIC_HDR_VIVID { // AVDynamicHDRVivid
                                let data = sideData.data.withMemoryRebound(to: AVDynamicHDRVivid.self, capacity: 1) { $0 }.pointee
                            } else if sideData.type == AV_FRAME_DATA_MASTERING_DISPLAY_METADATA {
                                let data = sideData.data.withMemoryRebound(to: AVMasteringDisplayMetadata.self, capacity: 1) { $0 }.pointee
                                displayData = MasteringDisplayMetadata(
                                    display_primaries_r_x: UInt16(data.display_primaries.0.0.num).bigEndian,
                                    display_primaries_r_y: UInt16(data.display_primaries.0.1.num).bigEndian,
                                    display_primaries_g_x: UInt16(data.display_primaries.1.0.num).bigEndian,
                                    display_primaries_g_y: UInt16(data.display_primaries.1.1.num).bigEndian,
                                    display_primaries_b_x: UInt16(data.display_primaries.2.0.num).bigEndian,
                                    display_primaries_b_y: UInt16(data.display_primaries.2.1.num).bigEndian,
                                    white_point_x: UInt16(data.white_point.0.num).bigEndian,
                                    white_point_y: UInt16(data.white_point.1.num).bigEndian,
                                    minLuminance: UInt32(data.min_luminance.num).bigEndian,
                                    maxLuminance: UInt32(data.max_luminance.num).bigEndian
                                )
                            } else if sideData.type == AV_FRAME_DATA_CONTENT_LIGHT_LEVEL {
                                let data = sideData.data.withMemoryRebound(to: AVContentLightMetadata.self, capacity: 1) { $0 }.pointee
                                contentData = ContentLightMetadata(
                                    MaxCLL: UInt16(data.MaxCLL).bigEndian,
                                    MaxFALL: UInt16(data.MaxFALL).bigEndian
                                )
                            } else if sideData.type == AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT {
                                let data = sideData.data.withMemoryRebound(to: AVAmbientViewingEnvironment.self, capacity: 1) { $0 }.pointee
                                ambientViewingEnvironment = AmbientViewingEnvironment(
                                    ambient_illuminance: UInt32(data.ambient_illuminance.num).bigEndian,
                                    ambient_light_x: UInt16(data.ambient_light_x.num).bigEndian,
                                    ambient_light_y: UInt16(data.ambient_light_y.num).bigEndian
                                )
                            }
                        }
                    }
                }
                filter.filter(options: options, inputFrame: inputFrame) { avframe in
                    do {
                        var frame = try frameChange.change(avframe: avframe)
                        if let videoFrame = frame as? VideoVTBFrame, let pixelBuffer = videoFrame.corePixelBuffer {
                            if let pixelBuffer = pixelBuffer as? PixelBuffer {
                                pixelBuffer.formatDescription = packet.assetTrack.formatDescription
                            }
                            if displayData != nil || contentData != nil || ambientViewingEnvironment != nil {
                                videoFrame.edrMetaData = EDRMetaData(displayData: displayData, contentData: contentData, ambientViewingEnvironment: ambientViewingEnvironment)
                            }
                        }
                        frame.timebase = filter.timebase
//                        frame.timebase = Timebase(avframe.pointee.time_base)
                        frame.size = packet.size
                        frame.position = packet.position
                        frame.duration = avframe.pointee.duration
                        if frame.duration == 0, avframe.pointee.sample_rate != 0, frame.timebase.num != 0 {
                            frame.duration = Int64(avframe.pointee.nb_samples) * Int64(frame.timebase.den) / (Int64(avframe.pointee.sample_rate) * Int64(frame.timebase.num))
                        }
                        var timestamp = avframe.pointee.best_effort_timestamp
                        if timestamp < 0 {
                            timestamp = avframe.pointee.pts
                        }
                        if timestamp < 0 {
                            timestamp = avframe.pointee.pkt_dts
                        }
                        if timestamp < 0 {
                            timestamp = bestEffortTimestamp
                        }
                        frame.timestamp = timestamp
                        bestEffortTimestamp = timestamp &+ frame.duration
                        completionHandler(.success(frame))
                    } catch {
                        completionHandler(.failure(error))
                    }
                }
            } else {
                if result == AVError.eof.code {
                    avcodec_flush_buffers(codecContext)
                    break
                } else if result == AVError.tryAgain.code {
                    break
                } else {
                    let error = NSError(errorCode: packet.assetTrack.mediaType == .audio ? .codecAudioReceiveFrame : .codecVideoReceiveFrame, avErrorCode: result)
                    KSLog(error)
                    completionHandler(.failure(error))
                }
            }
        }
    }

    func doFlushCodec() {
        bestEffortTimestamp = Int64(0)
        // Flush after a seek; otherwise the decoder may still return buffered
        // frames from before the seek.
        avcodec_flush_buffers(codecContext)
    }

    func shutdown() {
        av_frame_free(&coreFrame)
        avcodec_free_context(&codecContext)
        frameChange.shutdown()
    }

    func decode() {
        bestEffortTimestamp = Int64(0)
        if codecContext != nil {
            avcodec_flush_buffers(codecContext)
        }
    }
}
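
// Editor's note: the timestamp selection above falls back through
// best_effort_timestamp -> pts -> pkt_dts -> the running bestEffortTimestamp,
// and bestEffortTimestamp is then advanced by the frame duration, so a stream
// with missing timestamps still yields a monotonically increasing clock.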
150
KSPlayer-main/Sources/KSPlayer/MEPlayer/Filter.swift
Normal file
@@ -0,0 +1,150 @@
//
//  Filter.swift
//  KSPlayer
//
//  Created by kintan on 2021/8/7.
//

import Foundation
import Libavfilter
import Libavutil

class MEFilter {
    private var graph: UnsafeMutablePointer<AVFilterGraph>?
    private var bufferSrcContext: UnsafeMutablePointer<AVFilterContext>?
    private var bufferSinkContext: UnsafeMutablePointer<AVFilterContext>?
    private var filters: String?
    let timebase: Timebase
    private let isAudio: Bool
    private var params = AVBufferSrcParameters()
    private let nominalFrameRate: Float
    deinit {
        graph?.pointee.opaque = nil
        avfilter_graph_free(&graph)
    }

    public init(timebase: Timebase, isAudio: Bool, nominalFrameRate: Float, options: KSOptions) {
        graph = avfilter_graph_alloc()
        graph?.pointee.opaque = Unmanaged.passUnretained(options).toOpaque()
        self.timebase = timebase
        self.isAudio = isAudio
        self.nominalFrameRate = nominalFrameRate
    }

    private func setup(filters: String) -> Bool {
        var inputs = avfilter_inout_alloc()
        var outputs = avfilter_inout_alloc()
        var ret = avfilter_graph_parse2(graph, filters, &inputs, &outputs)
        guard ret >= 0, let graph, let inputs, let outputs else {
            avfilter_inout_free(&inputs)
            avfilter_inout_free(&outputs)
            return false
        }
        let bufferSink = avfilter_get_by_name(isAudio ? "abuffersink" : "buffersink")
        ret = avfilter_graph_create_filter(&bufferSinkContext, bufferSink, "out", nil, nil, graph)
        guard ret >= 0 else { return false }
        ret = avfilter_link(outputs.pointee.filter_ctx, UInt32(outputs.pointee.pad_idx), bufferSinkContext, 0)
        guard ret >= 0 else { return false }
        let buffer = avfilter_get_by_name(isAudio ? "abuffer" : "buffer")
        bufferSrcContext = avfilter_graph_alloc_filter(graph, buffer, "in")
        guard bufferSrcContext != nil else { return false }
        av_buffersrc_parameters_set(bufferSrcContext, &params)
        ret = avfilter_init_str(bufferSrcContext, nil)
        guard ret >= 0 else { return false }
        ret = avfilter_link(bufferSrcContext, 0, inputs.pointee.filter_ctx, UInt32(inputs.pointee.pad_idx))
        guard ret >= 0 else { return false }
        if let ctx = params.hw_frames_ctx {
            let framesCtxData = UnsafeMutableRawPointer(ctx.pointee.data).bindMemory(to: AVHWFramesContext.self, capacity: 1)
            inputs.pointee.filter_ctx.pointee.hw_device_ctx = framesCtxData.pointee.device_ref
//            outputs.pointee.filter_ctx.pointee.hw_device_ctx = framesCtxData.pointee.device_ref
//            bufferSrcContext?.pointee.hw_device_ctx = framesCtxData.pointee.device_ref
//            bufferSinkContext?.pointee.hw_device_ctx = framesCtxData.pointee.device_ref
        }
        ret = avfilter_graph_config(graph, nil)
        guard ret >= 0 else { return false }
        return true
    }

    private func setup2(filters: String) -> Bool {
        guard let graph else {
            return false
        }
        let bufferName = isAudio ? "abuffer" : "buffer"
        let bufferSrc = avfilter_get_by_name(bufferName)
        var ret = avfilter_graph_create_filter(&bufferSrcContext, bufferSrc, "ksplayer_\(bufferName)", params.arg, nil, graph)
        av_buffersrc_parameters_set(bufferSrcContext, &params)
        let bufferSink = avfilter_get_by_name(bufferName + "sink")
        ret = avfilter_graph_create_filter(&bufferSinkContext, bufferSink, "ksplayer_\(bufferName)sink", nil, nil, graph)
        guard ret >= 0 else { return false }
//        av_opt_set_int_list(bufferSinkContext, "pix_fmts", [AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE], AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)
        var inputs = avfilter_inout_alloc()
        var outputs = avfilter_inout_alloc()
        outputs?.pointee.name = strdup("in")
        outputs?.pointee.filter_ctx = bufferSrcContext
        outputs?.pointee.pad_idx = 0
        outputs?.pointee.next = nil
        inputs?.pointee.name = strdup("out")
        inputs?.pointee.filter_ctx = bufferSinkContext
        inputs?.pointee.pad_idx = 0
        inputs?.pointee.next = nil
        let filterNb = Int(graph.pointee.nb_filters)
        ret = avfilter_graph_parse_ptr(graph, filters, &inputs, &outputs, nil)
        guard ret >= 0 else {
            avfilter_inout_free(&inputs)
            avfilter_inout_free(&outputs)
            return false
        }
        for i in 0 ..< Int(graph.pointee.nb_filters) - filterNb {
            swap(&graph.pointee.filters[i], &graph.pointee.filters[i + filterNb])
        }
        ret = avfilter_graph_config(graph, nil)
        guard ret >= 0 else { return false }
        return true
    }

    public func filter(options: KSOptions, inputFrame: UnsafeMutablePointer<AVFrame>, completionHandler: (UnsafeMutablePointer<AVFrame>) -> Void) {
        let filters: String
        if isAudio {
            filters = options.audioFilters.joined(separator: ",")
        } else {
            if options.autoDeInterlace, !options.videoFilters.contains("idet") {
                options.videoFilters.append("idet")
            }
            filters = options.videoFilters.joined(separator: ",")
        }
        guard !filters.isEmpty else {
            completionHandler(inputFrame)
            return
        }
        var params = AVBufferSrcParameters()
        params.format = inputFrame.pointee.format
        params.time_base = timebase.rational
        params.width = inputFrame.pointee.width
        params.height = inputFrame.pointee.height
        params.sample_aspect_ratio = inputFrame.pointee.sample_aspect_ratio
        params.frame_rate = AVRational(num: 1, den: Int32(nominalFrameRate))
        if let ctx = inputFrame.pointee.hw_frames_ctx {
            params.hw_frames_ctx = av_buffer_ref(ctx)
        }
        params.sample_rate = inputFrame.pointee.sample_rate
        params.ch_layout = inputFrame.pointee.ch_layout
        if self.params != params || self.filters != filters {
            self.params = params
            self.filters = filters
            if !setup(filters: filters) {
                completionHandler(inputFrame)
                return
            }
        }
        let ret = av_buffersrc_add_frame_flags(bufferSrcContext, inputFrame, 0)
        if ret < 0 {
            return
        }
        while av_buffersink_get_frame_flags(bufferSinkContext, inputFrame, 0) >= 0 {
//            timebase = Timebase(av_buffersink_get_time_base(bufferSinkContext))
            completionHandler(inputFrame)
            // av_frame_unref is mandatory here; without it the frame leaks.
            av_frame_unref(inputFrame)
        }
    }
}
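
// Editor's usage sketch (not part of the commit): the graph above is rebuilt
// lazily whenever the filter string or the source parameters (size, pixel or
// sample format, sample rate, channel layout) change, so callers simply set
// the filter lists on KSOptions, e.g.
//
//     options.videoFilters = ["yadif"]      // deinterlace
//     options.audioFilters = ["atempo=1.5"] // speed audio up 1.5x
//
// and every decoded frame is then routed through filter(options:inputFrame:).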
588
KSPlayer-main/Sources/KSPlayer/MEPlayer/KSMEPlayer.swift
Normal file
@@ -0,0 +1,588 @@
//
//  KSMEPlayer.swift
//  KSPlayer
//
//  Created by kintan on 2018/3/9.
//

import AVFoundation
import AVKit
#if canImport(UIKit)
import UIKit
#else
import AppKit
#endif

public class KSMEPlayer: NSObject {
    private var loopCount = 1
    private var playerItem: MEPlayerItem
    public let audioOutput: AudioOutput
    private var options: KSOptions
    private var bufferingCountDownTimer: Timer?
    public private(set) var videoOutput: (VideoOutput & UIView)? {
        didSet {
            oldValue?.invalidate()
            runOnMainThread {
                oldValue?.removeFromSuperview()
            }
        }
    }

    public private(set) var bufferingProgress = 0 {
        willSet {
            runOnMainThread { [weak self] in
                guard let self else { return }
                delegate?.changeBuffering(player: self, progress: newValue)
            }
        }
    }

    private lazy var _pipController: Any? = {
        if #available(iOS 15.0, tvOS 15.0, macOS 12.0, *), let videoOutput {
            let contentSource = AVPictureInPictureController.ContentSource(sampleBufferDisplayLayer: videoOutput.displayLayer, playbackDelegate: self)
            let pip = KSPictureInPictureController(contentSource: contentSource)
            return pip
        } else {
            return nil
        }
    }()

    @available(tvOS 14.0, *)
    public var pipController: KSPictureInPictureController? {
        _pipController as? KSPictureInPictureController
    }

    private lazy var _playbackCoordinator: Any? = {
        if #available(macOS 12.0, iOS 15.0, tvOS 15.0, *) {
            let coordinator = AVDelegatingPlaybackCoordinator(playbackControlDelegate: self)
            coordinator.suspensionReasonsThatTriggerWaiting = [.stallRecovery]
            return coordinator
        } else {
            return nil
        }
    }()

    @available(macOS 12.0, iOS 15.0, tvOS 15.0, *)
    public var playbackCoordinator: AVPlaybackCoordinator {
        // swiftlint:disable force_cast
        _playbackCoordinator as! AVPlaybackCoordinator
        // swiftlint:enable force_cast
    }

    public private(set) var playableTime = TimeInterval(0)
    public weak var delegate: MediaPlayerDelegate?
    public private(set) var isReadyToPlay = false
    public var allowsExternalPlayback: Bool = false
    public var usesExternalPlaybackWhileExternalScreenIsActive: Bool = false

    public var playbackRate: Float = 1 {
        didSet {
            if playbackRate != audioOutput.playbackRate {
                audioOutput.playbackRate = playbackRate
                if audioOutput is AudioUnitPlayer {
                    var audioFilters = options.audioFilters.filter {
                        !$0.hasPrefix("atempo=")
                    }
                    if playbackRate != 1 {
                        audioFilters.append("atempo=\(playbackRate)")
                    }
                    options.audioFilters = audioFilters
                }
            }
        }
    }
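
    // Illustrative use (hypothetical caller): setting playbackRate drives the audio
    // output directly and, for AudioUnitPlayer, an FFmpeg "atempo" filter so pitch
    // stays constant while tempo changes:
    //
    //     player.playbackRate = 1.5   // audioFilters gains "atempo=1.5"
    //     player.playbackRate = 1.0   // the atempo entry is removed again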

    public private(set) var loadState = MediaLoadState.idle {
        didSet {
            if loadState != oldValue {
                playOrPause()
            }
        }
    }

    public private(set) var playbackState = MediaPlaybackState.idle {
        didSet {
            if playbackState != oldValue {
                playOrPause()
                if playbackState == .finished {
                    runOnMainThread { [weak self] in
                        guard let self else { return }
                        delegate?.finish(player: self, error: nil)
                    }
                }
            }
        }
    }

    public required init(url: URL, options: KSOptions) {
        KSOptions.setAudioSession()
        audioOutput = KSOptions.audioPlayerType.init()
        playerItem = MEPlayerItem(url: url, options: options)
        if options.videoDisable {
            videoOutput = nil
        } else {
            videoOutput = KSOptions.videoPlayerType.init(options: options)
        }
        self.options = options
        super.init()
        playerItem.delegate = self
        audioOutput.renderSource = playerItem
        videoOutput?.renderSource = playerItem
        videoOutput?.displayLayerDelegate = self
        #if !os(macOS)
        NotificationCenter.default.addObserver(self, selector: #selector(audioRouteChange), name: AVAudioSession.routeChangeNotification, object: AVAudioSession.sharedInstance())
        if #available(tvOS 15.0, iOS 15.0, *) {
            NotificationCenter.default.addObserver(self, selector: #selector(spatialCapabilityChange), name: AVAudioSession.spatialPlaybackCapabilitiesChangedNotification, object: nil)
        }
        #endif
    }

    deinit {
        #if !os(macOS)
        try? AVAudioSession.sharedInstance().setPreferredOutputNumberOfChannels(2)
        #endif
        NotificationCenter.default.removeObserver(self)
        videoOutput?.invalidate()
        playerItem.shutdown()
    }
}

// MARK: - private functions

private extension KSMEPlayer {
    func playOrPause() {
        runOnMainThread { [weak self] in
            guard let self else { return }
            let isPaused = !(self.playbackState == .playing && self.loadState == .playable)
            if isPaused {
                self.audioOutput.pause()
                self.videoOutput?.pause()
            } else {
                self.audioOutput.play()
                self.videoOutput?.play()
            }
            self.delegate?.changeLoadState(player: self)
        }
    }
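
    // Rendering is gated on two independent state machines: output runs only while
    // playbackState == .playing and loadState == .playable; every other combination
    // pauses both outputs. That is why the loadState and playbackState observers
    // above both funnel through playOrPause().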

    @objc private func spatialCapabilityChange(notification _: Notification) {
        KSLog("[audio] spatialCapabilityChange")
        for track in tracks(mediaType: .audio) {
            (track as? FFmpegAssetTrack)?.audioDescriptor?.updateAudioFormat()
        }
    }

    #if !os(macOS)
    @objc private func audioRouteChange(notification: Notification) {
        KSLog("[audio] audioRouteChange")
        guard let reason = notification.userInfo?[AVAudioSessionRouteChangeReasonKey] as? UInt else {
            return
        }
        // let routeChangeReason = AVAudioSession.RouteChangeReason(rawValue: reason)
        // guard [AVAudioSession.RouteChangeReason.newDeviceAvailable, .oldDeviceUnavailable, .routeConfigurationChange].contains(routeChangeReason) else {
        //     return
        // }
        for track in tracks(mediaType: .audio) {
            (track as? FFmpegAssetTrack)?.audioDescriptor?.updateAudioFormat()
        }
        audioOutput.flush()
    }
    #endif
}

extension KSMEPlayer: MEPlayerDelegate {
    func sourceDidOpened() {
        isReadyToPlay = true
        options.readyTime = CACurrentMediaTime()
        let videoTracks = tracks(mediaType: .video)
        if videoTracks.isEmpty {
            videoOutput = nil
        }
        let audioDescriptor = tracks(mediaType: .audio).first { $0.isEnabled }.flatMap {
            $0 as? FFmpegAssetTrack
        }?.audioDescriptor
        runOnMainThread { [weak self] in
            guard let self else { return }
            if let audioDescriptor {
                KSLog("[audio] audio type: \(audioOutput) prepare audioFormat \(audioDescriptor.audioFormat)")
                audioOutput.prepare(audioFormat: audioDescriptor.audioFormat)
            }
            if let controlTimebase = videoOutput?.displayLayer.controlTimebase, options.startPlayTime > 1 {
                CMTimebaseSetTime(controlTimebase, time: CMTimeMake(value: Int64(options.startPlayTime), timescale: 1))
            }
            delegate?.readyToPlay(player: self)
        }
    }

    func sourceDidFailed(error: NSError?) {
        runOnMainThread { [weak self] in
            guard let self else { return }
            self.delegate?.finish(player: self, error: error)
        }
    }

    func sourceDidFinished() {
        runOnMainThread { [weak self] in
            guard let self else { return }
            if self.options.isLoopPlay {
                self.loopCount += 1
                self.delegate?.playBack(player: self, loopCount: self.loopCount)
                self.audioOutput.play()
                self.videoOutput?.play()
            } else {
                self.playbackState = .finished
            }
        }
    }

    func sourceDidChange(loadingState: LoadingState) {
        if loadingState.isEndOfFile {
            playableTime = duration
        } else {
            playableTime = currentPlaybackTime + loadingState.loadedTime
        }
        if loadState == .playable {
            if !loadingState.isEndOfFile, loadingState.frameCount == 0, loadingState.packetCount == 0, options.preferredForwardBufferDuration != 0 {
                loadState = .loading
                if playbackState == .playing {
                    runOnMainThread { [weak self] in
                        // Update the progress on the main thread.
                        self?.bufferingProgress = 0
                    }
                }
            }
        } else {
            if loadingState.isFirst {
                if videoOutput?.pixelBuffer == nil {
                    videoOutput?.readNextFrame()
                }
            }
            var progress = 100
            if loadingState.isPlayable {
                loadState = .playable
            } else {
                if loadingState.progress.isInfinite {
                    progress = 100
                } else if loadingState.progress.isNaN {
                    progress = 0
                } else {
                    progress = min(100, Int(loadingState.progress))
                }
            }
            if playbackState == .playing {
                runOnMainThread { [weak self] in
                    // Update the progress on the main thread.
                    self?.bufferingProgress = progress
                }
            }
        }
        if duration == 0, playbackState == .playing, loadState == .playable {
            if let rate = options.liveAdaptivePlaybackRate(loadingState: loadingState) {
                playbackRate = rate
            }
        }
    }

    func sourceDidChange(oldBitRate: Int64, newBitrate: Int64) {
        KSLog("oldBitRate \(oldBitRate) change to newBitrate \(newBitrate)")
    }
}

extension KSMEPlayer: MediaPlayerProtocol {
    public var chapters: [Chapter] {
        playerItem.chapters
    }

    public var subtitleDataSouce: SubtitleDataSouce? { self }
    public var playbackVolume: Float {
        get {
            audioOutput.volume
        }
        set {
            audioOutput.volume = newValue
        }
    }

    public var isPlaying: Bool { playbackState == .playing }

    @MainActor
    public var naturalSize: CGSize {
        options.display == .plane ? playerItem.naturalSize : KSOptions.sceneSize
    }

    public var isExternalPlaybackActive: Bool { false }

    public var view: UIView? { videoOutput }

    public func replace(url: URL, options: KSOptions) {
        KSLog("replaceUrl \(self)")
        shutdown()
        playerItem.delegate = nil
        playerItem = MEPlayerItem(url: url, options: options)
        if options.videoDisable {
            videoOutput = nil
        } else if videoOutput == nil {
            videoOutput = KSOptions.videoPlayerType.init(options: options)
            videoOutput?.displayLayerDelegate = self
        }
        self.options = options
        playerItem.delegate = self
        audioOutput.flush()
        audioOutput.renderSource = playerItem
        videoOutput?.renderSource = playerItem
        videoOutput?.options = options
    }

    public var currentPlaybackTime: TimeInterval {
        get {
            playerItem.currentPlaybackTime
        }
        set {
            seek(time: newValue) { _ in }
        }
    }

    public var duration: TimeInterval { playerItem.duration }

    public var fileSize: Double { playerItem.fileSize }

    public var seekable: Bool { playerItem.seekable }

    public var dynamicInfo: DynamicInfo? {
        playerItem.dynamicInfo
    }

    public func seek(time: TimeInterval, completion: @escaping ((Bool) -> Void)) {
        let time = max(time, 0)
        playbackState = .seeking
        runOnMainThread { [weak self] in
            self?.bufferingProgress = 0
        }
        let seekTime: TimeInterval
        if time >= duration, options.isLoopPlay {
            seekTime = 0
        } else {
            seekTime = time
        }
        playerItem.seek(time: seekTime) { [weak self] result in
            guard let self else { return }
            if result {
                self.audioOutput.flush()
                runOnMainThread { [weak self] in
                    guard let self else { return }
                    if let controlTimebase = self.videoOutput?.displayLayer.controlTimebase {
                        CMTimebaseSetTime(controlTimebase, time: CMTimeMake(value: Int64(self.currentPlaybackTime), timescale: 1))
                    }
                }
            }
            completion(result)
        }
    }
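
    // Illustrative use (hypothetical caller): time is clamped to >= 0, and seeking
    // past the end of a looping item wraps back to 0:
    //
    //     player.seek(time: player.currentPlaybackTime + 15) { finished in
    //         print("seek finished: \(finished)")
    //     }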

    public func prepareToPlay() {
        KSLog("prepareToPlay \(self)")
        options.prepareTime = CACurrentMediaTime()
        playerItem.prepareToPlay()
        bufferingProgress = 0
    }

    public func play() {
        KSLog("play \(self)")
        playbackState = .playing
        if #available(iOS 15.0, tvOS 15.0, macOS 12.0, *) {
            pipController?.invalidatePlaybackState()
        }
    }

    public func pause() {
        KSLog("pause \(self)")
        playbackState = .paused
        if #available(iOS 15.0, tvOS 15.0, macOS 12.0, *) {
            pipController?.invalidatePlaybackState()
        }
    }

    public func shutdown() {
        KSLog("shutdown \(self)")
        playbackState = .stopped
        loadState = .idle
        isReadyToPlay = false
        loopCount = 0
        playerItem.shutdown()
        options.prepareTime = 0
        options.dnsStartTime = 0
        options.tcpStartTime = 0
        options.tcpConnectedTime = 0
        options.openTime = 0
        options.findTime = 0
        options.readyTime = 0
        options.readAudioTime = 0
        options.readVideoTime = 0
        options.decodeAudioTime = 0
        options.decodeVideoTime = 0
        if KSOptions.isClearVideoWhereReplace {
            videoOutput?.flush()
        }
    }

    @MainActor
    public var contentMode: UIViewContentMode {
        get {
            view?.contentMode ?? .center
        }
        set {
            view?.contentMode = newValue
        }
    }

    public func thumbnailImageAtCurrentTime() async -> CGImage? {
        videoOutput?.pixelBuffer?.cgImage()
    }

    public func enterBackground() {}

    public func enterForeground() {}

    public var isMuted: Bool {
        get {
            audioOutput.isMuted
        }
        set {
            audioOutput.isMuted = newValue
        }
    }

    public func tracks(mediaType: AVFoundation.AVMediaType) -> [MediaPlayerTrack] {
        playerItem.assetTracks.compactMap { track -> MediaPlayerTrack? in
            if track.mediaType == mediaType {
                return track
            } else if mediaType == .subtitle {
                return track.closedCaptionsTrack
            }
            return nil
        }
    }

    public func select(track: some MediaPlayerTrack) {
        let isSeek = playerItem.select(track: track)
        if isSeek {
            audioOutput.flush()
        }
    }
}

@available(tvOS 14.0, *)
extension KSMEPlayer: AVPictureInPictureSampleBufferPlaybackDelegate {
    public func pictureInPictureController(_: AVPictureInPictureController, setPlaying playing: Bool) {
        playing ? play() : pause()
    }

    public func pictureInPictureControllerTimeRangeForPlayback(_: AVPictureInPictureController) -> CMTimeRange {
        // Handle live streams.
        if duration == 0 {
            return CMTimeRange(start: .negativeInfinity, duration: .positiveInfinity)
        }
        return CMTimeRange(start: 0, end: duration)
    }

    public func pictureInPictureControllerIsPlaybackPaused(_: AVPictureInPictureController) -> Bool {
        !isPlaying
    }

    public func pictureInPictureController(_: AVPictureInPictureController, didTransitionToRenderSize _: CMVideoDimensions) {}

    public func pictureInPictureController(_: AVPictureInPictureController, skipByInterval skipInterval: CMTime) async {
        seek(time: currentPlaybackTime + skipInterval.seconds) { _ in }
    }

    public func pictureInPictureControllerShouldProhibitBackgroundAudioPlayback(_: AVPictureInPictureController) -> Bool {
        false
    }
}

@available(macOS 12.0, iOS 15.0, tvOS 15.0, *)
extension KSMEPlayer: AVPlaybackCoordinatorPlaybackControlDelegate {
    public func playbackCoordinator(_: AVDelegatingPlaybackCoordinator, didIssue playCommand: AVDelegatingPlaybackCoordinatorPlayCommand, completionHandler: @escaping () -> Void) {
        guard playCommand.expectedCurrentItemIdentifier == (playbackCoordinator as? AVDelegatingPlaybackCoordinator)?.currentItemIdentifier else {
            completionHandler()
            return
        }
        DispatchQueue.main.async { [weak self] in
            guard let self else {
                return
            }
            if self.playbackState != .playing {
                self.play()
            }
            completionHandler()
        }
    }

    public func playbackCoordinator(_: AVDelegatingPlaybackCoordinator, didIssue pauseCommand: AVDelegatingPlaybackCoordinatorPauseCommand, completionHandler: @escaping () -> Void) {
        guard pauseCommand.expectedCurrentItemIdentifier == (playbackCoordinator as? AVDelegatingPlaybackCoordinator)?.currentItemIdentifier else {
            completionHandler()
            return
        }
        DispatchQueue.main.async { [weak self] in
            guard let self else {
                return
            }
            if self.playbackState != .paused {
                self.pause()
            }
            completionHandler()
        }
    }

    public func playbackCoordinator(_: AVDelegatingPlaybackCoordinator, didIssue seekCommand: AVDelegatingPlaybackCoordinatorSeekCommand) async {
        guard seekCommand.expectedCurrentItemIdentifier == (playbackCoordinator as? AVDelegatingPlaybackCoordinator)?.currentItemIdentifier else {
            return
        }
        let seekTime = fmod(seekCommand.itemTime.seconds, duration)
        if abs(currentPlaybackTime - seekTime) < CGFLOAT_EPSILON {
            return
        }
        seek(time: seekTime) { _ in }
    }

    public func playbackCoordinator(_: AVDelegatingPlaybackCoordinator, didIssue bufferingCommand: AVDelegatingPlaybackCoordinatorBufferingCommand, completionHandler: @escaping () -> Void) {
        guard bufferingCommand.expectedCurrentItemIdentifier == (playbackCoordinator as? AVDelegatingPlaybackCoordinator)?.currentItemIdentifier else {
            completionHandler()
            return
        }
        DispatchQueue.main.async { [weak self] in
            guard let self else {
                return
            }
            guard self.loadState != .playable, let countDown = bufferingCommand.completionDueDate?.timeIntervalSinceNow else {
                completionHandler()
                return
            }
            self.bufferingCountDownTimer?.invalidate()
            self.bufferingCountDownTimer = nil
            // Use scheduledTimer so the timer is attached to the main run loop;
            // a Timer built with init(timeInterval:repeats:block:) alone never fires.
            self.bufferingCountDownTimer = Timer.scheduledTimer(withTimeInterval: countDown, repeats: false) { _ in
                completionHandler()
            }
        }
    }
}

extension KSMEPlayer: DisplayLayerDelegate {
    public func change(displayLayer: AVSampleBufferDisplayLayer) {
        if #available(iOS 15.0, tvOS 15.0, macOS 12.0, *) {
            let contentSource = AVPictureInPictureController.ContentSource(sampleBufferDisplayLayer: displayLayer, playbackDelegate: self)
            _pipController = KSPictureInPictureController(contentSource: contentSource)
            // Mutating contentSource in place crashes, so build a new controller instead.
            // pipController?.contentSource = contentSource
        }
    }
}

public extension KSMEPlayer {
    func startRecord(url: URL) {
        playerItem.startRecord(url: url)
    }

    func stopRecord() {
        playerItem.stopRecord()
    }
}
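
// Illustrative use (hypothetical caller): recording remuxes the packets currently
// being read into a new container without re-encoding:
//
//     player.startRecord(url: documentsURL.appendingPathComponent("capture.mp4"))
//     // ... some time later ...
//     player.stopRecord()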
881
KSPlayer-main/Sources/KSPlayer/MEPlayer/MEPlayerItem.swift
Normal file
@@ -0,0 +1,881 @@
//
//  MEPlayerItem.swift
//  KSPlayer
//
//  Created by kintan on 2018/3/9.
//

import AVFoundation
import FFmpegKit
import Libavcodec
import Libavfilter
import Libavformat

public final class MEPlayerItem: Sendable {
    private let url: URL
    private let options: KSOptions
    private let operationQueue = OperationQueue()
    private let condition = NSCondition()
    private var formatCtx: UnsafeMutablePointer<AVFormatContext>?
    private var outputFormatCtx: UnsafeMutablePointer<AVFormatContext>?
    private var outputPacket: UnsafeMutablePointer<AVPacket>?
    private var streamMapping = [Int: Int]()
    private var openOperation: BlockOperation?
    private var readOperation: BlockOperation?
    private var closeOperation: BlockOperation?
    private var seekingCompletionHandler: ((Bool) -> Void)?
    // Set when there is no audio data left to render.
    private var isAudioStalled = true
    private var audioClock = KSClock()
    private var videoClock = KSClock()
    private var isFirst = true
    private var isSeek = false
    private var allPlayerItemTracks = [PlayerItemTrackProtocol]()
    private var maxFrameDuration = 10.0
    private var videoAudioTracks = [CapacityProtocol]()
    private var videoTrack: SyncPlayerItemTrack<VideoVTBFrame>?
    private var audioTrack: SyncPlayerItemTrack<AudioFrame>?
    private(set) var assetTracks = [FFmpegAssetTrack]()
    private var videoAdaptation: VideoAdaptationState?
    private var videoDisplayCount = UInt8(0)
    private var seekByBytes = false
    private var lastVideoDisplayTime = CACurrentMediaTime()
    public private(set) var chapters: [Chapter] = []
    public var currentPlaybackTime: TimeInterval {
        state == .seeking ? seekTime : (mainClock().time - startTime).seconds
    }

    private var seekTime = TimeInterval(0)
    private var startTime = CMTime.zero
    public private(set) var duration: TimeInterval = 0
    public private(set) var fileSize: Double = 0
    public private(set) var naturalSize = CGSize.zero
    private var error: NSError? {
        didSet {
            if error != nil {
                state = .failed
            }
        }
    }

    private var state = MESourceState.idle {
        didSet {
            switch state {
            case .opened:
                delegate?.sourceDidOpened()
            case .reading:
                timer.fireDate = Date.distantPast
            case .closed:
                timer.invalidate()
            case .failed:
                delegate?.sourceDidFailed(error: error)
                timer.fireDate = Date.distantFuture
            case .idle, .opening, .seeking, .paused, .finished:
                break
            }
        }
    }

    private lazy var timer: Timer = .scheduledTimer(withTimeInterval: 0.05, repeats: true) { [weak self] _ in
        self?.codecDidChangeCapacity()
    }

    lazy var dynamicInfo = DynamicInfo { [weak self] in
        // Metadata can change while playing, so it is exposed through DynamicInfo.
        toDictionary(self?.formatCtx?.pointee.metadata)
    } bytesRead: { [weak self] in
        self?.formatCtx?.pointee.pb?.pointee.bytes_read ?? 0
    } audioBitrate: { [weak self] in
        Int(8 * (self?.audioTrack?.bitrate ?? 0))
    } videoBitrate: { [weak self] in
        Int(8 * (self?.videoTrack?.bitrate ?? 0))
    }

    private static var onceInitial: Void = {
        _ = avformat_network_init()
        av_log_set_callback { ptr, level, format, args in
            guard let format else {
                return
            }
            var log = String(cString: format)
            let arguments: CVaListPointer? = args
            if let arguments {
                log = NSString(format: log, arguments: arguments) as String
            }
            if let ptr {
                let avclass = ptr.assumingMemoryBound(to: UnsafePointer<AVClass>.self).pointee
                if avclass == avfilter_get_class() {
                    let context = ptr.assumingMemoryBound(to: AVFilterContext.self).pointee
                    if let opaque = context.graph?.pointee.opaque {
                        let options = Unmanaged<KSOptions>.fromOpaque(opaque).takeUnretainedValue()
                        options.filter(log: log)
                    }
                }
            }
            // No parser/decoder was found for this codec.
            if log.hasPrefix("parser not found for codec") {
                KSLog(level: .error, log)
            }
            KSLog(level: LogLevel(rawValue: level) ?? .warning, log)
        }
    }()

    weak var delegate: MEPlayerDelegate?
    public init(url: URL, options: KSOptions) {
        self.url = url
        self.options = options
        timer.fireDate = Date.distantFuture
        operationQueue.name = "KSPlayer_" + String(describing: self).components(separatedBy: ".").last!
        operationQueue.maxConcurrentOperationCount = 1
        operationQueue.qualityOfService = .userInteractive
        _ = MEPlayerItem.onceInitial
    }

    func select(track: some MediaPlayerTrack) -> Bool {
        if track.isEnabled {
            return false
        }
        assetTracks.filter { $0.mediaType == track.mediaType }.forEach {
            $0.isEnabled = track === $0
        }
        guard let assetTrack = track as? FFmpegAssetTrack else {
            return false
        }
        if assetTrack.mediaType == .video {
            findBestAudio(videoTrack: assetTrack)
        } else if assetTrack.mediaType == .subtitle {
            if assetTrack.isImageSubtitle {
                if !options.isSeekImageSubtitle {
                    return false
                }
            } else {
                return false
            }
        }
        seek(time: currentPlaybackTime) { _ in
        }
        return true
    }
}
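
// Illustrative use (hypothetical caller): select(track:) returns true when the
// change requires a seek (video tracks, and image subtitles when
// options.isSeekImageSubtitle is set), so callers know to expect a flush:
//
//     if let audio = item.assetTracks.first(where: { $0.mediaType == .audio && !$0.isEnabled }) {
//         _ = item.select(track: audio)
//     }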

// MARK: private functions

extension MEPlayerItem {
    private func openThread() {
        avformat_close_input(&self.formatCtx)
        formatCtx = avformat_alloc_context()
        guard let formatCtx else {
            error = NSError(errorCode: .formatCreate)
            return
        }
        var interruptCB = AVIOInterruptCB()
        interruptCB.opaque = Unmanaged.passUnretained(self).toOpaque()
        interruptCB.callback = { ctx -> Int32 in
            guard let ctx else {
                return 0
            }
            let formatContext = Unmanaged<MEPlayerItem>.fromOpaque(ctx).takeUnretainedValue()
            switch formatContext.state {
            case .finished, .closed, .failed:
                return 1
            default:
                return 0
            }
        }
        formatCtx.pointee.interrupt_callback = interruptCB
        // avformat_close_input calls io_close2, but custom protocols never go through io_close2.
        // formatCtx.pointee.io_close2 = { _, _ -> Int32 in
        //     0
        // }
        setHttpProxy()
        var avOptions = options.formatContextOptions.avOptions
        if let pb = options.process(url: url) {
            // For a custom protocol, allocate an AVIOContext via avio_alloc_context and assign it to formatCtx.pointee.pb.
            formatCtx.pointee.pb = pb.getContext()
        }
        let urlString: String
        if url.isFileURL {
            urlString = url.path
        } else {
            urlString = url.absoluteString
        }
        var result = avformat_open_input(&self.formatCtx, urlString, nil, &avOptions)
        av_dict_free(&avOptions)
        if result == AVError.eof.code {
            state = .finished
            delegate?.sourceDidFinished()
            return
        }
        guard result == 0 else {
            error = .init(errorCode: .formatOpenInput, avErrorCode: result)
            avformat_close_input(&self.formatCtx)
            return
        }
        options.openTime = CACurrentMediaTime()
        formatCtx.pointee.flags |= AVFMT_FLAG_GENPTS
        if options.nobuffer {
            formatCtx.pointee.flags |= AVFMT_FLAG_NOBUFFER
        }
        if let probesize = options.probesize {
            formatCtx.pointee.probesize = probesize
        }
        if let maxAnalyzeDuration = options.maxAnalyzeDuration {
            formatCtx.pointee.max_analyze_duration = maxAnalyzeDuration
        }
        result = avformat_find_stream_info(formatCtx, nil)
        guard result == 0 else {
            error = .init(errorCode: .formatFindStreamInfo, avErrorCode: result)
            avformat_close_input(&self.formatCtx)
            return
        }
        // FIXME: hack, ffplay maybe should not use avio_feof() to test for the end
        formatCtx.pointee.pb?.pointee.eof_reached = 0
        let flags = formatCtx.pointee.iformat.pointee.flags
        maxFrameDuration = flags & AVFMT_TS_DISCONT == AVFMT_TS_DISCONT ? 10.0 : 3600.0
        options.findTime = CACurrentMediaTime()
        options.formatName = String(cString: formatCtx.pointee.iformat.pointee.name)
        seekByBytes = (flags & AVFMT_NO_BYTE_SEEK == 0) && (flags & AVFMT_TS_DISCONT != 0) && options.formatName != "ogg"
        if formatCtx.pointee.start_time != Int64.min {
            startTime = CMTime(value: formatCtx.pointee.start_time, timescale: AV_TIME_BASE)
            videoClock.time = startTime
            audioClock.time = startTime
        }
        duration = TimeInterval(max(formatCtx.pointee.duration, 0) / Int64(AV_TIME_BASE))
        fileSize = Double(formatCtx.pointee.bit_rate) * duration / 8
        createCodec(formatCtx: formatCtx)
        if formatCtx.pointee.nb_chapters > 0 {
            chapters.removeAll()
            for i in 0 ..< formatCtx.pointee.nb_chapters {
                if let chapter = formatCtx.pointee.chapters[Int(i)]?.pointee {
                    let timeBase = Timebase(chapter.time_base)
                    let start = timeBase.cmtime(for: chapter.start).seconds
                    let end = timeBase.cmtime(for: chapter.end).seconds
                    let metadata = toDictionary(chapter.metadata)
                    let title = metadata["title"] ?? ""
                    chapters.append(Chapter(start: start, end: end, title: title))
                }
            }
        }

        if let outputURL = options.outputURL {
            startRecord(url: outputURL)
        }
        if videoTrack == nil, audioTrack == nil {
            state = .failed
        } else {
            state = .opened
            read()
        }
    }
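
    // The AVIOInterruptCB wired up above is FFmpeg's cooperative cancellation hook:
    // blocking calls such as avformat_open_input and av_read_frame poll it, and a
    // non-zero return makes them fail fast. Returning 1 once state is .finished,
    // .closed or .failed is what lets shutdown() unblock a stalled network read.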

    func startRecord(url: URL) {
        stopRecord()
        let filename = url.isFileURL ? url.path : url.absoluteString
        var ret = avformat_alloc_output_context2(&outputFormatCtx, nil, nil, filename)
        guard let outputFormatCtx, let formatCtx else {
            KSLog(NSError(errorCode: .formatOutputCreate, avErrorCode: ret))
            return
        }
        var index = 0
        var audioIndex: Int?
        var videoIndex: Int?
        let formatName = outputFormatCtx.pointee.oformat.pointee.name.flatMap { String(cString: $0) }
        for i in 0 ..< Int(formatCtx.pointee.nb_streams) {
            if let inputStream = formatCtx.pointee.streams[i] {
                let codecType = inputStream.pointee.codecpar.pointee.codec_type
                if [AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_SUBTITLE].contains(codecType) {
                    if codecType == AVMEDIA_TYPE_AUDIO {
                        if let audioIndex {
                            streamMapping[i] = audioIndex
                            continue
                        } else {
                            audioIndex = index
                        }
                    } else if codecType == AVMEDIA_TYPE_VIDEO {
                        if let videoIndex {
                            streamMapping[i] = videoIndex
                            continue
                        } else {
                            videoIndex = index
                        }
                    }
                    if let outStream = avformat_new_stream(outputFormatCtx, nil) {
                        streamMapping[i] = index
                        index += 1
                        avcodec_parameters_copy(outStream.pointee.codecpar, inputStream.pointee.codecpar)
                        if codecType == AVMEDIA_TYPE_SUBTITLE, formatName == "mp4" || formatName == "mov" {
                            outStream.pointee.codecpar.pointee.codec_id = AV_CODEC_ID_MOV_TEXT
                        }
                        if inputStream.pointee.codecpar.pointee.codec_id == AV_CODEC_ID_HEVC {
                            outStream.pointee.codecpar.pointee.codec_tag = CMFormatDescription.MediaSubType.hevc.rawValue.bigEndian
                        } else {
                            outStream.pointee.codecpar.pointee.codec_tag = 0
                        }
                    }
                }
            }
        }
        avio_open(&(outputFormatCtx.pointee.pb), filename, AVIO_FLAG_WRITE)
        ret = avformat_write_header(outputFormatCtx, nil)
        guard ret >= 0 else {
            KSLog(NSError(errorCode: .formatWriteHeader, avErrorCode: ret))
            avformat_close_input(&self.outputFormatCtx)
            return
        }
        outputPacket = av_packet_alloc()
    }

    private func createCodec(formatCtx: UnsafeMutablePointer<AVFormatContext>) {
        allPlayerItemTracks.removeAll()
        assetTracks.removeAll()
        videoAdaptation = nil
        videoTrack = nil
        audioTrack = nil
        videoAudioTracks.removeAll()
        assetTracks = (0 ..< Int(formatCtx.pointee.nb_streams)).compactMap { i in
            if let coreStream = formatCtx.pointee.streams[i] {
                coreStream.pointee.discard = AVDISCARD_ALL
                if let assetTrack = FFmpegAssetTrack(stream: coreStream) {
                    if assetTrack.mediaType == .subtitle {
                        let subtitle = SyncPlayerItemTrack<SubtitleFrame>(mediaType: .subtitle, frameCapacity: 255, options: options)
                        assetTrack.subtitle = subtitle
                        allPlayerItemTracks.append(subtitle)
                    }
                    assetTrack.seekByBytes = seekByBytes
                    return assetTrack
                }
            }
            return nil
        }
        var videoIndex: Int32 = -1
        if !options.videoDisable {
            let videos = assetTracks.filter { $0.mediaType == .video }
            let wantedStreamNb: Int32
            if !videos.isEmpty, let index = options.wantedVideo(tracks: videos) {
                wantedStreamNb = videos[index].trackID
            } else {
                wantedStreamNb = -1
            }
            videoIndex = av_find_best_stream(formatCtx, AVMEDIA_TYPE_VIDEO, wantedStreamNb, -1, nil, 0)
            if let first = videos.first(where: { $0.trackID == videoIndex }) {
                first.isEnabled = true
                let rotation = first.rotation
                if rotation > 0, options.autoRotate {
                    options.hardwareDecode = false
                    if abs(rotation - 90) <= 1 {
                        options.videoFilters.append("transpose=clock")
                    } else if abs(rotation - 180) <= 1 {
                        options.videoFilters.append("hflip")
                        options.videoFilters.append("vflip")
                    } else if abs(rotation - 270) <= 1 {
                        options.videoFilters.append("transpose=cclock")
                    } else if abs(rotation) > 1 {
                        options.videoFilters.append("rotate=\(rotation)*PI/180")
                    }
                }
                naturalSize = abs(rotation - 90) <= 1 || abs(rotation - 270) <= 1 ? first.naturalSize.reverse : first.naturalSize
                options.process(assetTrack: first)
                let frameCapacity = options.videoFrameMaxCount(fps: first.nominalFrameRate, naturalSize: naturalSize, isLive: duration == 0)
                let track = options.syncDecodeVideo ? SyncPlayerItemTrack<VideoVTBFrame>(mediaType: .video, frameCapacity: frameCapacity, options: options) : AsyncPlayerItemTrack<VideoVTBFrame>(mediaType: .video, frameCapacity: frameCapacity, options: options)
                track.delegate = self
                allPlayerItemTracks.append(track)
                videoTrack = track
                if first.codecpar.codec_id != AV_CODEC_ID_MJPEG {
                    videoAudioTracks.append(track)
                }
                let bitRates = videos.map(\.bitRate).filter {
                    $0 > 0
                }
                if bitRates.count > 1, options.videoAdaptable {
                    let bitRateState = VideoAdaptationState.BitRateState(bitRate: first.bitRate, time: CACurrentMediaTime())
                    videoAdaptation = VideoAdaptationState(bitRates: bitRates.sorted(by: <), duration: duration, fps: first.nominalFrameRate, bitRateStates: [bitRateState])
                }
            }
        }

        let audios = assetTracks.filter { $0.mediaType == .audio }
        let wantedStreamNb: Int32
        if !audios.isEmpty, let index = options.wantedAudio(tracks: audios) {
            wantedStreamNb = audios[index].trackID
        } else {
            wantedStreamNb = -1
        }
        let index = av_find_best_stream(formatCtx, AVMEDIA_TYPE_AUDIO, wantedStreamNb, videoIndex, nil, 0)
        if let first = audios.first(where: {
            index > 0 ? $0.trackID == index : true
        }), first.codecpar.codec_id != AV_CODEC_ID_NONE {
            first.isEnabled = true
            options.process(assetTrack: first)
            // Compare across all audio tracks: TrueHD has an fps of 1200, far above the other tracks.
            let fps = audios.map(\.nominalFrameRate).max() ?? 44
            let frameCapacity = options.audioFrameMaxCount(fps: fps, channelCount: Int(first.audioDescriptor?.audioFormat.channelCount ?? 2))
            let track = options.syncDecodeAudio ? SyncPlayerItemTrack<AudioFrame>(mediaType: .audio, frameCapacity: frameCapacity, options: options) : AsyncPlayerItemTrack<AudioFrame>(mediaType: .audio, frameCapacity: frameCapacity, options: options)
            track.delegate = self
            allPlayerItemTracks.append(track)
            audioTrack = track
            videoAudioTracks.append(track)
            isAudioStalled = false
        }
    }
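
    // The rotation handling above maps the stream's rotation side data onto FFmpeg
    // filters (hardware decoding is disabled first because the filters need software
    // frames): 90° -> "transpose=clock", 180° -> "hflip" + "vflip",
    // 270° -> "transpose=cclock", anything else -> "rotate=<deg>*PI/180".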

    private func read() {
        readOperation = BlockOperation { [weak self] in
            guard let self else { return }
            Thread.current.name = (self.operationQueue.name ?? "") + "_read"
            Thread.current.stackSize = KSOptions.stackSize
            self.readThread()
        }
        readOperation?.queuePriority = .veryHigh
        readOperation?.qualityOfService = .userInteractive
        if let readOperation {
            operationQueue.addOperation(readOperation)
        }
    }

    private func readThread() {
        if state == .opened {
            if options.startPlayTime > 0 {
                let timestamp = startTime + CMTime(seconds: options.startPlayTime)
                let flags = seekByBytes ? AVSEEK_FLAG_BYTE : 0
                let seekStartTime = CACurrentMediaTime()
                _ = avformat_seek_file(formatCtx, -1, Int64.min, timestamp.value, Int64.max, flags)
                audioClock.time = timestamp
                videoClock.time = timestamp
                KSLog("start PlayTime: \(timestamp.seconds) spend Time: \(CACurrentMediaTime() - seekStartTime)")
            }
            state = .reading
        }
        allPlayerItemTracks.forEach { $0.decode() }
        while [MESourceState.paused, .seeking, .reading].contains(state) {
            if state == .paused {
                condition.wait()
            }
            if state == .seeking {
                let seekToTime = seekTime
                let time = mainClock().time
                var increase = Int64(seekTime + startTime.seconds - time.seconds)
                var seekFlags = options.seekFlags
                let timeStamp: Int64
                if seekByBytes {
                    seekFlags |= AVSEEK_FLAG_BYTE
                    if let bitRate = formatCtx?.pointee.bit_rate {
                        increase = increase * bitRate / 8
                    } else {
                        increase *= 180_000
                    }
                    var position = Int64(-1)
                    if position < 0 {
                        position = videoClock.position
                    }
                    if position < 0 {
                        position = audioClock.position
                    }
                    if position < 0 {
                        position = avio_tell(formatCtx?.pointee.pb)
                    }
                    timeStamp = position + increase
                } else {
                    increase *= Int64(AV_TIME_BASE)
                    timeStamp = Int64(time.seconds) * Int64(AV_TIME_BASE) + increase
                }
                let seekMin = increase > 0 ? timeStamp - increase + 2 : Int64.min
                let seekMax = increase < 0 ? timeStamp - increase - 2 : Int64.max
                // can not seek to key frame
                let seekStartTime = CACurrentMediaTime()
                var result = avformat_seek_file(formatCtx, -1, seekMin, timeStamp, seekMax, seekFlags)
                // var result = av_seek_frame(formatCtx, -1, timeStamp, seekFlags)
                // When seeking before the beginning of the file, and seeking fails,
                // try again without the backwards flag to make it seek to the
                // beginning.
                if result < 0, seekFlags & AVSEEK_FLAG_BACKWARD == AVSEEK_FLAG_BACKWARD {
                    KSLog("seek to \(seekToTime) failed. seekFlags remove BACKWARD")
                    options.seekFlags &= ~AVSEEK_FLAG_BACKWARD
                    seekFlags &= ~AVSEEK_FLAG_BACKWARD
                    result = avformat_seek_file(formatCtx, -1, seekMin, timeStamp, seekMax, seekFlags)
                }
                KSLog("seek to \(seekToTime) spend Time: \(CACurrentMediaTime() - seekStartTime)")
                if state == .closed {
                    break
                }
                if seekToTime != seekTime {
                    continue
                }
                isSeek = true
                allPlayerItemTracks.forEach { $0.seek(time: seekToTime) }
                DispatchQueue.main.async { [weak self] in
                    guard let self else { return }
                    self.seekingCompletionHandler?(result >= 0)
                    self.seekingCompletionHandler = nil
                }
                audioClock.time = CMTime(seconds: seekToTime, preferredTimescale: time.timescale) + startTime
                videoClock.time = CMTime(seconds: seekToTime, preferredTimescale: time.timescale) + startTime
                state = .reading
            } else if state == .reading {
                autoreleasepool {
                    _ = reading()
                }
            }
        }
    }
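
    // Byte-based seeking above converts the wanted time delta into a byte offset,
    // bytes ≈ seconds * bit_rate / 8 (or the code's fixed 180_000 multiplier when the
    // muxer reports no bit rate), measured from whichever clock has a position, else
    // from avio_tell. This is much like ffplay's byte-seek path for formats with
    // discontinuous timestamps (AVFMT_TS_DISCONT), where timestamp seeks are
    // unreliable.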

    private func reading() -> Int32 {
        let packet = Packet()
        guard let corePacket = packet.corePacket else {
            return 0
        }
        let readResult = av_read_frame(formatCtx, corePacket)
        if state == .closed {
            return 0
        }
        if readResult == 0 {
            if let outputFormatCtx, let formatCtx {
                let index = Int(corePacket.pointee.stream_index)
                if let outputIndex = streamMapping[index],
                   let inputTb = formatCtx.pointee.streams[index]?.pointee.time_base,
                   let outputTb = outputFormatCtx.pointee.streams[outputIndex]?.pointee.time_base,
                   let outputPacket
                {
                    av_packet_ref(outputPacket, corePacket)
                    outputPacket.pointee.stream_index = Int32(outputIndex)
                    av_packet_rescale_ts(outputPacket, inputTb, outputTb)
                    outputPacket.pointee.pos = -1
                    let ret = av_interleaved_write_frame(outputFormatCtx, outputPacket)
                    if ret < 0 {
                        KSLog("can not av_interleaved_write_frame")
                    }
                }
            }
            if corePacket.pointee.size <= 0 {
                return 0
            }
            let first = assetTracks.first { $0.trackID == corePacket.pointee.stream_index }
            if let first, first.isEnabled {
                packet.assetTrack = first
                if first.mediaType == .video {
                    if options.readVideoTime == 0 {
                        options.readVideoTime = CACurrentMediaTime()
                    }
                    videoTrack?.putPacket(packet: packet)
                } else if first.mediaType == .audio {
                    if options.readAudioTime == 0 {
                        options.readAudioTime = CACurrentMediaTime()
                    }
                    audioTrack?.putPacket(packet: packet)
                } else {
                    first.subtitle?.putPacket(packet: packet)
                }
            }
        } else {
            if readResult == AVError.eof.code || avio_feof(formatCtx?.pointee.pb) > 0 {
                if options.isLoopPlay, allPlayerItemTracks.allSatisfy({ !$0.isLoopModel }) {
                    allPlayerItemTracks.forEach { $0.isLoopModel = true }
                    _ = av_seek_frame(formatCtx, -1, startTime.value, AVSEEK_FLAG_BACKWARD)
                } else {
                    allPlayerItemTracks.forEach { $0.isEndOfFile = true }
                    state = .finished
                }
            } else {
                // if IS_AVERROR_INVALIDDATA(readResult)
                error = .init(errorCode: .readFrame, avErrorCode: readResult)
            }
        }
        return readResult
    }

    private func pause() {
        if state == .reading {
            state = .paused
        }
    }

    private func resume() {
        if state == .paused {
            state = .reading
            condition.signal()
        }
    }
}

// MARK: MediaPlayback

extension MEPlayerItem: MediaPlayback {
    var seekable: Bool {
        guard let formatCtx else {
            return false
        }
        var seekable = true
        if let ioContext = formatCtx.pointee.pb {
            seekable = ioContext.pointee.seekable > 0
        }
        return seekable
    }

    public func prepareToPlay() {
        state = .opening
        openOperation = BlockOperation { [weak self] in
            guard let self else { return }
            Thread.current.name = (self.operationQueue.name ?? "") + "_open"
            Thread.current.stackSize = KSOptions.stackSize
            self.openThread()
        }
        openOperation?.queuePriority = .veryHigh
        openOperation?.qualityOfService = .userInteractive
        if let openOperation {
            operationQueue.addOperation(openOperation)
        }
    }

    public func shutdown() {
        guard state != .closed else { return }
        state = .closed
        av_packet_free(&outputPacket)
        stopRecord()
        // The retain cycle here is intentional; it is released once the operation finishes.
        let closeOperation = BlockOperation {
            Thread.current.name = (self.operationQueue.name ?? "") + "_close"
            self.allPlayerItemTracks.forEach { $0.shutdown() }
            KSLog("clearing formatCtx")
            // Only custom protocols leave av_class empty.
            if let formatCtx = self.formatCtx, (formatCtx.pointee.flags & AVFMT_FLAG_CUSTOM_IO) != 0, let opaque = formatCtx.pointee.pb.pointee.opaque {
                let value = Unmanaged<AbstractAVIOContext>.fromOpaque(opaque).takeRetainedValue()
                value.close()
            }
            // Do not free pb manually, or playing the same url a second time breaks.
            // self.formatCtx?.pointee.pb = nil
            self.formatCtx?.pointee.interrupt_callback.opaque = nil
            self.formatCtx?.pointee.interrupt_callback.callback = nil
            avformat_close_input(&self.formatCtx)
            avformat_close_input(&self.outputFormatCtx)
            self.duration = 0
            self.closeOperation = nil
            self.operationQueue.cancelAllOperations()
        }
        closeOperation.queuePriority = .veryHigh
        closeOperation.qualityOfService = .userInteractive
        if let readOperation {
            readOperation.cancel()
            closeOperation.addDependency(readOperation)
        } else if let openOperation {
            openOperation.cancel()
            closeOperation.addDependency(openOperation)
        }
        operationQueue.addOperation(closeOperation)
        condition.signal()
        if options.syncDecodeVideo || options.syncDecodeAudio {
            DispatchQueue.global().async { [weak self] in
                self?.allPlayerItemTracks.forEach { $0.shutdown() }
            }
        }
        self.closeOperation = closeOperation
    }

    func stopRecord() {
        if let outputFormatCtx {
            av_write_trailer(outputFormatCtx)
        }
    }

    public func seek(time: TimeInterval, completion: @escaping ((Bool) -> Void)) {
        if state == .reading || state == .paused {
            seekTime = time
            state = .seeking
            seekingCompletionHandler = completion
            condition.broadcast()
            allPlayerItemTracks.forEach { $0.seek(time: time) }
        } else if state == .finished {
            seekTime = time
            state = .seeking
            seekingCompletionHandler = completion
            read()
        } else if state == .seeking {
            seekTime = time
            seekingCompletionHandler = completion
        }
        isAudioStalled = audioTrack == nil
    }
}

extension MEPlayerItem: CodecCapacityDelegate {
    func codecDidChangeCapacity() {
        let loadingState = options.playable(capacitys: videoAudioTracks, isFirst: isFirst, isSeek: isSeek)
        delegate?.sourceDidChange(loadingState: loadingState)
        if loadingState.isPlayable {
            isFirst = false
            isSeek = false
            if loadingState.loadedTime > options.maxBufferDuration {
                adaptableVideo(loadingState: loadingState)
                pause()
            } else if loadingState.loadedTime < options.maxBufferDuration / 2 {
                resume()
            }
        } else {
            resume()
            adaptableVideo(loadingState: loadingState)
        }
    }

    func codecDidFinished(track: some CapacityProtocol) {
        if track.mediaType == .audio {
            isAudioStalled = true
        }
        let allSatisfy = videoAudioTracks.allSatisfy { $0.isEndOfFile && $0.frameCount == 0 && $0.packetCount == 0 }
        if allSatisfy {
            delegate?.sourceDidFinished()
            timer.fireDate = Date.distantFuture
            if options.isLoopPlay {
                isAudioStalled = audioTrack == nil
                audioTrack?.isLoopModel = false
                videoTrack?.isLoopModel = false
                if state == .finished {
                    seek(time: 0) { _ in }
                }
            }
        }
    }

    private func adaptableVideo(loadingState: LoadingState) {
        if options.videoDisable || videoAdaptation == nil || loadingState.isEndOfFile || loadingState.isSeek || state == .seeking {
            return
        }
        guard let track = videoTrack else {
            return
        }
        videoAdaptation?.loadedCount = track.packetCount + track.frameCount
        videoAdaptation?.currentPlaybackTime = currentPlaybackTime
        videoAdaptation?.isPlayable = loadingState.isPlayable
        guard let (oldBitRate, newBitrate) = options.adaptable(state: videoAdaptation), oldBitRate != newBitrate,
              let newFFmpegAssetTrack = assetTracks.first(where: { $0.mediaType == .video && $0.bitRate == newBitrate })
        else {
            return
        }
        assetTracks.first { $0.mediaType == .video && $0.bitRate == oldBitRate }?.isEnabled = false
        newFFmpegAssetTrack.isEnabled = true
        findBestAudio(videoTrack: newFFmpegAssetTrack)
        let bitRateState = VideoAdaptationState.BitRateState(bitRate: newBitrate, time: CACurrentMediaTime())
        videoAdaptation?.bitRateStates.append(bitRateState)
        delegate?.sourceDidChange(oldBitRate: oldBitRate, newBitrate: newBitrate)
    }

    private func findBestAudio(videoTrack: FFmpegAssetTrack) {
        guard videoAdaptation != nil, let first = assetTracks.first(where: { $0.mediaType == .audio && $0.isEnabled }) else {
            return
        }
        let index = av_find_best_stream(formatCtx, AVMEDIA_TYPE_AUDIO, -1, videoTrack.trackID, nil, 0)
        if index != first.trackID {
            first.isEnabled = false
            assetTracks.first { $0.mediaType == .audio && $0.trackID == index }?.isEnabled = true
        }
    }
}

extension MEPlayerItem: OutputRenderSourceDelegate {
    func mainClock() -> KSClock {
        isAudioStalled ? videoClock : audioClock
    }

    public func setVideo(time: CMTime, position: Int64) {
        // print("[video] video interval \(CACurrentMediaTime() - videoClock.lastMediaTime) video diff \(time.seconds - videoClock.time.seconds)")
        videoClock.time = time
        videoClock.position = position
        videoDisplayCount += 1
        let diff = videoClock.lastMediaTime - lastVideoDisplayTime
        if diff > 1 {
            dynamicInfo.displayFPS = Double(videoDisplayCount) / diff
            videoDisplayCount = 0
            lastVideoDisplayTime = videoClock.lastMediaTime
        }
    }

    public func setAudio(time: CMTime, position: Int64) {
        // print("[audio] setAudio: \(time.seconds)")
        // Updating on the main thread makes playback noticeably smoother.
        runOnMainThread {
            self.audioClock.time = time
            self.audioClock.position = position
        }
    }

    public func getVideoOutputRender(force: Bool) -> VideoVTBFrame? {
        guard let videoTrack else {
            return nil
        }
        var type: ClockProcessType = force ? .next : .remain
        let predicate: ((VideoVTBFrame, Int) -> Bool)? = force ? nil : { [weak self] frame, count -> Bool in
            guard let self else { return true }
            (self.dynamicInfo.audioVideoSyncDiff, type) = self.options.videoClockSync(main: self.mainClock(), nextVideoTime: frame.seconds, fps: Double(frame.fps), frameCount: count)
            return type != .remain
        }
        let frame = videoTrack.getOutputRender(where: predicate)
        switch type {
        case .remain:
            break
        case .next:
            break
        case .dropNextFrame:
            if videoTrack.getOutputRender(where: nil) != nil {
                dynamicInfo.droppedVideoFrameCount += 1
            }
        case .flush:
            let count = videoTrack.outputRenderQueue.count
            videoTrack.outputRenderQueue.flush()
            dynamicInfo.droppedVideoFrameCount += UInt32(count)
        case .seek:
            videoTrack.outputRenderQueue.flush()
            videoTrack.seekTime = mainClock().time.seconds
        case .dropNextPacket:
            if let videoTrack = videoTrack as? AsyncPlayerItemTrack {
                let packet = videoTrack.packetQueue.pop { item, _ -> Bool in
                    !item.isKeyFrame
                }
                if packet != nil {
                    dynamicInfo.droppedVideoPacketCount += 1
                }
            }
        case .dropGOPPacket:
            if let videoTrack = videoTrack as? AsyncPlayerItemTrack {
                var packet: Packet? = nil
                repeat {
                    packet = videoTrack.packetQueue.pop { item, _ -> Bool in
                        !item.isKeyFrame
                    }
                    if packet != nil {
                        dynamicInfo.droppedVideoPacketCount += 1
                    }
                } while packet != nil
            }
        }
        return frame
    }
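
    // videoClockSync compares the next frame's presentation time with the master
    // clock (audio when available, otherwise video; see mainClock()) and returns a
    // ClockProcessType: render (.next), hold (.remain), or shed load (.dropNextFrame,
    // .dropNextPacket, .dropGOPPacket, .flush, .seek). The switch above only has to
    // carry out the shedding and the drop bookkeeping.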

    public func getAudioOutputRender() -> AudioFrame? {
        if let frame = audioTrack?.getOutputRender(where: nil) {
            SubtitleModel.audioRecognizes.first {
                $0.isEnabled
            }?.append(frame: frame)
            return frame
        } else {
            return nil
        }
    }
}

extension AbstractAVIOContext {
    func getContext() -> UnsafeMutablePointer<AVIOContext> {
        // The context must retain self, or it would be released early; the retain is balanced in shutdown.
        avio_alloc_context(av_malloc(Int(bufferSize)), bufferSize, writable ? 1 : 0, Unmanaged.passRetained(self).toOpaque()) { opaque, buffer, size -> Int32 in
            let value = Unmanaged<AbstractAVIOContext>.fromOpaque(opaque!).takeUnretainedValue()
            let ret = value.read(buffer: buffer, size: size)
            return Int32(ret)
        } _: { opaque, buffer, size -> Int32 in
            let value = Unmanaged<AbstractAVIOContext>.fromOpaque(opaque!).takeUnretainedValue()
            let ret = value.write(buffer: buffer, size: size)
            return Int32(ret)
        } _: { opaque, offset, whence -> Int64 in
            let value = Unmanaged<AbstractAVIOContext>.fromOpaque(opaque!).takeUnretainedValue()
            if whence == AVSEEK_SIZE {
                return value.fileSize()
            }
            return value.seek(offset: offset, whence: whence)
        }
    }
}
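
// A minimal sketch of a custom protocol built on this hook. The subclass and its
// member names are hypothetical; the signatures are assumed to line up with the
// read/write/seek/fileSize callbacks wired up in getContext() above:
//
//     final class DataAVIOContext: AbstractAVIOContext {
//         private let data: Data
//         private var offset = 0
//         init(data: Data) { self.data = data }
//         override func read(buffer: UnsafeMutablePointer<UInt8>?, size: Int32) -> Int {
//             let count = min(Int(size), data.count - offset)
//             guard count > 0, let buffer else { return 0 } // 0 bytes -> EOF
//             data.copyBytes(to: buffer, from: offset ..< offset + count)
//             offset += count
//             return count
//         }
//         override func fileSize() -> Int64 { Int64(data.count) }
//     }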
316
KSPlayer-main/Sources/KSPlayer/MEPlayer/MEPlayerItemTrack.swift
Normal file
@@ -0,0 +1,316 @@
//
//  Decoder.swift
//  KSPlayer
//
//  Created by kintan on 2018/3/9.
//
import AVFoundation
import CoreMedia
import Libavformat

protocol PlayerItemTrackProtocol: CapacityProtocol, AnyObject {
    init(mediaType: AVFoundation.AVMediaType, frameCapacity: UInt8, options: KSOptions)
    // Whether gapless looping is active.
    var isLoopModel: Bool { get set }
    var isEndOfFile: Bool { get set }
    var delegate: CodecCapacityDelegate? { get set }
    func decode()
    func seek(time: TimeInterval)
    func putPacket(packet: Packet)
    // func getOutputRender<Frame: ObjectQueueItem>(where predicate: ((Frame) -> Bool)?) -> Frame?
    func shutdown()
}

class SyncPlayerItemTrack<Frame: MEFrame>: PlayerItemTrackProtocol, CustomStringConvertible {
    var seekTime = 0.0
    fileprivate let options: KSOptions
    fileprivate var decoderMap = [Int32: DecodeProtocol]()
    fileprivate var state = MECodecState.idle {
        didSet {
            if state == .finished {
                seekTime = 0
            }
        }
    }

    var isEndOfFile: Bool = false
    var packetCount: Int { 0 }
    let description: String
    weak var delegate: CodecCapacityDelegate?
    let mediaType: AVFoundation.AVMediaType
    let outputRenderQueue: CircularBuffer<Frame>
    var isLoopModel = false
    var frameCount: Int { outputRenderQueue.count }
    var frameMaxCount: Int {
        outputRenderQueue.maxCount
    }

    var fps: Float {
        outputRenderQueue.fps
    }

    required init(mediaType: AVFoundation.AVMediaType, frameCapacity: UInt8, options: KSOptions) {
        self.options = options
        self.mediaType = mediaType
        description = mediaType.rawValue
        // The default queue size is tied to the frame rate; dividing by 4 tested best.
        if mediaType == .audio {
            outputRenderQueue = CircularBuffer(initialCapacity: Int(frameCapacity), expanding: false)
        } else if mediaType == .video {
            outputRenderQueue = CircularBuffer(initialCapacity: Int(frameCapacity), sorted: true, expanding: false)
        } else {
            // Some image subtitles arrive out of order, so the queue has to sort.
            outputRenderQueue = CircularBuffer(initialCapacity: Int(frameCapacity), sorted: true)
        }
    }

    func decode() {
        isEndOfFile = false
        state = .decoding
    }

    func seek(time: TimeInterval) {
        if options.isAccurateSeek {
            seekTime = time
        } else {
            seekTime = 0
        }
        isEndOfFile = false
        state = .flush
        outputRenderQueue.flush()
        isLoopModel = false
    }

    func putPacket(packet: Packet) {
        if state == .flush {
            decoderMap.values.forEach { $0.doFlushCodec() }
            state = .decoding
        }
        if state == .decoding {
            doDecode(packet: packet)
        }
    }

    func getOutputRender(where predicate: ((Frame, Int) -> Bool)?) -> Frame? {
        let outputFetchRender = outputRenderQueue.pop(where: predicate)
        if outputFetchRender == nil {
            if state == .finished, frameCount == 0 {
                delegate?.codecDidFinished(track: self)
            }
        }
        return outputFetchRender
    }

    func shutdown() {
        if state == .idle {
            return
        }
        state = .closed
        outputRenderQueue.shutdown()
    }

    private var lastPacketBytes = Int32(0)
    private var lastPacketSeconds = Double(-1)
    var bitrate = Double(0)
    fileprivate func doDecode(packet: Packet) {
        if packet.isKeyFrame, packet.assetTrack.mediaType != .subtitle {
            let seconds = packet.seconds
            let diff = seconds - lastPacketSeconds
            if lastPacketSeconds < 0 || diff < 0 {
                bitrate = 0
                lastPacketBytes = 0
                lastPacketSeconds = seconds
            } else if diff > 1 {
                bitrate = Double(lastPacketBytes) / diff
                lastPacketBytes = 0
                lastPacketSeconds = seconds
            }
        }
        lastPacketBytes += packet.size
        let decoder = decoderMap.value(for: packet.assetTrack.trackID, default: makeDecode(assetTrack: packet.assetTrack))
        // var startTime = CACurrentMediaTime()
        decoder.decodeFrame(from: packet) { [weak self] result in
            guard let self else {
                return
            }
            do {
                // if packet.assetTrack.mediaType == .video {
                //     print("[video] decode time: \(CACurrentMediaTime()-startTime)")
                //     startTime = CACurrentMediaTime()
                // }
                let frame = try result.get()
                if self.state == .flush || self.state == .closed {
                    return
                }
                if self.seekTime > 0 {
                    let timestamp = frame.timestamp + frame.duration
                    // KSLog("seektime \(self.seekTime), frame \(frame.seconds), mediaType \(packet.assetTrack.mediaType)")
                    if timestamp <= 0 || frame.timebase.cmtime(for: timestamp).seconds < self.seekTime {
                        return
                    } else {
                        self.seekTime = 0.0
                    }
                }
                if let frame = frame as? Frame {
                    self.outputRenderQueue.push(frame)
                    self.outputRenderQueue.fps = packet.assetTrack.nominalFrameRate
                }
            } catch {
                KSLog("Decoder failed: \(error)")
                if decoder is VideoToolboxDecode {
                    decoder.shutdown()
                    self.decoderMap[packet.assetTrack.trackID] = FFmpegDecode(assetTrack: packet.assetTrack, options: self.options)
                    KSLog("VideoCodec switching to software decoding")
                    self.doDecode(packet: packet)
                } else {
                    self.state = .failed
                }
            }
        }
        if options.decodeAudioTime == 0, mediaType == .audio {
            options.decodeAudioTime = CACurrentMediaTime()
        }
        if options.decodeVideoTime == 0, mediaType == .video {
            options.decodeVideoTime = CACurrentMediaTime()
        }
    }
}
|
||||
|
||||
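// Illustrative sketch, not part of the upstream KSPlayer source: the `bitrate`
// bookkeeping in `doDecode(packet:)` is a rolling estimate — bytes accumulated
// since the last sampled keyframe divided by the elapsed stream time, reset
// whenever playback jumps backwards. (Despite the name, the unit is bytes per
// second.) A distilled version of that logic:
private struct _BitrateEstimator {
    private var bytes = Int32(0)
    private var lastSeconds = Double(-1)
    private(set) var bytesPerSecond = Double(0)

    mutating func add(packetSize: Int32, seconds: Double, isKeyFrame: Bool) {
        if isKeyFrame {
            let diff = seconds - lastSeconds
            if lastSeconds < 0 || diff < 0 {
                // First packet, or a backwards jump (seek/loop): start over.
                (bytesPerSecond, bytes, lastSeconds) = (0, 0, seconds)
            } else if diff > 1 {
                // At least one second elapsed: publish a new estimate.
                (bytesPerSecond, bytes, lastSeconds) = (Double(bytes) / diff, 0, seconds)
            }
        }
        bytes += packetSize
    }
}
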
final class AsyncPlayerItemTrack<Frame: MEFrame>: SyncPlayerItemTrack<Frame> {
    private let operationQueue = OperationQueue()
    private var decodeOperation: BlockOperation!
    // Packet queue used for gapless (seamless loop) playback
    private var loopPacketQueue: CircularBuffer<Packet>?
    var packetQueue = CircularBuffer<Packet>()
    override var packetCount: Int { packetQueue.count }
    override var isLoopModel: Bool {
        didSet {
            if isLoopModel {
                loopPacketQueue = CircularBuffer<Packet>()
                isEndOfFile = true
            } else {
                if let loopPacketQueue {
                    packetQueue.shutdown()
                    packetQueue = loopPacketQueue
                    self.loopPacketQueue = nil
                    if decodeOperation.isFinished {
                        decode()
                    }
                }
            }
        }
    }

    required init(mediaType: AVFoundation.AVMediaType, frameCapacity: UInt8, options: KSOptions) {
        super.init(mediaType: mediaType, frameCapacity: frameCapacity, options: options)
        operationQueue.name = "KSPlayer_" + mediaType.rawValue
        operationQueue.maxConcurrentOperationCount = 1
        operationQueue.qualityOfService = .userInteractive
    }

    override func putPacket(packet: Packet) {
        if isLoopModel {
            loopPacketQueue?.push(packet)
        } else {
            packetQueue.push(packet)
        }
    }

    override func decode() {
        isEndOfFile = false
        guard operationQueue.operationCount == 0 else { return }
        decodeOperation = BlockOperation { [weak self] in
            guard let self else { return }
            Thread.current.name = self.operationQueue.name
            Thread.current.stackSize = KSOptions.stackSize
            self.decodeThread()
        }
        decodeOperation.queuePriority = .veryHigh
        decodeOperation.qualityOfService = .userInteractive
        operationQueue.addOperation(decodeOperation)
    }

    private func decodeThread() {
        state = .decoding
        isEndOfFile = false
        decoderMap.values.forEach { $0.decode() }
        outerLoop: while !decodeOperation.isCancelled {
            switch state {
            case .idle:
                break outerLoop
            case .finished, .closed, .failed:
                decoderMap.values.forEach { $0.shutdown() }
                decoderMap.removeAll()
                break outerLoop
            case .flush:
                decoderMap.values.forEach { $0.doFlushCodec() }
                state = .decoding
            case .decoding:
                if isEndOfFile, packetQueue.count == 0 {
                    state = .finished
                } else {
                    guard let packet = packetQueue.pop(wait: true), state != .flush, state != .closed else {
                        continue
                    }
                    autoreleasepool {
                        doDecode(packet: packet)
                    }
                }
            }
        }
    }

    override func seek(time: TimeInterval) {
        if decodeOperation.isFinished {
            decode()
        }
        packetQueue.flush()
        super.seek(time: time)
        loopPacketQueue = nil
    }

    override func shutdown() {
        if state == .idle {
            return
        }
        super.shutdown()
        packetQueue.shutdown()
    }
}

public extension Dictionary {
    mutating func value(for key: Key, default defaultValue: @autoclosure () -> Value) -> Value {
        if let value = self[key] {
            return value
        } else {
            let value = defaultValue()
            self[key] = value
            return value
        }
    }
}

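// Illustrative sketch, not part of the upstream KSPlayer source: unlike the
// standard `subscript(_:default:)`, which does not store the default when it is
// only read, `value(for:default:)` caches the lazily built value, so the
// autoclosure runs at most once per key.
private func _dictionaryMemoizationExample() {
    var cache = [Int: Int]()
    var buildCount = 0
    func build() -> Int {
        buildCount += 1
        return 42
    }
    _ = cache.value(for: 1, default: build()) // builds and stores the value
    _ = cache.value(for: 1, default: build()) // cache hit; `build()` is not evaluated again
    assert(buildCount == 1)
}
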
protocol DecodeProtocol {
    func decode()
    func decodeFrame(from packet: Packet, completionHandler: @escaping (Result<MEFrame, Error>) -> Void)
    func doFlushCodec()
    func shutdown()
}

extension SyncPlayerItemTrack {
    func makeDecode(assetTrack: FFmpegAssetTrack) -> DecodeProtocol {
        autoreleasepool {
            if mediaType == .subtitle {
                return SubtitleDecode(assetTrack: assetTrack, options: options)
            } else {
                if mediaType == .video, options.asynchronousDecompression, options.hardwareDecode,
                   let session = DecompressionSession(assetTrack: assetTrack, options: options)
                {
                    return VideoToolboxDecode(options: options, session: session)
                } else {
                    return FFmpegDecode(assetTrack: assetTrack, options: options)
                }
            }
        }
    }
}

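// Note: the decoder is chosen once per track here; if VideoToolbox later fails
// mid-stream, `doDecode(packet:)` above tears it down, swaps the track over to
// `FFmpegDecode`, and retries the same packet, so hardware failures degrade to
// software decoding instead of stopping playback.
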
446
KSPlayer-main/Sources/KSPlayer/MEPlayer/MetalPlayView.swift
Normal file
@@ -0,0 +1,446 @@
//
// MetalPlayView.swift
// KSPlayer
//
// Created by kintan on 2018/3/11.
//

import AVFoundation
import Combine
import CoreMedia
#if canImport(MetalKit)
import MetalKit
#endif
public protocol DisplayLayerDelegate: NSObjectProtocol {
    func change(displayLayer: AVSampleBufferDisplayLayer)
}

public protocol VideoOutput: FrameOutput {
    var displayLayerDelegate: DisplayLayerDelegate? { get set }
    var options: KSOptions { get set }
    var displayLayer: AVSampleBufferDisplayLayer { get }
    var pixelBuffer: PixelBufferProtocol? { get }
    init(options: KSOptions)
    func invalidate()
    func readNextFrame()
}

public final class MetalPlayView: UIView, VideoOutput {
    public var displayLayer: AVSampleBufferDisplayLayer {
        displayView.displayLayer
    }

    private var isDovi: Bool = false
    private var formatDescription: CMFormatDescription? {
        didSet {
            options.updateVideo(refreshRate: fps, isDovi: isDovi, formatDescription: formatDescription)
        }
    }

    private var fps = Float(60) {
        didSet {
            if fps != oldValue {
                if KSOptions.preferredFrame {
                    let preferredFramesPerSecond = ceil(fps)
                    if #available(iOS 15.0, tvOS 15.0, macOS 14.0, *) {
                        displayLink.preferredFrameRateRange = CAFrameRateRange(minimum: preferredFramesPerSecond, maximum: 2 * preferredFramesPerSecond, __preferred: preferredFramesPerSecond)
                    } else {
                        displayLink.preferredFramesPerSecond = Int(preferredFramesPerSecond) << 1
                    }
                }
                options.updateVideo(refreshRate: fps, isDovi: isDovi, formatDescription: formatDescription)
            }
        }
    }

    public private(set) var pixelBuffer: PixelBufferProtocol?
    /// Driving rendering from CADisplayLink means nothing draws while the screen is locked;
    /// a DispatchSourceTimer's repeat interval stretches when playing 4K video;
    /// and MTKView's draw(in:) stutters — so CADisplayLink is the least bad option.
    private var displayLink: CADisplayLink!
    // private let timer = DispatchSource.makeTimerSource(queue: DispatchQueue.main)
    public var options: KSOptions
    public weak var renderSource: OutputRenderSourceDelegate?
    // AVSampleBufferAudioRenderer AVSampleBufferRenderSynchronizer AVSampleBufferDisplayLayer
    var displayView = AVSampleBufferDisplayView() {
        didSet {
            displayLayerDelegate?.change(displayLayer: displayView.displayLayer)
        }
    }

    private let metalView = MetalView()
    public weak var displayLayerDelegate: DisplayLayerDelegate?
    public init(options: KSOptions) {
        self.options = options
        super.init(frame: .zero)
        addSubview(displayView)
        addSubview(metalView)
        metalView.isHidden = true
        // displayLink = CADisplayLink(block: renderFrame)
        displayLink = CADisplayLink(target: self, selector: #selector(renderFrame))
        // Must use the .common run-loop mode; otherwise interacting with views over the video stutters playback.
        displayLink.add(to: .main, forMode: .common)
        pause()
    }

    public func play() {
        displayLink.isPaused = false
    }

    public func pause() {
        displayLink.isPaused = true
    }

    @available(*, unavailable)
    required init(coder _: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override public func didAddSubview(_ subview: UIView) {
        super.didAddSubview(subview)
        subview.translatesAutoresizingMaskIntoConstraints = false
        NSLayoutConstraint.activate([
            subview.leftAnchor.constraint(equalTo: leftAnchor),
            subview.topAnchor.constraint(equalTo: topAnchor),
            subview.bottomAnchor.constraint(equalTo: bottomAnchor),
            subview.rightAnchor.constraint(equalTo: rightAnchor),
        ])
    }

    override public var contentMode: UIViewContentMode {
        didSet {
            metalView.contentMode = contentMode
            switch contentMode {
            case .scaleToFill:
                displayView.displayLayer.videoGravity = .resize
            case .scaleAspectFit, .center:
                displayView.displayLayer.videoGravity = .resizeAspect
            case .scaleAspectFill:
                displayView.displayLayer.videoGravity = .resizeAspectFill
            default:
                break
            }
        }
    }

    #if canImport(UIKit)
    override public func touchesMoved(_ touches: Set<UITouch>, with: UIEvent?) {
        if options.display == .plane {
            super.touchesMoved(touches, with: with)
        } else {
            options.display.touchesMoved(touch: touches.first!)
        }
    }
    #else
    override public func touchesMoved(with event: NSEvent) {
        if options.display == .plane {
            super.touchesMoved(with: event)
        } else {
            options.display.touchesMoved(touch: event.allTouches().first!)
        }
    }
    #endif

    public func flush() {
        pixelBuffer = nil
        if displayView.isHidden {
            metalView.clear()
        } else {
            displayView.displayLayer.flushAndRemoveImage()
        }
    }

    public func invalidate() {
        displayLink.invalidate()
    }

    public func readNextFrame() {
        draw(force: true)
    }

    // deinit {
    //     print()
    // }
}

extension MetalPlayView {
    @objc private func renderFrame() {
        draw(force: false)
    }

    private func draw(force: Bool) {
        autoreleasepool {
            guard let frame = renderSource?.getVideoOutputRender(force: force) else {
                return
            }
            pixelBuffer = frame.corePixelBuffer
            guard let pixelBuffer else {
                return
            }
            isDovi = frame.isDovi
            fps = frame.fps
            let cmtime = frame.cmtime
            let par = pixelBuffer.size
            let sar = pixelBuffer.aspectRatio
            if let pixelBuffer = pixelBuffer.cvPixelBuffer, options.isUseDisplayLayer() {
                if displayView.isHidden {
                    displayView.isHidden = false
                    metalView.isHidden = true
                    metalView.clear()
                }
                if let dar = options.customizeDar(sar: sar, par: par) {
                    pixelBuffer.aspectRatio = CGSize(width: dar.width, height: dar.height * par.width / par.height)
                }
                checkFormatDescription(pixelBuffer: pixelBuffer)
                set(pixelBuffer: pixelBuffer, time: cmtime)
            } else {
                if !displayView.isHidden {
                    displayView.isHidden = true
                    metalView.isHidden = false
                    displayView.displayLayer.flushAndRemoveImage()
                }
                let size: CGSize
                if options.display == .plane {
                    if let dar = options.customizeDar(sar: sar, par: par) {
                        size = CGSize(width: par.width, height: par.width * dar.height / dar.width)
                    } else {
                        size = CGSize(width: par.width, height: par.height * sar.height / sar.width)
                    }
                } else {
                    size = KSOptions.sceneSize
                }
                checkFormatDescription(pixelBuffer: pixelBuffer)
                #if !os(tvOS)
                if #available(iOS 16, *) {
                    metalView.metalLayer.edrMetadata = frame.edrMetadata
                }
                #endif
                metalView.draw(pixelBuffer: pixelBuffer, display: options.display, size: size)
            }
            renderSource?.setVideo(time: cmtime, position: frame.position)
        }
    }

    private func checkFormatDescription(pixelBuffer: PixelBufferProtocol) {
        if formatDescription == nil || !pixelBuffer.matche(formatDescription: formatDescription!) {
            if formatDescription != nil {
                displayView.removeFromSuperview()
                displayView = AVSampleBufferDisplayView()
                displayView.frame = frame
                addSubview(displayView)
            }
            formatDescription = pixelBuffer.formatDescription
        }
    }

    private func set(pixelBuffer: CVPixelBuffer, time: CMTime) {
        guard let formatDescription else { return }
        displayView.enqueue(imageBuffer: pixelBuffer, formatDescription: formatDescription, time: time)
    }
}

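// Note: `draw(force:)` selects one of two render paths per frame — the
// AVSampleBufferDisplayLayer path when the frame carries a CVPixelBuffer and
// `options.isUseDisplayLayer()` allows it, otherwise the CAMetalLayer path —
// and hides whichever surface is inactive so only one is ever visible.
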
class MetalView: UIView {
    private let render = MetalRender()
    #if canImport(UIKit)
    override public class var layerClass: AnyClass { CAMetalLayer.self }
    #endif
    var metalLayer: CAMetalLayer {
        // swiftlint:disable force_cast
        layer as! CAMetalLayer
        // swiftlint:enable force_cast
    }

    init() {
        super.init(frame: .zero)
        #if !canImport(UIKit)
        layer = CAMetalLayer()
        #endif
        metalLayer.device = MetalRender.device
        metalLayer.framebufferOnly = true
        // metalLayer.displaySyncEnabled = false
    }

    @available(*, unavailable)
    required init?(coder _: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    func clear() {
        if let drawable = metalLayer.nextDrawable() {
            render.clear(drawable: drawable)
        }
    }

    func draw(pixelBuffer: PixelBufferProtocol, display: DisplayEnum, size: CGSize) {
        metalLayer.drawableSize = size
        metalLayer.pixelFormat = KSOptions.colorPixelFormat(bitDepth: pixelBuffer.bitDepth)
        let colorspace = pixelBuffer.colorspace
        if colorspace != nil, metalLayer.colorspace != colorspace {
            metalLayer.colorspace = colorspace
            KSLog("[video] CAMetalLayer colorspace \(String(describing: colorspace))")
            #if !os(tvOS)
            if #available(iOS 16.0, *) {
                if let name = colorspace?.name, name != CGColorSpace.sRGB {
                    #if os(macOS)
                    metalLayer.wantsExtendedDynamicRangeContent = (window?.screen?.maximumPotentialExtendedDynamicRangeColorComponentValue ?? 1.0) > 1.0
                    #else
                    metalLayer.wantsExtendedDynamicRangeContent = true
                    #endif
                } else {
                    metalLayer.wantsExtendedDynamicRangeContent = false
                }
                KSLog("[video] CAMetalLayer wantsExtendedDynamicRangeContent \(metalLayer.wantsExtendedDynamicRangeContent)")
            }
            #endif
        }
        guard let drawable = metalLayer.nextDrawable() else {
            KSLog("[video] CAMetalLayer has no next drawable")
            return
        }
        render.draw(pixelBuffer: pixelBuffer, display: display, drawable: drawable)
    }
}

class AVSampleBufferDisplayView: UIView {
    #if canImport(UIKit)
    override public class var layerClass: AnyClass { AVSampleBufferDisplayLayer.self }
    #endif
    var displayLayer: AVSampleBufferDisplayLayer {
        // swiftlint:disable force_cast
        layer as! AVSampleBufferDisplayLayer
        // swiftlint:enable force_cast
    }

    override init(frame: CGRect) {
        super.init(frame: frame)
        #if !canImport(UIKit)
        layer = AVSampleBufferDisplayLayer()
        #endif
        var controlTimebase: CMTimebase?
        CMTimebaseCreateWithSourceClock(allocator: kCFAllocatorDefault, sourceClock: CMClockGetHostTimeClock(), timebaseOut: &controlTimebase)
        if let controlTimebase {
            displayLayer.controlTimebase = controlTimebase
            CMTimebaseSetTime(controlTimebase, time: .zero)
            CMTimebaseSetRate(controlTimebase, rate: 1.0)
        }
    }

    @available(*, unavailable)
    required init?(coder _: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    func enqueue(imageBuffer: CVPixelBuffer, formatDescription: CMVideoFormatDescription, time: CMTime) {
        let timing = CMSampleTimingInfo(duration: .invalid, presentationTimeStamp: .zero, decodeTimeStamp: .invalid)
        // var timing = CMSampleTimingInfo(duration: .invalid, presentationTimeStamp: time, decodeTimeStamp: .invalid)
        var sampleBuffer: CMSampleBuffer?
        CMSampleBufferCreateReadyWithImageBuffer(allocator: kCFAllocatorDefault, imageBuffer: imageBuffer, formatDescription: formatDescription, sampleTiming: [timing], sampleBufferOut: &sampleBuffer)
        if let sampleBuffer {
            if let attachmentsArray = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, createIfNecessary: true) as? [NSMutableDictionary], let dic = attachmentsArray.first {
                dic[kCMSampleAttachmentKey_DisplayImmediately] = true
            }
            if displayLayer.isReadyForMoreMediaData {
                displayLayer.enqueue(sampleBuffer)
            } else {
                KSLog("[video] AVSampleBufferDisplayLayer not readyForMoreMediaData. video time \(time), controlTime \(displayLayer.timebase.time) ")
                displayLayer.enqueue(sampleBuffer)
            }
            if #available(macOS 11.0, iOS 14, tvOS 14, *) {
                if displayLayer.requiresFlushToResumeDecoding {
                    KSLog("[video] AVSampleBufferDisplayLayer requiresFlushToResumeDecoding so flush")
                    displayLayer.flush()
                }
            }
            if displayLayer.status == .failed {
                KSLog("[video] AVSampleBufferDisplayLayer status failed so flush")
                displayLayer.flush()
                // if let error = displayLayer.error as NSError?, error.code == -11847 {
                //     displayLayer.stopRequestingMediaData()
                // }
            }
        }
    }
}

#if os(macOS)
import CoreVideo

class CADisplayLink {
    private let displayLink: CVDisplayLink
    private var runloop: RunLoop?
    private var mode = RunLoop.Mode.default
    public var preferredFramesPerSecond = 60
    @available(macOS 12.0, *)
    public var preferredFrameRateRange: CAFrameRateRange {
        get {
            CAFrameRateRange()
        }
        set {}
    }

    public var timestamp: TimeInterval {
        var timeStamp = CVTimeStamp()
        if CVDisplayLinkGetCurrentTime(displayLink, &timeStamp) == kCVReturnSuccess, (timeStamp.flags & CVTimeStampFlags.hostTimeValid.rawValue) != 0 {
            return TimeInterval(timeStamp.hostTime / NSEC_PER_SEC)
        }
        return 0
    }

    public var duration: TimeInterval {
        CVDisplayLinkGetActualOutputVideoRefreshPeriod(displayLink)
    }

    public var targetTimestamp: TimeInterval {
        duration + timestamp
    }

    public var isPaused: Bool {
        get {
            !CVDisplayLinkIsRunning(displayLink)
        }
        set {
            if newValue {
                CVDisplayLinkStop(displayLink)
            } else {
                CVDisplayLinkStart(displayLink)
            }
        }
    }

    public init(target: NSObject, selector: Selector) {
        var displayLink: CVDisplayLink?
        CVDisplayLinkCreateWithActiveCGDisplays(&displayLink)
        self.displayLink = displayLink!
        CVDisplayLinkSetOutputHandler(self.displayLink) { [weak self] _, _, _, _, _ in
            guard let self else { return kCVReturnSuccess }
            self.runloop?.perform(selector, target: target, argument: self, order: 0, modes: [self.mode])
            return kCVReturnSuccess
        }
        CVDisplayLinkStart(self.displayLink)
    }

    public init(block: @escaping (() -> Void)) {
        var displayLink: CVDisplayLink?
        CVDisplayLinkCreateWithActiveCGDisplays(&displayLink)
        self.displayLink = displayLink!
        CVDisplayLinkSetOutputHandler(self.displayLink) { _, _, _, _, _ in
            block()
            return kCVReturnSuccess
        }
        CVDisplayLinkStart(self.displayLink)
    }

    open func add(to runloop: RunLoop, forMode mode: RunLoop.Mode) {
        self.runloop = runloop
        self.mode = mode
    }

    public func invalidate() {
        isPaused = true
        runloop = nil
        CVDisplayLinkSetOutputHandler(displayLink) { _, _, _, _, _ in
            kCVReturnError
        }
    }
}
#endif

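// Illustrative sketch, not part of the upstream KSPlayer source: because the
// shim above mirrors the UIKit CADisplayLink API, call sites stay
// platform-agnostic — the same driver code compiles against UIKit's
// CADisplayLink on iOS/tvOS and against this CVDisplayLink wrapper on macOS.
final class _DisplayLinkDriver: NSObject {
    private var link: CADisplayLink?
    func start() {
        let link = CADisplayLink(target: self, selector: #selector(step))
        link.add(to: .main, forMode: .common)
        link.isPaused = false
        self.link = link
    }

    func stop() {
        link?.invalidate()
        link = nil
    }

    @objc private func step() {
        // Render one frame per display refresh here.
    }
}
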
493
KSPlayer-main/Sources/KSPlayer/MEPlayer/Model.swift
Normal file
@@ -0,0 +1,493 @@
//
// Model.swift
// KSPlayer
//
// Created by kintan on 2018/3/9.
//

import AVFoundation
import CoreMedia
import Libavcodec
import Metal
#if canImport(UIKit)
import UIKit
#endif

// MARK: enum

enum MESourceState {
    case idle
    case opening
    case opened
    case reading
    case seeking
    case paused
    case finished
    case closed
    case failed
}

// MARK: delegate

public protocol OutputRenderSourceDelegate: AnyObject {
    func getVideoOutputRender(force: Bool) -> VideoVTBFrame?
    func getAudioOutputRender() -> AudioFrame?
    func setAudio(time: CMTime, position: Int64)
    func setVideo(time: CMTime, position: Int64)
}

protocol CodecCapacityDelegate: AnyObject {
    func codecDidFinished(track: some CapacityProtocol)
}

protocol MEPlayerDelegate: AnyObject {
    func sourceDidChange(loadingState: LoadingState)
    func sourceDidOpened()
    func sourceDidFailed(error: NSError?)
    func sourceDidFinished()
    func sourceDidChange(oldBitRate: Int64, newBitrate: Int64)
}

// MARK: protocol

public protocol ObjectQueueItem {
    var timebase: Timebase { get }
    var timestamp: Int64 { get set }
    var duration: Int64 { get set }
    // byte position
    var position: Int64 { get set }
    var size: Int32 { get set }
}

extension ObjectQueueItem {
    var seconds: TimeInterval { cmtime.seconds }
    var cmtime: CMTime { timebase.cmtime(for: timestamp) }
}

public protocol FrameOutput: AnyObject {
    var renderSource: OutputRenderSourceDelegate? { get set }
    func pause()
    func flush()
    func play()
}

protocol MEFrame: ObjectQueueItem {
    var timebase: Timebase { get set }
}

// MARK: model

// for MEPlayer
public extension KSOptions {
    /// Enables the gyroscope-driven camera in VR mode.
    static var enableSensor = true
    static var stackSize = 65536
    static var isClearVideoWhereReplace = true
    static var audioPlayerType: AudioOutput.Type = AudioEnginePlayer.self
    static var videoPlayerType: (VideoOutput & UIView).Type = MetalPlayView.self
    static var yadifMode = 1
    static var deInterlaceAddIdet = false
    static func colorSpace(ycbcrMatrix: CFString?, transferFunction: CFString?) -> CGColorSpace? {
        switch ycbcrMatrix {
        case kCVImageBufferYCbCrMatrix_ITU_R_709_2:
            return CGColorSpace(name: CGColorSpace.itur_709)
        case kCVImageBufferYCbCrMatrix_ITU_R_601_4:
            return CGColorSpace(name: CGColorSpace.sRGB)
        case kCVImageBufferYCbCrMatrix_ITU_R_2020:
            if transferFunction == kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ {
                if #available(macOS 11.0, iOS 14.0, tvOS 14.0, *) {
                    return CGColorSpace(name: CGColorSpace.itur_2100_PQ)
                } else if #available(macOS 10.15.4, iOS 13.4, tvOS 13.4, *) {
                    return CGColorSpace(name: CGColorSpace.itur_2020_PQ)
                } else {
                    return CGColorSpace(name: CGColorSpace.itur_2020_PQ_EOTF)
                }
            } else if transferFunction == kCVImageBufferTransferFunction_ITU_R_2100_HLG {
                if #available(macOS 11.0, iOS 14.0, tvOS 14.0, *) {
                    return CGColorSpace(name: CGColorSpace.itur_2100_HLG)
                } else {
                    return CGColorSpace(name: CGColorSpace.itur_2020)
                }
            } else {
                return CGColorSpace(name: CGColorSpace.itur_2020)
            }
        default:
            return CGColorSpace(name: CGColorSpace.sRGB)
        }
    }

    static func colorSpace(colorPrimaries: CFString?) -> CGColorSpace? {
        switch colorPrimaries {
        case kCVImageBufferColorPrimaries_ITU_R_709_2:
            return CGColorSpace(name: CGColorSpace.sRGB)
        case kCVImageBufferColorPrimaries_DCI_P3:
            if #available(macOS 10.15.4, iOS 13.4, tvOS 13.4, *) {
                return CGColorSpace(name: CGColorSpace.displayP3_PQ)
            } else {
                return CGColorSpace(name: CGColorSpace.displayP3_PQ_EOTF)
            }
        case kCVImageBufferColorPrimaries_ITU_R_2020:
            if #available(macOS 11.0, iOS 14.0, tvOS 14.0, *) {
                return CGColorSpace(name: CGColorSpace.itur_2100_PQ)
            } else if #available(macOS 10.15.4, iOS 13.4, tvOS 13.4, *) {
                return CGColorSpace(name: CGColorSpace.itur_2020_PQ)
            } else {
                return CGColorSpace(name: CGColorSpace.itur_2020_PQ_EOTF)
            }
        default:
            return CGColorSpace(name: CGColorSpace.sRGB)
        }
    }

    static func pixelFormat(planeCount: Int, bitDepth: Int32) -> [MTLPixelFormat] {
        if planeCount == 3 {
            if bitDepth > 8 {
                return [.r16Unorm, .r16Unorm, .r16Unorm]
            } else {
                return [.r8Unorm, .r8Unorm, .r8Unorm]
            }
        } else if planeCount == 2 {
            if bitDepth > 8 {
                return [.r16Unorm, .rg16Unorm]
            } else {
                return [.r8Unorm, .rg8Unorm]
            }
        } else {
            return [colorPixelFormat(bitDepth: bitDepth)]
        }
    }

    static func colorPixelFormat(bitDepth: Int32) -> MTLPixelFormat {
        if bitDepth == 10 {
            return .bgr10a2Unorm
        } else {
            return .bgra8Unorm
        }
    }
}

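// Illustrative sketch, not part of the upstream KSPlayer source: HDR10 content
// (BT.2020 matrix plus the SMPTE ST 2084 PQ transfer function) resolves to the
// ITU-R 2100 PQ color space on current OS versions.
private func _hdr10ColorSpaceExample() {
    if #available(macOS 11.0, iOS 14.0, tvOS 14.0, *) {
        let space = KSOptions.colorSpace(ycbcrMatrix: kCVImageBufferYCbCrMatrix_ITU_R_2020,
                                         transferFunction: kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ)
        assert(space?.name == CGColorSpace.itur_2100_PQ)
    }
}
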
enum MECodecState {
    case idle
    case decoding
    case flush
    case closed
    case failed
    case finished
}

public struct Timebase {
    static let defaultValue = Timebase(num: 1, den: 1)
    public let num: Int32
    public let den: Int32
    func getPosition(from seconds: TimeInterval) -> Int64 { Int64(seconds * TimeInterval(den) / TimeInterval(num)) }

    func cmtime(for timestamp: Int64) -> CMTime { CMTime(value: timestamp * Int64(num), timescale: den) }
}

extension Timebase {
    public var rational: AVRational { AVRational(num: num, den: den) }

    init(_ rational: AVRational) {
        num = rational.num
        den = rational.den
    }
}

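// Illustrative sketch, not part of the upstream KSPlayer source: with the
// 1/90000 time base common in MPEG-TS, a pts of 180_000 is 2 seconds, and
// 2.5 seconds maps back to stream position 225_000.
private func _timebaseExample() {
    let timebase = Timebase(num: 1, den: 90000)
    assert(timebase.cmtime(for: 180_000).seconds == 2)
    assert(timebase.getPosition(from: 2.5) == 225_000)
}
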
final class Packet: ObjectQueueItem {
    var duration: Int64 = 0
    var timestamp: Int64 = 0
    var position: Int64 = 0
    var size: Int32 = 0
    private(set) var corePacket = av_packet_alloc()
    var timebase: Timebase {
        assetTrack.timebase
    }

    var isKeyFrame: Bool {
        if let corePacket {
            return corePacket.pointee.flags & AV_PKT_FLAG_KEY == AV_PKT_FLAG_KEY
        } else {
            return false
        }
    }

    var assetTrack: FFmpegAssetTrack! {
        didSet {
            guard let packet = corePacket?.pointee else {
                return
            }
            timestamp = packet.pts == Int64.min ? packet.dts : packet.pts
            position = packet.pos
            duration = packet.duration
            size = packet.size
        }
    }

    deinit {
        av_packet_unref(corePacket)
        av_packet_free(&corePacket)
    }
}

final class SubtitleFrame: MEFrame {
    var timestamp: Int64 = 0
    var timebase: Timebase
    var duration: Int64 = 0
    var position: Int64 = 0
    var size: Int32 = 0
    let part: SubtitlePart
    init(part: SubtitlePart, timebase: Timebase) {
        self.part = part
        self.timebase = timebase
    }
}

public final class AudioFrame: MEFrame {
    public let dataSize: Int
    public let audioFormat: AVAudioFormat
    public internal(set) var timebase = Timebase.defaultValue
    public var timestamp: Int64 = 0
    public var duration: Int64 = 0
    public var position: Int64 = 0
    public var size: Int32 = 0
    public var data: [UnsafeMutablePointer<UInt8>?]
    public var numberOfSamples: UInt32 = 0
    public init(dataSize: Int, audioFormat: AVAudioFormat) {
        self.dataSize = dataSize
        self.audioFormat = audioFormat
        let count = audioFormat.isInterleaved ? 1 : audioFormat.channelCount
        data = (0 ..< count).map { _ in
            UnsafeMutablePointer<UInt8>.allocate(capacity: dataSize)
        }
    }

    init(array: [AudioFrame]) {
        audioFormat = array[0].audioFormat
        timebase = array[0].timebase
        timestamp = array[0].timestamp
        position = array[0].position
        var dataSize = 0
        for frame in array {
            duration += frame.duration
            dataSize += frame.dataSize
            size += frame.size
            numberOfSamples += frame.numberOfSamples
        }
        self.dataSize = dataSize
        let count = audioFormat.isInterleaved ? 1 : audioFormat.channelCount
        data = (0 ..< count).map { _ in
            UnsafeMutablePointer<UInt8>.allocate(capacity: dataSize)
        }
        var offset = 0
        for frame in array {
            for i in 0 ..< data.count {
                data[i]?.advanced(by: offset).initialize(from: frame.data[i]!, count: frame.dataSize)
            }
            offset += frame.dataSize
        }
    }

    deinit {
        for i in 0 ..< data.count {
            data[i]?.deinitialize(count: dataSize)
            data[i]?.deallocate()
        }
        data.removeAll()
    }

    public func toFloat() -> [ContiguousArray<Float>] {
        var array = [ContiguousArray<Float>]()
        for i in 0 ..< data.count {
            switch audioFormat.commonFormat {
            case .pcmFormatInt16:
                let capacity = dataSize / MemoryLayout<Int16>.size
                data[i]?.withMemoryRebound(to: Int16.self, capacity: capacity) { src in
                    var des = ContiguousArray<Float>(repeating: 0, count: Int(capacity))
                    for j in 0 ..< capacity {
                        des[j] = max(-1.0, min(Float(src[j]) / 32767.0, 1.0))
                    }
                    array.append(des)
                }
            case .pcmFormatInt32:
                let capacity = dataSize / MemoryLayout<Int32>.size
                data[i]?.withMemoryRebound(to: Int32.self, capacity: capacity) { src in
                    var des = ContiguousArray<Float>(repeating: 0, count: Int(capacity))
                    for j in 0 ..< capacity {
                        des[j] = max(-1.0, min(Float(src[j]) / 2_147_483_647.0, 1.0))
                    }
                    array.append(des)
                }
            default:
                let capacity = dataSize / MemoryLayout<Float>.size
                data[i]?.withMemoryRebound(to: Float.self, capacity: capacity) { src in
                    var des = ContiguousArray<Float>(repeating: 0, count: Int(capacity))
                    for j in 0 ..< capacity {
                        des[j] = src[j]
                    }
                    array.append(ContiguousArray<Float>(des))
                }
            }
        }
        return array
    }

    public func toPCMBuffer() -> AVAudioPCMBuffer? {
        guard let pcmBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: numberOfSamples) else {
            return nil
        }
        pcmBuffer.frameLength = pcmBuffer.frameCapacity
        for i in 0 ..< min(Int(pcmBuffer.format.channelCount), data.count) {
            switch audioFormat.commonFormat {
            case .pcmFormatInt16:
                let capacity = dataSize / MemoryLayout<Int16>.size
                data[i]?.withMemoryRebound(to: Int16.self, capacity: capacity) { src in
                    pcmBuffer.int16ChannelData?[i].update(from: src, count: capacity)
                }
            case .pcmFormatInt32:
                let capacity = dataSize / MemoryLayout<Int32>.size
                data[i]?.withMemoryRebound(to: Int32.self, capacity: capacity) { src in
                    pcmBuffer.int32ChannelData?[i].update(from: src, count: capacity)
                }
            default:
                let capacity = dataSize / MemoryLayout<Float>.size
                data[i]?.withMemoryRebound(to: Float.self, capacity: capacity) { src in
                    pcmBuffer.floatChannelData?[i].update(from: src, count: capacity)
                }
            }
        }
        return pcmBuffer
    }

    public func toCMSampleBuffer() -> CMSampleBuffer? {
        var outBlockListBuffer: CMBlockBuffer?
        CMBlockBufferCreateEmpty(allocator: kCFAllocatorDefault, capacity: UInt32(data.count), flags: 0, blockBufferOut: &outBlockListBuffer)
        guard let outBlockListBuffer else {
            return nil
        }
        let sampleSize = Int(audioFormat.sampleSize)
        let sampleCount = CMItemCount(numberOfSamples)
        let dataByteSize = sampleCount * sampleSize
        if dataByteSize > dataSize {
            assertionFailure("dataByteSize: \(dataByteSize), render.dataSize: \(dataSize)")
        }
        for i in 0 ..< data.count {
            var outBlockBuffer: CMBlockBuffer?
            CMBlockBufferCreateWithMemoryBlock(
                allocator: kCFAllocatorDefault,
                memoryBlock: nil,
                blockLength: dataByteSize,
                blockAllocator: kCFAllocatorDefault,
                customBlockSource: nil,
                offsetToData: 0,
                dataLength: dataByteSize,
                flags: kCMBlockBufferAssureMemoryNowFlag,
                blockBufferOut: &outBlockBuffer
            )
            if let outBlockBuffer {
                CMBlockBufferReplaceDataBytes(
                    with: data[i]!,
                    blockBuffer: outBlockBuffer,
                    offsetIntoDestination: 0,
                    dataLength: dataByteSize
                )
                CMBlockBufferAppendBufferReference(
                    outBlockListBuffer,
                    targetBBuf: outBlockBuffer,
                    offsetToData: 0,
                    dataLength: CMBlockBufferGetDataLength(outBlockBuffer),
                    flags: 0
                )
            }
        }
        var sampleBuffer: CMSampleBuffer?
        // The sample rate and the timescale are not aligned, which produced audible artifacts,
        // so the per-sample duration is left as .invalid.
        // let duration = CMTime(value: CMTimeValue(sampleCount), timescale: CMTimeScale(audioFormat.sampleRate))
        let duration = CMTime.invalid
        let timing = CMSampleTimingInfo(duration: duration, presentationTimeStamp: cmtime, decodeTimeStamp: .invalid)
        let sampleSizeEntryCount: CMItemCount
        let sampleSizeArray: [Int]?
        if audioFormat.isInterleaved {
            sampleSizeEntryCount = 1
            sampleSizeArray = [sampleSize]
        } else {
            sampleSizeEntryCount = 0
            sampleSizeArray = nil
        }
        CMSampleBufferCreateReady(allocator: kCFAllocatorDefault, dataBuffer: outBlockListBuffer, formatDescription: audioFormat.formatDescription, sampleCount: sampleCount, sampleTimingEntryCount: 1, sampleTimingArray: [timing], sampleSizeEntryCount: sampleSizeEntryCount, sampleSizeArray: sampleSizeArray, sampleBufferOut: &sampleBuffer)
        return sampleBuffer
    }
}

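// Illustrative sketch, not part of the upstream KSPlayer source: the number of
// backing buffers in `data` follows the format — one shared buffer when
// interleaved, one buffer per channel when planar (non-interleaved).
private func _audioFrameLayoutExample() {
    if let planar = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 48000, channels: 2, interleaved: false) {
        assert(AudioFrame(dataSize: 4096, audioFormat: planar).data.count == 2)
    }
    if let interleaved = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 48000, channels: 2, interleaved: true) {
        assert(AudioFrame(dataSize: 4096, audioFormat: interleaved).data.count == 1)
    }
}
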
public final class VideoVTBFrame: MEFrame {
    public var timebase = Timebase.defaultValue
    // The duration reported for interlaced video is unreliable, so it is simply halved.
    public var duration: Int64 = 0
    public var position: Int64 = 0
    public var timestamp: Int64 = 0
    public var size: Int32 = 0
    public let fps: Float
    public let isDovi: Bool
    public var edrMetaData: EDRMetaData? = nil
    var corePixelBuffer: PixelBufferProtocol?
    init(fps: Float, isDovi: Bool) {
        self.fps = fps
        self.isDovi = isDovi
    }
}

extension VideoVTBFrame {
    #if !os(tvOS)
    @available(iOS 16, *)
    var edrMetadata: CAEDRMetadata? {
        if var contentData = edrMetaData?.contentData, var displayData = edrMetaData?.displayData {
            let data = Data(bytes: &displayData, count: MemoryLayout<MasteringDisplayMetadata>.stride)
            let data2 = Data(bytes: &contentData, count: MemoryLayout<ContentLightMetadata>.stride)
            return CAEDRMetadata.hdr10(displayInfo: data, contentInfo: data2, opticalOutputScale: 10000)
        }
        if var ambientViewingEnvironment = edrMetaData?.ambientViewingEnvironment {
            let data = Data(bytes: &ambientViewingEnvironment, count: MemoryLayout<AmbientViewingEnvironment>.stride)
            if #available(macOS 14.0, iOS 17.0, *) {
                return CAEDRMetadata.hlg(ambientViewingEnvironment: data)
            } else {
                return CAEDRMetadata.hlg
            }
        }
        if corePixelBuffer?.transferFunction == kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ {
            return CAEDRMetadata.hdr10(minLuminance: 0.1, maxLuminance: 1000, opticalOutputScale: 10000)
        } else if corePixelBuffer?.transferFunction == kCVImageBufferTransferFunction_ITU_R_2100_HLG {
            return CAEDRMetadata.hlg
        }
        return nil
    }
    #endif
}

public struct EDRMetaData {
    var displayData: MasteringDisplayMetadata?
    var contentData: ContentLightMetadata?
    var ambientViewingEnvironment: AmbientViewingEnvironment?
}

public struct MasteringDisplayMetadata {
    let display_primaries_r_x: UInt16
    let display_primaries_r_y: UInt16
    let display_primaries_g_x: UInt16
    let display_primaries_g_y: UInt16
    let display_primaries_b_x: UInt16
    let display_primaries_b_y: UInt16
    let white_point_x: UInt16
    let white_point_y: UInt16
    let minLuminance: UInt32
    let maxLuminance: UInt32
}

public struct ContentLightMetadata {
    let MaxCLL: UInt16
    let MaxFALL: UInt16
}

// https://developer.apple.com/documentation/technotes/tn3145-hdr-video-metadata
public struct AmbientViewingEnvironment {
    let ambient_illuminance: UInt32
    let ambient_light_x: UInt16
    let ambient_light_y: UInt16
}

384
KSPlayer-main/Sources/KSPlayer/MEPlayer/Resample.swift
Normal file
@@ -0,0 +1,384 @@
//
// Resample.swift
// KSPlayer-iOS
//
// Created by kintan on 2020/1/27.
//

import AVFoundation
import CoreGraphics
import CoreMedia
import Libavcodec
import Libswresample
import Libswscale

protocol FrameTransfer {
    func transfer(avframe: UnsafeMutablePointer<AVFrame>) -> UnsafeMutablePointer<AVFrame>
    func shutdown()
}

protocol FrameChange {
    func change(avframe: UnsafeMutablePointer<AVFrame>) throws -> MEFrame
    func shutdown()
}

class VideoSwscale: FrameTransfer {
    private var imgConvertCtx: OpaquePointer?
    private var format: AVPixelFormat = AV_PIX_FMT_NONE
    private var height: Int32 = 0
    private var width: Int32 = 0
    private var outFrame: UnsafeMutablePointer<AVFrame>?
    private func setup(format: AVPixelFormat, width: Int32, height: Int32, linesize _: Int32) {
        if self.format == format, self.width == width, self.height == height {
            return
        }
        self.format = format
        self.height = height
        self.width = width
        if format.osType() != nil {
            sws_freeContext(imgConvertCtx)
            imgConvertCtx = nil
            av_frame_free(&outFrame)
        } else {
            let dstFormat = format.bestPixelFormat
            imgConvertCtx = sws_getCachedContext(imgConvertCtx, width, height, self.format, width, height, dstFormat, SWS_BICUBIC, nil, nil, nil)
            outFrame = av_frame_alloc()
            outFrame?.pointee.format = dstFormat.rawValue
            outFrame?.pointee.width = width
            outFrame?.pointee.height = height
        }
    }

    func transfer(avframe: UnsafeMutablePointer<AVFrame>) -> UnsafeMutablePointer<AVFrame> {
        setup(format: AVPixelFormat(rawValue: avframe.pointee.format), width: avframe.pointee.width, height: avframe.pointee.height, linesize: avframe.pointee.linesize.0)
        if let imgConvertCtx, let outFrame {
            sws_scale_frame(imgConvertCtx, outFrame, avframe)
            return outFrame
        }
        return avframe
    }

    func shutdown() {
        sws_freeContext(imgConvertCtx)
        imgConvertCtx = nil
        av_frame_free(&outFrame)
    }
}

class VideoSwresample: FrameChange {
    private var imgConvertCtx: OpaquePointer?
    private var format: AVPixelFormat = AV_PIX_FMT_NONE
    private var height: Int32 = 0
    private var width: Int32 = 0
    private var pool: CVPixelBufferPool?
    private var dstHeight: Int32?
    private var dstWidth: Int32?
    private let dstFormat: AVPixelFormat?
    private let fps: Float
    private let isDovi: Bool
    init(dstWidth: Int32? = nil, dstHeight: Int32? = nil, dstFormat: AVPixelFormat? = nil, fps: Float = 60, isDovi: Bool) {
        self.dstWidth = dstWidth
        self.dstHeight = dstHeight
        self.dstFormat = dstFormat
        self.fps = fps
        self.isDovi = isDovi
    }

    func change(avframe: UnsafeMutablePointer<AVFrame>) throws -> MEFrame {
        let frame = VideoVTBFrame(fps: fps, isDovi: isDovi)
        if avframe.pointee.format == AV_PIX_FMT_VIDEOTOOLBOX.rawValue {
            frame.corePixelBuffer = unsafeBitCast(avframe.pointee.data.3, to: CVPixelBuffer.self)
        } else {
            frame.corePixelBuffer = transfer(frame: avframe.pointee)
        }
        return frame
    }

    private func setup(format: AVPixelFormat, width: Int32, height: Int32, linesize: Int32) {
        if self.format == format, self.width == width, self.height == height {
            return
        }
        self.format = format
        self.height = height
        self.width = width
        let dstWidth = dstWidth ?? width
        let dstHeight = dstHeight ?? height
        let pixelFormatType: OSType
        if self.dstWidth == nil, self.dstHeight == nil, dstFormat == nil, let osType = format.osType() {
            pixelFormatType = osType
            sws_freeContext(imgConvertCtx)
            imgConvertCtx = nil
        } else {
            let dstFormat = dstFormat ?? format.bestPixelFormat
            pixelFormatType = dstFormat.osType()!
            // imgConvertCtx = sws_getContext(width, height, self.format, width, height, dstFormat, SWS_FAST_BILINEAR, nil, nil, nil)
            // AV_PIX_FMT_VIDEOTOOLBOX frames cannot be passed through swscale.
            imgConvertCtx = sws_getCachedContext(imgConvertCtx, width, height, self.format, dstWidth, dstHeight, dstFormat, SWS_FAST_BILINEAR, nil, nil, nil)
        }
        pool = CVPixelBufferPool.create(width: dstWidth, height: dstHeight, bytesPerRowAlignment: linesize, pixelFormatType: pixelFormatType)
    }

    func transfer(frame: AVFrame) -> PixelBufferProtocol? {
        let format = AVPixelFormat(rawValue: frame.format)
        let width = frame.width
        let height = frame.height
        if format.leftShift > 0 {
            return PixelBuffer(frame: frame)
        }
        let pbuf = transfer(format: format, width: width, height: height, data: Array(tuple: frame.data), linesize: Array(tuple: frame.linesize))
        if let pbuf {
            pbuf.aspectRatio = frame.sample_aspect_ratio.size
            pbuf.yCbCrMatrix = frame.colorspace.ycbcrMatrix
            pbuf.colorPrimaries = frame.color_primaries.colorPrimaries
            pbuf.transferFunction = frame.color_trc.transferFunction
            // vt_pixbuf_set_colorspace
            if pbuf.transferFunction == kCVImageBufferTransferFunction_UseGamma {
                let gamma = NSNumber(value: frame.color_trc == AVCOL_TRC_GAMMA22 ? 2.2 : 2.8)
                CVBufferSetAttachment(pbuf, kCVImageBufferGammaLevelKey, gamma, .shouldPropagate)
            }
            if let chroma = frame.chroma_location.chroma {
                CVBufferSetAttachment(pbuf, kCVImageBufferChromaLocationTopFieldKey, chroma, .shouldPropagate)
            }
            pbuf.colorspace = KSOptions.colorSpace(ycbcrMatrix: pbuf.yCbCrMatrix, transferFunction: pbuf.transferFunction)
        }
        return pbuf
    }

    func transfer(format: AVPixelFormat, width: Int32, height: Int32, data: [UnsafeMutablePointer<UInt8>?], linesize: [Int32]) -> CVPixelBuffer? {
        setup(format: format, width: width, height: height, linesize: linesize[1] == 0 ? linesize[0] : linesize[1])
        guard let pool else {
            return nil
        }
        return autoreleasepool {
            var pbuf: CVPixelBuffer?
            let ret = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &pbuf)
            guard let pbuf, ret == kCVReturnSuccess else {
                return nil
            }
            CVPixelBufferLockBaseAddress(pbuf, CVPixelBufferLockFlags(rawValue: 0))
            let bufferPlaneCount = pbuf.planeCount
            if let imgConvertCtx {
                let bytesPerRow = (0 ..< bufferPlaneCount).map { i in
                    Int32(CVPixelBufferGetBytesPerRowOfPlane(pbuf, i))
                }
                let contents = (0 ..< bufferPlaneCount).map { i in
                    pbuf.baseAddressOfPlane(at: i)?.assumingMemoryBound(to: UInt8.self)
                }
                _ = sws_scale(imgConvertCtx, data.map { UnsafePointer($0) }, linesize, 0, height, contents, bytesPerRow)
            } else {
                let planeCount = format.planeCount
                let byteCount = format.bitDepth > 8 ? 2 : 1
                for i in 0 ..< bufferPlaneCount {
                    let height = pbuf.heightOfPlane(at: i)
                    let size = Int(linesize[i])
                    let bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pbuf, i)
                    var contents = pbuf.baseAddressOfPlane(at: i)
                    let source = data[i]!
                    if bufferPlaneCount < planeCount, i + 2 == planeCount {
                        // Interleave the separate U and V planes into one biplanar chroma plane.
                        var sourceU = data[i]!
                        var sourceV = data[i + 1]!
                        var k = 0
                        while k < height {
                            var j = 0
                            while j < size {
                                contents?.advanced(by: 2 * j).copyMemory(from: sourceU.advanced(by: j), byteCount: byteCount)
                                contents?.advanced(by: 2 * j + byteCount).copyMemory(from: sourceV.advanced(by: j), byteCount: byteCount)
                                j += byteCount
                            }
                            contents = contents?.advanced(by: bytesPerRow)
                            sourceU = sourceU.advanced(by: size)
                            sourceV = sourceV.advanced(by: size)
                            k += 1
                        }
                    } else if bytesPerRow == size {
                        contents?.copyMemory(from: source, byteCount: height * size)
                    } else {
                        var j = 0
                        while j < height {
                            contents?.advanced(by: j * bytesPerRow).copyMemory(from: source.advanced(by: j * size), byteCount: size)
                            j += 1
                        }
                    }
                }
            }
            CVPixelBufferUnlockBaseAddress(pbuf, CVPixelBufferLockFlags(rawValue: 0))
            return pbuf
        }
    }

    func shutdown() {
        sws_freeContext(imgConvertCtx)
        imgConvertCtx = nil
    }
}

extension BinaryInteger {
    func alignment(value: Self) -> Self {
        let remainder = self % value
        return remainder == 0 ? self : self + value - remainder
    }
}

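// Illustrative sketch, not part of the upstream KSPlayer source:
// `alignment(value:)` rounds up to the next multiple, which is how a linesize
// gets padded out to a bytes-per-row boundary.
private func _alignmentExample() {
    assert(1920.alignment(value: 64) == 1920) // already a multiple of 64
    assert(1921.alignment(value: 64) == 1984) // rounded up to the next multiple of 64
}
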
typealias SwrContext = OpaquePointer

class AudioSwresample: FrameChange {
    private var swrContext: SwrContext?
    private var descriptor: AudioDescriptor
    private var outChannel: AVChannelLayout
    init(audioDescriptor: AudioDescriptor) {
        descriptor = audioDescriptor
        outChannel = audioDescriptor.outChannel
        _ = setup(descriptor: descriptor)
    }

    private func setup(descriptor: AudioDescriptor) -> Bool {
        var result = swr_alloc_set_opts2(&swrContext, &descriptor.outChannel, descriptor.audioFormat.sampleFormat, Int32(descriptor.audioFormat.sampleRate), &descriptor.channel, descriptor.sampleFormat, descriptor.sampleRate, 0, nil)
        if result >= 0 {
            result = swr_init(swrContext)
        }
        if result < 0 {
            shutdown()
            return false
        } else {
            outChannel = descriptor.outChannel
            return true
        }
    }

    func change(avframe: UnsafeMutablePointer<AVFrame>) throws -> MEFrame {
        if !(descriptor == avframe.pointee) || outChannel != descriptor.outChannel {
            let newDescriptor = AudioDescriptor(frame: avframe.pointee)
            if setup(descriptor: newDescriptor) {
                descriptor = newDescriptor
            } else {
                throw NSError(errorCode: .auidoSwrInit, userInfo: ["outChannel": newDescriptor.outChannel, "inChannel": newDescriptor.channel])
            }
        }
        let numberOfSamples = avframe.pointee.nb_samples
        let outSamples = swr_get_out_samples(swrContext, numberOfSamples)
        var frameBuffer = Array(tuple: avframe.pointee.data).map { UnsafePointer<UInt8>($0) }
        let channels = descriptor.outChannel.nb_channels
        var bufferSize = [Int32(0)]
        // The return value already includes the channel multiplication, so it is not used here.
        _ = av_samples_get_buffer_size(&bufferSize, channels, outSamples, descriptor.audioFormat.sampleFormat, 1)
        let frame = AudioFrame(dataSize: Int(bufferSize[0]), audioFormat: descriptor.audioFormat)
        frame.numberOfSamples = UInt32(swr_convert(swrContext, &frame.data, outSamples, &frameBuffer, numberOfSamples))
        return frame
    }

    func shutdown() {
        swr_free(&swrContext)
    }
}

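// Note: `swr_get_out_samples` returns an upper bound on the converted sample
// count, so the buffer sized from it can be larger than what `swr_convert`
// actually produces — `numberOfSamples` is therefore set from the real
// `swr_convert` return value rather than from the estimate.
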
public class AudioDescriptor: Equatable {
    // static let defaultValue = AudioDescriptor()
    public let sampleRate: Int32
    public private(set) var audioFormat: AVAudioFormat
    fileprivate(set) var channel: AVChannelLayout
    fileprivate let sampleFormat: AVSampleFormat
    fileprivate var outChannel: AVChannelLayout

    private convenience init() {
        self.init(sampleFormat: AV_SAMPLE_FMT_FLT, sampleRate: 48000, channel: AVChannelLayout.defaultValue)
    }

    convenience init(codecpar: AVCodecParameters) {
        self.init(sampleFormat: AVSampleFormat(rawValue: codecpar.format), sampleRate: codecpar.sample_rate, channel: codecpar.ch_layout)
    }

    convenience init(frame: AVFrame) {
        self.init(sampleFormat: AVSampleFormat(rawValue: frame.format), sampleRate: frame.sample_rate, channel: frame.ch_layout)
    }

    init(sampleFormat: AVSampleFormat, sampleRate: Int32, channel: AVChannelLayout) {
        self.channel = channel
        outChannel = channel
        if sampleRate <= 0 {
            self.sampleRate = 48000
        } else {
            self.sampleRate = sampleRate
        }
        self.sampleFormat = sampleFormat
        #if os(macOS)
        let channelCount = AVAudioChannelCount(2)
        #else
        let channelCount = KSOptions.outputNumberOfChannels(channelCount: AVAudioChannelCount(outChannel.nb_channels))
        #endif
        audioFormat = AudioDescriptor.audioFormat(sampleFormat: sampleFormat, sampleRate: self.sampleRate, outChannel: &outChannel, channelCount: channelCount)
    }

    public static func == (lhs: AudioDescriptor, rhs: AudioDescriptor) -> Bool {
        lhs.sampleFormat == rhs.sampleFormat && lhs.sampleRate == rhs.sampleRate && lhs.channel == rhs.channel
    }

    public static func == (lhs: AudioDescriptor, rhs: AVFrame) -> Bool {
        var sampleRate = rhs.sample_rate
        if sampleRate <= 0 {
            sampleRate = 48000
        }
        return lhs.sampleFormat == AVSampleFormat(rawValue: rhs.format) && lhs.sampleRate == sampleRate && lhs.channel == rhs.ch_layout
    }

    static func audioFormat(sampleFormat: AVSampleFormat, sampleRate: Int32, outChannel: inout AVChannelLayout, channelCount: AVAudioChannelCount) -> AVAudioFormat {
        if channelCount != AVAudioChannelCount(outChannel.nb_channels) {
            av_channel_layout_default(&outChannel, Int32(channelCount))
        }
        let layoutTag: AudioChannelLayoutTag
        if let tag = outChannel.layoutTag {
            layoutTag = tag
        } else {
            av_channel_layout_default(&outChannel, Int32(channelCount))
            if let tag = outChannel.layoutTag {
                layoutTag = tag
            } else {
                av_channel_layout_default(&outChannel, 2)
                layoutTag = outChannel.layoutTag!
            }
        }
        KSLog("[audio] out channelLayout: \(outChannel)")
        var commonFormat: AVAudioCommonFormat
        var interleaved: Bool
        switch sampleFormat {
        case AV_SAMPLE_FMT_S16:
            commonFormat = .pcmFormatInt16
            interleaved = true
        case AV_SAMPLE_FMT_S32:
            commonFormat = .pcmFormatInt32
            interleaved = true
        case AV_SAMPLE_FMT_FLT:
            commonFormat = .pcmFormatFloat32
            interleaved = true
        case AV_SAMPLE_FMT_DBL:
            commonFormat = .pcmFormatFloat64
            interleaved = true
        case AV_SAMPLE_FMT_S16P:
            commonFormat = .pcmFormatInt16
            interleaved = false
        case AV_SAMPLE_FMT_S32P:
            commonFormat = .pcmFormatInt32
            interleaved = false
        case AV_SAMPLE_FMT_FLTP:
            commonFormat = .pcmFormatFloat32
            interleaved = false
        case AV_SAMPLE_FMT_DBLP:
            commonFormat = .pcmFormatFloat64
            interleaved = false
        default:
            commonFormat = .pcmFormatFloat32
            interleaved = false
        }
        interleaved = KSOptions.audioPlayerType == AudioRendererPlayer.self
        if !(KSOptions.audioPlayerType == AudioRendererPlayer.self || KSOptions.audioPlayerType == AudioUnitPlayer.self) {
            commonFormat = .pcmFormatFloat32
        }
        return AVAudioFormat(commonFormat: commonFormat, sampleRate: Double(sampleRate), interleaved: interleaved, channelLayout: AVAudioChannelLayout(layoutTag: layoutTag)!)
        // AVAudioChannelLayout(layout: outChannel.layoutTag.channelLayout)
    }

    public func updateAudioFormat() {
        #if os(macOS)
        let channelCount = AVAudioChannelCount(2)
        #else
        let channelCount = KSOptions.outputNumberOfChannels(channelCount: AVAudioChannelCount(channel.nb_channels))
        #endif
        audioFormat = AudioDescriptor.audioFormat(sampleFormat: sampleFormat, sampleRate: sampleRate, outChannel: &outChannel, channelCount: channelCount)
    }
}

136
KSPlayer-main/Sources/KSPlayer/MEPlayer/SubtitleDecode.swift
Normal file
@@ -0,0 +1,136 @@
//
|
||||
// SubtitleDecode.swift
|
||||
// KSPlayer
|
||||
//
|
||||
// Created by kintan on 2018/3/11.
|
||||
//
|
||||
|
||||
import CoreGraphics
|
||||
import Foundation
|
||||
import Libavformat
|
||||
#if canImport(UIKit)
|
||||
import UIKit
|
||||
#else
|
||||
import AppKit
|
||||
#endif
|
||||
class SubtitleDecode: DecodeProtocol {
|
||||
private var codecContext: UnsafeMutablePointer<AVCodecContext>?
|
||||
private let scale = VideoSwresample(dstFormat: AV_PIX_FMT_ARGB, isDovi: false)
|
||||
private var subtitle = AVSubtitle()
|
||||
private var startTime = TimeInterval(0)
|
||||
private let assParse = AssParse()
|
||||
required init(assetTrack: FFmpegAssetTrack, options: KSOptions) {
|
||||
startTime = assetTrack.startTime.seconds
|
||||
do {
|
||||
codecContext = try assetTrack.createContext(options: options)
|
||||
if let pointer = codecContext?.pointee.subtitle_header {
|
||||
let subtitleHeader = String(cString: pointer)
|
||||
_ = assParse.canParse(scanner: Scanner(string: subtitleHeader))
|
||||
}
|
||||
} catch {
|
||||
KSLog(error as CustomStringConvertible)
|
||||
}
|
||||
}
|
||||
|
||||
func decode() {}
|
||||
|
||||
func decodeFrame(from packet: Packet, completionHandler: @escaping (Result<MEFrame, Error>) -> Void) {
|
||||
guard let codecContext else {
|
||||
return
|
||||
}
|
||||
var gotsubtitle = Int32(0)
|
||||
_ = avcodec_decode_subtitle2(codecContext, &subtitle, &gotsubtitle, packet.corePacket)
|
||||
if gotsubtitle == 0 {
|
||||
return
|
||||
}
|
||||
let timestamp = packet.timestamp
|
||||
var start = packet.assetTrack.timebase.cmtime(for: timestamp).seconds + TimeInterval(subtitle.start_display_time) / 1000.0
|
||||
if start >= startTime {
|
||||
start -= startTime
|
||||
}
|
||||
var duration = 0.0
|
||||
if subtitle.end_display_time != UInt32.max {
|
||||
duration = TimeInterval(subtitle.end_display_time - subtitle.start_display_time) / 1000.0
|
||||
}
|
||||
if duration == 0, packet.duration != 0 {
|
||||
duration = packet.assetTrack.timebase.cmtime(for: packet.duration).seconds
|
||||
}
|
||||
var parts = text(subtitle: subtitle)
|
||||
/// 不用preSubtitleFrame来进行更新end。而是插入一个空的字幕来更新字幕。
|
||||
/// 因为字幕有可能不按顺序解码。这样就会导致end比start小,然后这个字幕就不会被清空了。
|
||||
if parts.isEmpty {
|
||||
parts.append(SubtitlePart(0, 0, attributedString: nil))
|
||||
}
|
||||
        for part in parts {
            part.start = start
            if duration == 0 {
                part.end = .infinity
            } else {
                part.end = start + duration
            }
            let frame = SubtitleFrame(part: part, timebase: packet.assetTrack.timebase)
            frame.timestamp = timestamp
            completionHandler(.success(frame))
        }
        avsubtitle_free(&subtitle)
    }

    func doFlushCodec() {}

    func shutdown() {
        scale.shutdown()
        avsubtitle_free(&subtitle)
        if let codecContext {
            avcodec_close(codecContext)
            avcodec_free_context(&self.codecContext)
        }
    }

    private func text(subtitle: AVSubtitle) -> [SubtitlePart] {
        var parts = [SubtitlePart]()
        var images = [(CGRect, CGImage)]()
        var origin: CGPoint = .zero
        var attributedString: NSMutableAttributedString?
        for i in 0 ..< Int(subtitle.num_rects) {
            guard let rect = subtitle.rects[i]?.pointee else {
                continue
            }
            if i == 0 {
                origin = CGPoint(x: Int(rect.x), y: Int(rect.y))
            }
            if let text = rect.text {
                if attributedString == nil {
                    attributedString = NSMutableAttributedString()
                }
                attributedString?.append(NSAttributedString(string: String(cString: text)))
            } else if let ass = rect.ass {
                let scanner = Scanner(string: String(cString: ass))
                if let group = assParse.parsePart(scanner: scanner) {
                    parts.append(group)
                }
            } else if rect.type == SUBTITLE_BITMAP {
                if let image = scale.transfer(format: AV_PIX_FMT_PAL8, width: rect.w, height: rect.h, data: Array(tuple: rect.data), linesize: Array(tuple: rect.linesize))?.cgImage() {
                    images.append((CGRect(x: Int(rect.x), y: Int(rect.y), width: Int(rect.w), height: Int(rect.h)), image))
                }
            }
        }
        if images.count > 0 {
            let part = SubtitlePart(0, 0, attributedString: nil)
            if images.count > 1 {
                origin = .zero
            }
            var image: UIImage?
            // Subtitles need transparency, which rules out JPEG; TIFF support on iOS is
            // shaky (green backgrounds), and HEIC stalls the main thread when displayed,
            // so PNG is used.
            if let data = CGImage.combine(images: images)?.data(type: .png, quality: 0.2) {
                image = UIImage(data: data)
            }
            part.image = image
            part.origin = origin
            parts.append(part)
        }
        if let attributedString {
            parts.append(SubtitlePart(0, 0, attributedString: attributedString))
        }
        return parts
    }
}
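The display window computed in decodeFrame above follows three rules: start is the packet time plus start_display_time, an end_display_time of UInt32.max means the cue shows until replaced, and a zero duration falls back to the packet duration (failing that, the end stays open). The same arithmetic as a standalone sketch; SubtitleWindow and subtitleWindow are illustrative names, not KSPlayer API:

import Foundation

struct SubtitleWindow {
    let start: TimeInterval
    let end: TimeInterval // .infinity: display until the next cue clears it
}

func subtitleWindow(packetSeconds: TimeInterval,
                    startDisplayMs: UInt32,
                    endDisplayMs: UInt32,
                    packetDuration: TimeInterval) -> SubtitleWindow {
    let start = packetSeconds + TimeInterval(startDisplayMs) / 1000.0
    var duration = 0.0
    if endDisplayMs != UInt32.max {
        duration = TimeInterval(endDisplayMs - startDisplayMs) / 1000.0
    }
    if duration == 0, packetDuration != 0 {
        duration = packetDuration
    }
    return SubtitleWindow(start: start, end: duration == 0 ? .infinity : start + duration)
}

// subtitleWindow(packetSeconds: 10, startDisplayMs: 500, endDisplayMs: .max, packetDuration: 0)
// yields start 10.5 and an open end.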
132
KSPlayer-main/Sources/KSPlayer/MEPlayer/ThumbnailController.swift
Normal file
@@ -0,0 +1,132 @@
//
// ThumbnailController.swift
//
//
// Created by kintan on 12/27/23.
//

import AVFoundation
import Foundation
import Libavcodec
import Libavformat
#if canImport(UIKit)
import UIKit
#endif
public struct FFThumbnail {
    public let image: UIImage
    public let time: TimeInterval
}

public protocol ThumbnailControllerDelegate: AnyObject {
    func didUpdate(thumbnails: [FFThumbnail], forFile file: URL, withProgress: Int)
}

public class ThumbnailController {
    public weak var delegate: ThumbnailControllerDelegate?
    private let thumbnailCount: Int
    public init(thumbnailCount: Int = 100) {
        self.thumbnailCount = thumbnailCount
    }

    public func generateThumbnail(for url: URL, thumbWidth: Int32 = 240) async throws -> [FFThumbnail] {
        try await Task {
            try getPeeks(for: url, thumbWidth: thumbWidth)
        }.value
    }

    private func getPeeks(for url: URL, thumbWidth: Int32 = 240) throws -> [FFThumbnail] {
        let urlString: String
        if url.isFileURL {
            urlString = url.path
        } else {
            urlString = url.absoluteString
        }
        var thumbnails = [FFThumbnail]()
        var formatCtx = avformat_alloc_context()
        defer {
            avformat_close_input(&formatCtx)
        }
        var result = avformat_open_input(&formatCtx, urlString, nil, nil)
        guard result == 0, let formatCtx else {
            throw NSError(errorCode: .formatOpenInput, avErrorCode: result)
        }
        result = avformat_find_stream_info(formatCtx, nil)
        guard result == 0 else {
            throw NSError(errorCode: .formatFindStreamInfo, avErrorCode: result)
        }
        var videoStreamIndex = -1
        for i in 0 ..< Int32(formatCtx.pointee.nb_streams) {
            if formatCtx.pointee.streams[Int(i)]?.pointee.codecpar.pointee.codec_type == AVMEDIA_TYPE_VIDEO {
                videoStreamIndex = Int(i)
                break
            }
        }
        guard videoStreamIndex >= 0, let videoStream = formatCtx.pointee.streams[videoStreamIndex] else {
            throw NSError(description: "No video stream")
        }

        let videoAvgFrameRate = videoStream.pointee.avg_frame_rate
        if videoAvgFrameRate.den == 0 || av_q2d(videoAvgFrameRate) == 0 {
            throw NSError(description: "Avg frame rate = 0, ignore")
        }
        var codecContext = try videoStream.pointee.codecpar.pointee.createContext(options: nil)
        defer {
            avcodec_close(codecContext)
            var codecContext: UnsafeMutablePointer<AVCodecContext>? = codecContext
            avcodec_free_context(&codecContext)
        }
        let thumbHeight = thumbWidth * codecContext.pointee.height / codecContext.pointee.width
        let reScale = VideoSwresample(dstWidth: thumbWidth, dstHeight: thumbHeight, isDovi: false)
        // let duration = formatCtx.pointee.duration
        // Seeks are issued against the video stream, so formatCtx's duration (in
        // AV_TIME_BASE units) can't be used directly; rescale it to the stream's time base.
        let duration = av_rescale_q(formatCtx.pointee.duration,
                                    AVRational(num: 1, den: AV_TIME_BASE), videoStream.pointee.time_base)
        let interval = duration / Int64(thumbnailCount)
        var packet = AVPacket()
        let timeBase = Timebase(videoStream.pointee.time_base)
        var frame = av_frame_alloc()
        defer {
            av_frame_free(&frame)
        }
        guard let frame else {
            throw NSError(description: "can not av_frame_alloc")
        }
        for i in 0 ..< thumbnailCount {
            let seek_pos = interval * Int64(i) + videoStream.pointee.start_time
            avcodec_flush_buffers(codecContext)
            result = av_seek_frame(formatCtx, Int32(videoStreamIndex), seek_pos, AVSEEK_FLAG_BACKWARD)
            guard result == 0 else {
                return thumbnails
            }
            avcodec_flush_buffers(codecContext)
            while av_read_frame(formatCtx, &packet) >= 0 {
                if packet.stream_index == Int32(videoStreamIndex) {
                    if avcodec_send_packet(codecContext, &packet) < 0 {
                        break
                    }
                    let ret = avcodec_receive_frame(codecContext, frame)
                    if ret < 0 {
                        if ret == -EAGAIN {
                            continue
                        } else {
                            break
                        }
                    }
                    let image = reScale.transfer(frame: frame.pointee)?.cgImage().map {
                        UIImage(cgImage: $0)
                    }
                    let currentTimeStamp = frame.pointee.best_effort_timestamp
                    if let image {
                        let thumbnail = FFThumbnail(image: image, time: timeBase.cmtime(for: currentTimeStamp).seconds)
                        thumbnails.append(thumbnail)
                        delegate?.didUpdate(thumbnails: thumbnails, forFile: url, withProgress: i)
                    }
                    break
                }
            }
        }
        av_packet_unref(&packet)
        reScale.shutdown()
        return thumbnails
    }
}
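A hypothetical call site for the controller above; the URL and count are placeholders, not taken from the commit. generateThumbnail wraps the synchronous getPeeks in a Task, so callers can await it away from the main thread:

import Foundation

let controller = ThumbnailController(thumbnailCount: 20)
Task {
    do {
        let url = URL(fileURLWithPath: "/path/to/movie.mkv") // placeholder path
        let thumbs = try await controller.generateThumbnail(for: url, thumbWidth: 320)
        print("generated \(thumbs.count) thumbnails")
    } catch {
        print("thumbnail generation failed: \(error)")
    }
}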
216
KSPlayer-main/Sources/KSPlayer/MEPlayer/VideoToolboxDecode.swift
Normal file
@@ -0,0 +1,216 @@
//
// VideoToolboxDecode.swift
// KSPlayer
//
// Created by kintan on 2018/3/10.
//

import FFmpegKit
import Libavformat
#if canImport(VideoToolbox)
import VideoToolbox

class VideoToolboxDecode: DecodeProtocol {
    private var session: DecompressionSession {
        didSet {
            VTDecompressionSessionInvalidate(oldValue.decompressionSession)
        }
    }

    private let options: KSOptions
    private var startTime = Int64(0)
    private var lastPosition = Int64(0)
    private var needReconfig = false

    init(options: KSOptions, session: DecompressionSession) {
        self.options = options
        self.session = session
    }

    func decodeFrame(from packet: Packet, completionHandler: @escaping (Result<MEFrame, Error>) -> Void) {
        if needReconfig {
            // Work around decode failures after returning from the background to the foreground.
            session = DecompressionSession(assetTrack: session.assetTrack, options: options)!
            doFlushCodec()
            needReconfig = false
        }
        guard let corePacket = packet.corePacket?.pointee, let data = corePacket.data else {
            return
        }
        do {
            let sampleBuffer = try session.formatDescription.getSampleBuffer(isConvertNALSize: session.assetTrack.isConvertNALSize, data: data, size: Int(corePacket.size))
            let flags: VTDecodeFrameFlags = [
                ._EnableAsynchronousDecompression,
            ]
            var flagOut = VTDecodeInfoFlags.frameDropped
            let timestamp = packet.timestamp
            let packetFlags = corePacket.flags
            let duration = corePacket.duration
            let size = corePacket.size
            let status = VTDecompressionSessionDecodeFrame(session.decompressionSession, sampleBuffer: sampleBuffer, flags: flags, infoFlagsOut: &flagOut) { [weak self] status, infoFlags, imageBuffer, _, _ in
                guard let self, !infoFlags.contains(.frameDropped) else {
                    return
                }
                guard status == noErr else {
                    if status == kVTInvalidSessionErr || status == kVTVideoDecoderMalfunctionErr || status == kVTVideoDecoderBadDataErr {
                        if packet.isKeyFrame {
                            completionHandler(.failure(NSError(errorCode: .codecVideoReceiveFrame, avErrorCode: status)))
                        } else {
                            // Work around decode failures after returning from the background to the foreground.
                            self.needReconfig = true
                        }
                    }
                    return
                }
                let frame = VideoVTBFrame(fps: session.assetTrack.nominalFrameRate, isDovi: session.assetTrack.dovi != nil)
                frame.corePixelBuffer = imageBuffer
                frame.timebase = session.assetTrack.timebase
                if packet.isKeyFrame, packetFlags & AV_PKT_FLAG_DISCARD != 0, self.lastPosition > 0 {
                    self.startTime = self.lastPosition - timestamp
                }
                self.lastPosition = max(self.lastPosition, timestamp)
                frame.position = packet.position
                frame.timestamp = self.startTime + timestamp
                frame.duration = duration
                frame.size = size
                self.lastPosition += frame.duration
                completionHandler(.success(frame))
            }
            if status == noErr {
                if !flags.contains(._EnableAsynchronousDecompression) {
                    VTDecompressionSessionWaitForAsynchronousFrames(session.decompressionSession)
                }
            } else if status == kVTInvalidSessionErr || status == kVTVideoDecoderMalfunctionErr || status == kVTVideoDecoderBadDataErr {
                if packet.isKeyFrame {
                    throw NSError(errorCode: .codecVideoReceiveFrame, avErrorCode: status)
                } else {
                    // Work around decode failures after returning from the background to the foreground.
                    needReconfig = true
                }
            }
        } catch {
            completionHandler(.failure(error))
        }
    }
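    // Note (editorial, not in the diff): the startTime/lastPosition bookkeeping above
    // appears to rebase timestamps after a discard-flagged keyframe so that
    // frame.timestamp keeps increasing monotonically across an in-stream jump.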

    func doFlushCodec() {
        lastPosition = 0
        startTime = 0
    }

    func shutdown() {
        VTDecompressionSessionInvalidate(session.decompressionSession)
    }

    func decode() {
        lastPosition = 0
        startTime = 0
    }
}

class DecompressionSession {
    fileprivate let formatDescription: CMFormatDescription
    fileprivate let decompressionSession: VTDecompressionSession
    fileprivate var assetTrack: FFmpegAssetTrack
    init?(assetTrack: FFmpegAssetTrack, options: KSOptions) {
        self.assetTrack = assetTrack
        guard let pixelFormatType = assetTrack.pixelFormatType, let formatDescription = assetTrack.formatDescription else {
            return nil
        }
        self.formatDescription = formatDescription
        #if os(macOS)
        VTRegisterProfessionalVideoWorkflowVideoDecoders()
        if #available(macOS 11.0, *) {
            VTRegisterSupplementalVideoDecoderIfAvailable(formatDescription.mediaSubType.rawValue)
        }
        #endif
        // VTDecompressionSessionCanAcceptFormatDescription(<#T##session: VTDecompressionSession##VTDecompressionSession#>, formatDescription: <#T##CMFormatDescription#>)
        let attributes: NSMutableDictionary = [
            kCVPixelBufferPixelFormatTypeKey: pixelFormatType,
            kCVPixelBufferMetalCompatibilityKey: true,
            kCVPixelBufferWidthKey: assetTrack.codecpar.width,
            kCVPixelBufferHeightKey: assetTrack.codecpar.height,
            kCVPixelBufferIOSurfacePropertiesKey: NSDictionary(),
        ]
        var session: VTDecompressionSession?
        // swiftlint:disable line_length
        let status = VTDecompressionSessionCreate(allocator: kCFAllocatorDefault, formatDescription: formatDescription, decoderSpecification: CMFormatDescriptionGetExtensions(formatDescription), imageBufferAttributes: attributes, outputCallback: nil, decompressionSessionOut: &session)
        // swiftlint:enable line_length
        guard status == noErr, let decompressionSession = session else {
            return nil
        }
        if #available(iOS 14.0, tvOS 14.0, macOS 11.0, *) {
            VTSessionSetProperty(decompressionSession, key: kVTDecompressionPropertyKey_PropagatePerFrameHDRDisplayMetadata,
                                 value: kCFBooleanTrue)
        }
        if let destinationDynamicRange = options.availableDynamicRange(nil) {
            let pixelTransferProperties = [kVTPixelTransferPropertyKey_DestinationColorPrimaries: destinationDynamicRange.colorPrimaries,
                                           kVTPixelTransferPropertyKey_DestinationTransferFunction: destinationDynamicRange.transferFunction,
                                           kVTPixelTransferPropertyKey_DestinationYCbCrMatrix: destinationDynamicRange.yCbCrMatrix]
            VTSessionSetProperty(decompressionSession,
                                 key: kVTDecompressionPropertyKey_PixelTransferProperties,
                                 value: pixelTransferProperties as CFDictionary)
        }
        self.decompressionSession = decompressionSession
    }
}
#endif
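DecompressionSession simply returns nil when VideoToolbox rejects the track. A caller that wants to choose between the hardware and FFmpeg software paths up front could probe decoder support first; a small sketch, not part of the commit, assuming a platform where VTIsHardwareDecodeSupported is available (recent macOS, iOS, and tvOS):

import VideoToolbox

// Probe for a hardware HEVC decoder before committing to the VideoToolbox path.
let hevcInHardware = VTIsHardwareDecodeSupported(kCMVideoCodecType_HEVC)
print("HEVC hardware decode available: \(hevcInHardware)")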

extension CMFormatDescription {
    fileprivate func getSampleBuffer(isConvertNALSize: Bool, data: UnsafeMutablePointer<UInt8>, size: Int) throws -> CMSampleBuffer {
        if isConvertNALSize {
            var ioContext: UnsafeMutablePointer<AVIOContext>?
            let status = avio_open_dyn_buf(&ioContext)
            if status == 0 {
                var nalSize: UInt32 = 0
                let end = data + size
                var nalStart = data
                while nalStart < end {
                    nalSize = UInt32(nalStart[0]) << 16 | UInt32(nalStart[1]) << 8 | UInt32(nalStart[2])
                    avio_wb32(ioContext, nalSize)
                    nalStart += 3
                    avio_write(ioContext, nalStart, Int32(nalSize))
                    nalStart += Int(nalSize)
                }
                var demuxBuffer: UnsafeMutablePointer<UInt8>?
                let demuxSize = avio_close_dyn_buf(ioContext, &demuxBuffer)
                return try createSampleBuffer(data: demuxBuffer, size: Int(demuxSize))
            } else {
                throw NSError(errorCode: .codecVideoReceiveFrame, avErrorCode: status)
            }
        } else {
            return try createSampleBuffer(data: data, size: size)
        }
    }

    private func createSampleBuffer(data: UnsafeMutablePointer<UInt8>?, size: Int) throws -> CMSampleBuffer {
        var blockBuffer: CMBlockBuffer?
        var sampleBuffer: CMSampleBuffer?
        // swiftlint:disable line_length
        var status = CMBlockBufferCreateWithMemoryBlock(allocator: kCFAllocatorDefault, memoryBlock: data, blockLength: size, blockAllocator: kCFAllocatorNull, customBlockSource: nil, offsetToData: 0, dataLength: size, flags: 0, blockBufferOut: &blockBuffer)
        if status == noErr {
            status = CMSampleBufferCreate(allocator: kCFAllocatorDefault, dataBuffer: blockBuffer, dataReady: true, makeDataReadyCallback: nil, refcon: nil, formatDescription: self, sampleCount: 1, sampleTimingEntryCount: 0, sampleTimingArray: nil, sampleSizeEntryCount: 0, sampleSizeArray: nil, sampleBufferOut: &sampleBuffer)
            if let sampleBuffer {
                return sampleBuffer
            }
        }
        throw NSError(errorCode: .codecVideoReceiveFrame, avErrorCode: status)
        // swiftlint:enable line_length
    }
}

extension CMVideoCodecType {
    var avc: String {
        switch self {
        case kCMVideoCodecType_MPEG4Video:
            return "esds"
        case kCMVideoCodecType_H264:
            return "avcC"
        case kCMVideoCodecType_HEVC:
            return "hvcC"
        case kCMVideoCodecType_VP9:
            return "vpcC"
        default: return "avcC"
        }
    }
}
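The isConvertNALSize branch in getSampleBuffer above does one specific job: packets that carry 3-byte NAL length prefixes are rewritten with the 4-byte big-endian prefixes that CMSampleBuffer consumers expect, streamed through an FFmpeg dynamic buffer. The same transformation in pure Swift, as an illustrative helper (widenNALLengths is a hypothetical name, not part of the commit):

func widenNALLengths(_ input: [UInt8]) -> [UInt8] {
    var output = [UInt8]()
    output.reserveCapacity(input.count + input.count / 3)
    var i = 0
    while i + 3 <= input.count {
        // Read a 3-byte big-endian NAL length.
        let size = Int(input[i]) << 16 | Int(input[i + 1]) << 8 | Int(input[i + 2])
        i += 3
        guard i + size <= input.count else { break } // malformed tail, stop
        // Re-emit it as a 4-byte big-endian length, as avio_wb32 does above.
        withUnsafeBytes(of: UInt32(size).bigEndian) { output.append(contentsOf: $0) }
        output.append(contentsOf: input[i ..< i + size])
        i += size
    }
    return output
}

The FFmpeg version avoids the intermediate array by writing straight into an AVIOContext and handing the resulting buffer to CMBlockBufferCreateWithMemoryBlock.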