实现GetSampleAsync()
接下来需要实现MediaStreamSource.GetSampleAsync()。当系统解码器需要新的视频或音频sample的时候会调用这个函数,这个函数需要根据MediaStreamType返回相应的下一个sample。至于是预加载所有的sample还是用on demand的方式获取下一个sample都可以,前者可能会在播放视频的头几秒有所卡顿,后者可能会导致seek的过程略长。
以下代码演示on demand的方式。代码风格很糟糕,仅供演示用:
/// <summary>
/// Builds the next audio <c>MediaStreamSample</c> on demand from the
/// pre-parsed audio tag list at <c>audioStreamIndex</c>. When the index is
/// out of range, returns an empty sample, which the media pipeline treats
/// as the end-of-stream marker.
/// </summary>
/// <returns>The next audio sample, or an empty end-of-stream sample.</returns>
private MediaStreamSample GetAudioSample()
{
    // Default to the empty (end-of-stream) sample; overwritten on success.
    MediaStreamSample mediaStreamSample = new MediaStreamSample(this.audioStreamDescription, null, 0, 0, 0, emptyDict);
    try
    {
        var sample = audioSamples[audioStreamIndex];
        mediaStreamSample = new MediaStreamSample(this.audioStreamDescription, this.mediaStream, sample.Offset, sample.Count, sample.Timestamp, emptyDict);
    }
    catch
    {
        // Best-effort by design: an out-of-range index simply leaves the
        // empty end-of-stream sample in place.
    }
    // BUG FIX: the original listing declared a MediaStreamSample return type
    // but had no return statement at all — it could not compile without this.
    return mediaStreamSample;
}
// NOTE(review): this orphan brace block is a byte-for-byte duplicate of the
// body of GetAudioSample() above — almost certainly a copy/paste (article
// formatting) artifact. A bare block with these locals is not valid C# at
// class scope; confirm it is dead text and delete it.
{
// Default empty sample; overwritten on success (see GetAudioSample above).
MediaStreamSample mediaStreamSample = new MediaStreamSample(this.audioStreamDescription, null, 0, 0, 0, emptyDict);
try
{
var sample = audioSamples[audioStreamIndex];
mediaStreamSample = new MediaStreamSample(this.audioStreamDescription, this.mediaStream, sample.Offset, sample.Count, sample.Timestamp, emptyDict);
}
catch { }
}
/// <summary>
/// Builds the next video <c>MediaStreamSample</c> on demand from the
/// pre-parsed video tag list at <c>videoStreamIndex</c>. Converts the tag's
/// AVCC payload (4-byte length-prefixed NAL units) into Annex-B format
/// (start-code-prefixed NAL units) for the decoder, and — for the first real
/// video frame — prepends the SPS/PPS parsed from the
/// AVCDecoderConfigurationRecord stored in the first video tag.
/// </summary>
/// <returns>The next video sample, or an empty end-of-stream sample.</returns>
private MediaStreamSample GetVideoSample()
{
    // Default to the empty (end-of-stream) sample; overwritten on success.
    MediaStreamSample mediaStreamSample = new MediaStreamSample(this.videoStreamDescription, null, 0, 0, 0, emptyDict);
    try
    {
        var sample = videoSamples[videoStreamIndex];
        // Not disposed here on purpose: ownership passes to the
        // MediaStreamSample handed to the pipeline.
        MemoryStream stream = new MemoryStream();
        if (videoStreamIndex == 1)
        {
            // Handle the second video tag — the first tag that carries actual
            // video data (the first video tag was consumed by OpenMediaAsync).
            // Parse its companion AVCDecoderConfigurationRecord (first tag) so
            // SPS/PPS can be emitted ahead of the frame data.
            mediaStream.Seek(videoSamples[0].Offset, SeekOrigin.Begin);
            // Not disposed: disposing the reader would close the shared mediaStream.
            BinaryReader binaryReader = new BinaryReader(mediaStream);
            var configurationVersion = binaryReader.ReadByte();
            var avcProfileIndication = binaryReader.ReadByte();
            var avcCompatibleProfiles = binaryReader.ReadByte();
            var avcLevelIndication = binaryReader.ReadByte();
            byte lengthSizeMinusOne = binaryReader.ReadByte();
            // NOTE(review): naluLengthSize is computed but the payload loop
            // below assumes 4-byte length prefixes — confirm inputs always
            // use lengthSizeMinusOne == 3.
            var naluLengthSize = (byte)(1 + (lengthSizeMinusOne & 3));
            // Low 5 bits of the next byte = number of SPS entries.
            byte nSPS = (byte)(binaryReader.ReadByte() & 0x1f);
            var sPs = new List<byte[]>(nSPS);
            for (uint i = 0; i < nSPS; i++)
            {
                // FIX: the original called Stream.Read and ignored its return
                // value; a short read would have silently produced a corrupt
                // SPS. BinaryReader.ReadBytes reads until count or EOF.
                var spsLength = Mp4Util.BytesToUInt16BE(binaryReader.ReadBytes(2));
                sPs.Add(binaryReader.ReadBytes((int)spsLength));
            }
            byte nPPS = binaryReader.ReadByte();
            var pPs = new List<byte[]>(nPPS);
            for (uint i = 0; i < nPPS; i++)
            {
                // FIX: same ignored-Read-return defect as the SPS loop above.
                var ppsLength = Mp4Util.BytesToUInt16BE(binaryReader.ReadBytes(2));
                pPs.Add(binaryReader.ReadBytes((int)ppsLength));
            }
            // Emit the first SPS and PPS, each prefixed with an Annex-B start code.
            var sps = sPs[0];
            var pps = pPs[0];
            stream.Write(startCode, 0, startCode.Length);
            stream.Write(sps, 0, sps.Length);
            stream.Write(startCode, 0, startCode.Length);
            stream.Write(pps, 0, pps.Length);
        }
        // Convert the tag payload: each NAL unit is prefixed by a 4-byte
        // length; rewrite it as start code + NAL unit bytes.
        mediaStream.Seek(sample.Offset, SeekOrigin.Begin);
        BinaryReader reader = new BinaryReader(this.mediaStream);
        var sS = sample.Count;
        ulong count;
        while (sS > 4L)
        {
            var ui32 = reader.ReadUInt32();
            // Byte-swap the little-endian read into the big-endian NALU length.
            count = OldSkool.swaplong(ui32);
            stream.Write(startCode, 0, startCode.Length);
            stream.Write(reader.ReadBytes((int)count), 0, (int)count);
            sS -= 4 + (uint)count;
        }
        mediaStreamSample = new MediaStreamSample(this.videoStreamDescription, stream, 0, stream.Length, sample.Timestamp, emptyDict);
    }
    catch
    {
        // Best-effort by design: any parse/IO failure (including index past
        // the end of videoSamples) yields the empty end-of-stream sample.
    }
    return mediaStreamSample;
}
// NOTE(review): this orphan brace block is a byte-for-byte duplicate of the
// body of GetVideoSample() above — almost certainly a copy/paste (article
// formatting) artifact. It is not valid C# at class scope; confirm it is
// dead text and delete it.
{
MediaStreamSample mediaStreamSample = new MediaStreamSample(this.videoStreamDescription, null, 0, 0, 0, emptyDict);
try
{
var sample = videoSamples[videoStreamIndex];
MemoryStream stream = new MemoryStream();
if (videoStreamIndex == 1)
{
// Handle the second video tag (the first one that carries actual video
// data; the first video tag was consumed by OpenMediaAsync).
mediaStream.Seek(videoSamples[0].Offset, SeekOrigin.Begin);
BinaryReader binaryReader = new BinaryReader(mediaStream);
var configurationVersion = binaryReader.ReadByte();
var avcProfileIndication = binaryReader.ReadByte();
var avcCompatibleProfiles = binaryReader.ReadByte();
var avcLevelIndication = binaryReader.ReadByte();
byte lengthSizeMinusOne = binaryReader.ReadByte();
var naluLengthSize = (byte)(1 + (lengthSizeMinusOne & 3));
// Low 5 bits = number of SPS entries in the configuration record.
byte nSPS = (byte)(binaryReader.ReadByte() & 0x1f);
var sPs = new List<byte[]>(nSPS);
for (uint i = 0; i < nSPS; i++)
{
byte[] buffer = new byte[Mp4Util.BytesToUInt16BE(binaryReader.ReadBytes(2))];
mediaStream.Read(buffer, 0, buffer.Length);
sPs.Add(buffer);
}
byte nPPS = binaryReader.ReadByte();
var pPs = new List<byte[]>(nPPS);
for (uint i = 0; i < nPPS; i++)
{
byte[] buffer = new byte[Mp4Util.BytesToUInt16BE(binaryReader.ReadBytes(2))];
mediaStream.Read(buffer, 0, buffer.Length);
pPs.Add(buffer);
}
// Emit the first SPS/PPS, each prefixed with an Annex-B start code.
var sps = sPs[0];
var pps = pPs[0];
stream.Write(startCode, 0, startCode.Length);
stream.Write(sps, 0, sps.Length);
stream.Write(startCode, 0, startCode.Length);
stream.Write(pps, 0, pps.Length);
}
// Rewrite length-prefixed NAL units as start-code-prefixed units.
mediaStream.Seek(sample.Offset, SeekOrigin.Begin);
BinaryReader reader = new BinaryReader(this.mediaStream);
var sS = sample.Count;
ulong count;
while (sS > 4L)
{
var ui32 = reader.ReadUInt32();
count = OldSkool.swaplong(ui32);
stream.Write(startCode, 0, startCode.Length);
stream.Write(reader.ReadBytes((int)count), 0, (int)count);
sS -= 4 + (uint)count;
}
mediaStreamSample = new MediaStreamSample(this.videoStreamDescription, stream, 0, stream.Length, sample.Timestamp, emptyDict);
}
catch { }
return mediaStreamSample;
}
// Reads the next FLV tag from the source stream and appends it to the
// matching elementary-stream sample list (other tag types are dropped).
// Throws when the underlying stream is exhausted; callers treat that
// exception as end-of-stream.
private void DemuxNextTag()
{
    var tag = new FlvTag(this.mediaStream, ref fileOffset);
    if (tag.TagType == TagType.Audio)
        audioSamples.Add(tag);
    else if (tag.TagType == TagType.Video)
        videoSamples.Add(tag);
}

/// <summary>
/// Called by the media pipeline whenever the decoder needs the next audio or
/// video sample. Samples are demuxed on demand: when the requested stream's
/// list does not yet cover the current index, further FLV tags are read from
/// the stream first. At end of stream an empty sample is reported.
/// </summary>
/// <param name="mediaStreamType">Which elementary stream to pull from.</param>
protected override void GetSampleAsync(MediaStreamType mediaStreamType)
{
    MediaStreamSample mediaStreamSample = null;
    lock (syncRootGetSample)
    {
        if (mediaStreamType == MediaStreamType.Audio)
        {
            try
            {
                // Demux until the audio list covers the requested index.
                // (No-op when the sample is already buffered, which merges the
                // original's duplicated buffered/unbuffered branches; the
                // Get*Sample helpers swallow their own exceptions, so only the
                // demux loop can throw here.)
                while (audioStreamIndex > audioSamples.Count - 1)
                    DemuxNextTag();
                mediaStreamSample = this.GetAudioSample();
                audioStreamIndex++;
            }
            catch
            {
                // End of the audio stream: report an empty sample.
                mediaStreamSample = new MediaStreamSample(this.audioStreamDescription, null, 0, 0, 0, emptyDict);
                Debug.WriteLine("audio end: " + audioStreamIndex + "," + audioSamples.Count);
            }
        }
        else if (mediaStreamType == MediaStreamType.Video)
        {
            try
            {
                // Same demux-on-demand logic for the video list.
                while (videoStreamIndex > videoSamples.Count - 1)
                    DemuxNextTag();
                mediaStreamSample = this.GetVideoSample();
                videoStreamIndex++;
            }
            catch
            {
                // End of the video stream: report an empty sample.
                mediaStreamSample = new MediaStreamSample(this.videoStreamDescription, null, 0, 0, 0, emptyDict);
                Debug.WriteLine("video end: " + videoStreamIndex + "," + videoSamples.Count);
            }
        }
    }
    this.ReportGetSampleCompleted(mediaStreamSample);
}
// NOTE(review): this orphan brace block is a byte-for-byte duplicate of the
// body of GetSampleAsync() above — almost certainly a copy/paste (article
// formatting) artifact. It is not valid C# at class scope; confirm it is
// dead text and delete it.
{
MediaStreamSample mediaStreamSample = null;
FlvTag s = null;
lock (syncRootGetSample)
{
if (mediaStreamType == MediaStreamType.Audio)
{
// Sample already buffered: hand it out directly.
if (audioStreamIndex <= audioSamples.Count - 1)
{
mediaStreamSample = this.GetAudioSample();
audioStreamIndex++;
}
else
{
try
{
// Demux further tags until the audio list covers the index.
while (audioStreamIndex > audioSamples.Count - 1)
{
s = new FlvTag(this.mediaStream, ref fileOffset);
if (s.TagType == TagType.Audio)
audioSamples.Add(s);
else if (s.TagType == TagType.Video)
videoSamples.Add(s);
}
mediaStreamSample = this.GetAudioSample();
audioStreamIndex++;
}
catch
{
// End of the audio stream.
mediaStreamSample = new MediaStreamSample(this.audioStreamDescription, null, 0, 0, 0, emptyDict);
Debug.WriteLine("audio end: " + audioStreamIndex + "," + audioSamples.Count);
}
}
}
else if (mediaStreamType == MediaStreamType.Video)
{
if (videoStreamIndex <= videoSamples.Count - 1)
{
mediaStreamSample = this.GetVideoSample();
videoStreamIndex++;
}
else
{
try
{
while (videoStreamIndex > videoSamples.Count - 1)
{
s = new FlvTag(this.mediaStream, ref fileOffset);
if (s.TagType == TagType.Audio)
audioSamples.Add(s);
else if (s.TagType == TagType.Video)
videoSamples.Add(s);
}
mediaStreamSample = this.GetVideoSample();
videoStreamIndex++;
}
catch
{
// End of the video stream.
mediaStreamSample = new MediaStreamSample(this.videoStreamDescription, null, 0, 0, 0, emptyDict);
Debug.WriteLine("video end: " + videoStreamIndex + "," + videoSamples.Count);
}
}
}
}
this.ReportGetSampleCompleted(mediaStreamSample);
}
实现SeekAsync()
seek的原理很简单,根据传来的timestamp找到相对最近的视频和音频sample然后传给系统解码器即可:
/// <summary>
/// Repositions both elementary streams to the latest sample whose timestamp
/// does not exceed <paramref name="seekToTime"/>, demuxing further FLV tags
/// first when the target lies beyond what has been read so far, and then
/// acknowledges the seek to the pipeline.
/// </summary>
/// <param name="seekToTime">Target position, in stream timestamp units.</param>
protected override void SeekAsync(long seekToTime)
{
    if (seekToTime > 0)
    {
        lock (syncRootGetSample)
        {
            // Keep demuxing until the video list reaches the target time.
            // (An empty list or end-of-stream throws; swallowed on purpose —
            // seeking stays best-effort.)
            try
            {
                while (videoSamples[videoSamples.Count - 1].Timestamp < seekToTime)
                {
                    var tag = new FlvTag(this.mediaStream, ref fileOffset);
                    if (tag.TagType == TagType.Video)
                        videoSamples.Add(tag);
                    else if (tag.TagType == TagType.Audio)
                        audioSamples.Add(tag);
                }
            }
            catch { }
            // Likewise for the audio list.
            try
            {
                while (audioSamples[audioSamples.Count - 1].Timestamp < seekToTime)
                {
                    var tag = new FlvTag(this.mediaStream, ref fileOffset);
                    if (tag.TagType == TagType.Video)
                        videoSamples.Add(tag);
                    else if (tag.TagType == TagType.Audio)
                        audioSamples.Add(tag);
                }
            }
            catch { }
            // Scan backwards for the last video sample at or before the target.
            try
            {
                int v = videoSamples.Count;
                while (--v >= 0)
                {
                    if (videoSamples[v].Timestamp <= seekToTime)
                    {
                        videoStreamIndex = v;
                        break;
                    }
                }
            }
            catch { }
            // Scan backwards for the last audio sample at or before the target.
            try
            {
                int a = audioSamples.Count;
                while (--a >= 0)
                {
                    if (audioSamples[a].Timestamp <= seekToTime)
                    {
                        audioStreamIndex = a;
                        break;
                    }
                }
            }
            catch { }
        }
    }
    this.ReportSeekCompleted(seekToTime);
}
// NOTE(review): this orphan brace block is a byte-for-byte duplicate of the
// body of SeekAsync() above — almost certainly a copy/paste (article
// formatting) artifact. It is not valid C# at class scope; confirm it is
// dead text and delete it.
{
if (seekToTime > 0)
{
lock (syncRootGetSample)
{
// If seekToTime lies beyond what has been demuxed so far, keep
// filling the sample lists.
try
{
while (videoSamples[videoSamples.Count - 1].Timestamp < seekToTime)
{
var s = new FlvTag(this.mediaStream, ref fileOffset);
if (s.TagType == TagType.Audio)
audioSamples.Add(s);
else if (s.TagType == TagType.Video)
videoSamples.Add(s);
}
}
catch { }
try
{
while (audioSamples[audioSamples.Count - 1].Timestamp < seekToTime)
{
var s = new FlvTag(this.mediaStream, ref fileOffset);
if (s.TagType == TagType.Audio)
audioSamples.Add(s);
else if (s.TagType == TagType.Video)
videoSamples.Add(s);
}
}
catch { }
// Perform the seek: walk backwards to the last sample at or before
// the target timestamp.
try
{
for (int i = videoSamples.Count - 1; i >= 0; i--)
{
if (videoSamples[i].Timestamp <= seekToTime)
{
videoStreamIndex = i;
break;
}
}
}
catch { }
try
{
for (int i = audioSamples.Count - 1; i >= 0; i--)
{
if (audioSamples[i].Timestamp <= seekToTime)
{
audioStreamIndex = i;
break;
}
}
}
catch { }
}
}
this.ReportSeekCompleted(seekToTime);
}
实现CloseMedia()
在这里释放资源,清除sample list,关闭mediaStream。
结语
需要说明的是,该方法只在Windows Phone Silverlight应用中有效,在针对Windows Phone 8.1的RT应用中需要改变实现方式,不过变化不是特别大,参考MSDN上的相关例程再开动一下脑筋很容易就能移植过去。
关于本系列文章中引用的大量谜之class(如FlvTag等)请自行阅读参考资料[1]。
参考资料:
[1] 在Silverlight应用程序中实现对FLV视频格式的支持
» 转载请注明来源及链接:未来代码研究所
求相关完整demo~~~