play_vedio.c
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <stdio.h>
#include <inttypes.h>
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
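/*
 * Build sketch (assumes the FFmpeg development packages and SDL 1.2 are installed
 * and provide pkg-config entries; adjust names and flags for your system):
 *   gcc play_vedio.c -o play_vedio $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil sdl)
 */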
int main(int argc, char *argv[]) {
AVFormatContext *pFormatCtx = NULL;
int i, videoStream;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVFrame *pFrame = NULL;
AVPacket packet;
int frameFinished;
AVDictionary *optionDict = NULL;
struct SwsContext *sws_ctx = NULL;
SDL_Overlay *bmp = NULL;
SDL_Surface *screen = NULL;
SDL_Rect rect;
SDL_Event event;
if(argc < 2){
fprintf(stderr, "Usage: test <file> \n");
exit(1);
}
av_register_all();
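/* Initialize the SDL video, audio, and timer subsystems */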
if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)){
fprintf(stderr,"Could not initialize SDL - %s " + *SDL_GetError());
exit(1);
}
/**
 * Open the input file
 */
if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
return -1;
/**
 * Fill pFormatCtx->streams with the correct stream information
 */
if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
return -1;
/**
 * Debug helper: dump the file information to the terminal
 */
av_dump_format(pFormatCtx, 0, argv[1], 0);
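/* Find the first video stream in the file */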
videoStream=-1;
for ( i = 0; i < pFormatCtx->nb_streams; i++)
if(pFormatCtx -> streams[i] -> codec -> codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
break;
}
if(videoStream == -1)
return -1;
/**
 * Get a pointer to the codec context for the video stream
 */
pCodecCtx = pFormatCtx -> streams[videoStream] -> codec;
/**
 * Find the matching decoder for the codec_id
 */
pCodec = avcodec_find_decoder(pCodecCtx -> codec_id);
if(pCodec == NULL){
fprintf(stderr, "Unsupported codec ! \n");
return -1;
}
/**
 * Open the decoder
 */
if(avcodec_open2(pCodecCtx, pCodec, &optionDict) <0 )
return -1;
/**
 * Allocate memory for the frame
 */
pFrame = av_frame_alloc();
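/* Create the SDL display surface; 0 bpp on Darwin lets SDL choose the depth, elsewhere use 24 bpp */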
#ifdef __DARWIN__
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif // __DARWIN__
if(!screen){
fprintf(stderr, "SDL : could not set video mode - exiting \n");
exit(1);
}
/**
 * Allocate an overlay so the YUV data can be handed to the screen
 */
bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);
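/* Set up the software scaler to convert decoded frames to YUV420P for the overlay */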
sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
i = 0;
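/* Main loop: read packets, decode the video ones, and display each finished frame */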
while (av_read_frame(pFormatCtx, &packet) >= 0){
if(packet.stream_index == videoStream){
printf("\n");
printf("packet pts: %d \n", packet.pts);
printf("packet dts: %d \n", packet.dts);
printf("packet size: %d \n", packet.size);
//printf("packet duration: %d \n", packet.duration);
printf("packet pos: %d \n", packet.pos);
printf("\n");
// Decode the video packet into a frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
printf("frame pts: %d \n", pFrame->pts);
printf("frame pkt_dts: %d \n", pFrame->pkt_dts);
printf("frame coded_picture_number: %d \n", pFrame->coded_picture_number);
printf("frame pkt_pos: %d \n", pFrame->pkt_pos);
printf("frame pkt_duration: %d \n", pFrame->pkt_duration);
printf("frame pkt_size: %d \n", pFrame->pkt_size);
if(frameFinished){
SDL_LockYUVOverlay(bmp);
printf("finish one frame \n");
printf("\n");
/**
 * An AVPicture has a data field that is an array of four pointers. Since we are
 * dealing with YUV420P we only use three of them, i.e. three planes of data; other
 * formats might need the fourth pointer for an alpha channel or something else.
 * linesize is exactly what its name suggests. The equivalent fields in the YUV
 * overlay are pixels and pitches ("pitch" is the SDL term for the width of a given
 * line of data). So here we point the three pointers in pict.data at our overlay:
 * when we write into pict we are actually writing into the overlay, which of course
 * already has the necessary space allocated.
 */
AVPicture pict;
pict.data[0] = bmp->pixels[0];
pict.data[1] = bmp->pixels[2];
pict.data[2] = bmp->pixels[1];
pict.linesize[0] = bmp->pitches[0];
pict.linesize[1] = bmp->pitches[2];
pict.linesize[2] = bmp->pitches[1];
sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize,
0, pCodecCtx->height, pict.data, pict.linesize);
SDL_UnlockYUVOverlay(bmp);
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx->width;
rect.h = pCodecCtx->height;
SDL_DisplayYUVOverlay(bmp, &rect);
SDL_Delay(10);
}
}
av_free_packet(&packet);
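/* Poll for SDL events so the window stays responsive and can be closed */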
SDL_PollEvent(&event);
switch (event.type) {
case SDL_QUIT:
SDL_Quit();
exit(0);
break;
default:
break;
}
}
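/* Clean up: free the frame, close the codec, and close the input file */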
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}