summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKostya Shishkov <kostya.shishkov@gmail.com>2023-06-18 16:05:17 +0200
committerKostya Shishkov <kostya.shishkov@gmail.com>2023-06-18 16:05:17 +0200
commit27c26a2ac5b4df8ba0b8f3f19933a5b213ac5799 (patch)
tree4783a0335b7a1cc3fe1eda8ba5eccdeb0ae681e9
parent749a451e826a407181e85e3e7294828517966f6a (diff)
downloadnihav-player-27c26a2ac5b4df8ba0b8f3f19933a5b213ac5799.tar.gz
improve error reporting in video part
-rw-r--r--videoplayer/src/videodec.rs32
1 file changed, 16 insertions, 16 deletions
diff --git a/videoplayer/src/videodec.rs b/videoplayer/src/videodec.rs
index 1a34aad..d30f45f 100644
--- a/videoplayer/src/videodec.rs
+++ b/videoplayer/src/videodec.rs
@@ -48,12 +48,12 @@ impl VideoDecoder {
rgb_pool: NAVideoBufferPool::new(FRAME_QUEUE_SIZE),
tb_num, tb_den,
dec, ofmt_yuv, ofmt_rgb, oinfo_yuv, oinfo_rgb,
- scaler: NAScale::new(ofmt_rgb, ofmt_rgb).unwrap(),
+ scaler: NAScale::new(ofmt_rgb, ofmt_rgb).expect("creating scaler failed"),
ifmt: NAVideoInfo { width: 0, height: 0, flipped: false, format: SDL_RGB_FMT, bits: 24 },
}
}
fn convert_buf(&mut self, bt: NABufferType, ts: u64) -> Option<FrameRecord> {
- let vinfo = bt.get_video_info().unwrap();
+ let vinfo = bt.get_video_info().expect("this should be a video buffer");
if self.ifmt.get_width() != vinfo.get_width() ||
self.ifmt.get_height() != vinfo.get_height() ||
self.ifmt.get_format() != vinfo.get_format() {
@@ -61,26 +61,26 @@ impl VideoDecoder {
let sc_ifmt = ScaleInfo { width: self.ifmt.get_width(), height: self.ifmt.get_height(), fmt: self.ifmt.get_format() };
let do_yuv = if let ColorModel::YUV(_) = self.ifmt.get_format().get_model() { true } else { false };
let ofmt = if do_yuv { self.ofmt_yuv } else { self.ofmt_rgb };
- self.scaler = NAScale::new(sc_ifmt, ofmt).unwrap();
+ self.scaler = NAScale::new(sc_ifmt, ofmt).expect("scaling should not fail");
}
let mut opic = if let ColorModel::YUV(_) = self.ifmt.get_format().get_model() {
- self.yuv_pool.prealloc_video(self.oinfo_yuv, 2).unwrap();
+ self.yuv_pool.prealloc_video(self.oinfo_yuv, 2).expect("video frame pool allocation failure");
while self.yuv_pool.get_free().is_none() {
if VDEC_STATE.is_flushing() {
return None;
}
std::thread::yield_now();
}
- NABufferType::Video(self.yuv_pool.get_free().unwrap())
+ NABufferType::Video(self.yuv_pool.get_free().expect("video frame pool should have a free frame"))
} else {
- self.rgb_pool.prealloc_video(self.oinfo_rgb, 0).unwrap();
+ self.rgb_pool.prealloc_video(self.oinfo_rgb, 0).expect("video frame pool allocation failure");
while self.rgb_pool.get_free().is_none() {
if VDEC_STATE.is_flushing() {
return None;
}
std::thread::yield_now();
}
- NABufferType::VideoPacked(self.rgb_pool.get_free().unwrap())
+ NABufferType::VideoPacked(self.rgb_pool.get_free().expect("video frame pool should have a free frame"))
};
let ret = self.scaler.convert(&bt, &mut opic);
if ret.is_err() { println!(" scaler error {:?}", ret.err()); return None; }
@@ -230,16 +230,16 @@ fn start_video_decoding(width: usize, height: usize, tb_num: u32, tb_den: u32, v
Ok(PktSendEvent::Packet(pkt)) => {
if !VDEC_STATE.is_flushing() {
if let Some((buf, time)) = vdec.next_frame(&pkt) {
- vfsend.send((buf, time)).unwrap();
+ vfsend.send((buf, time)).expect("video frame should be sent");
}
while let Some((buf, time)) = vdec.more_frames(true) {
- vfsend.send((buf, time)).unwrap();
+ vfsend.send((buf, time)).expect("video frame should be sent");
}
}
},
Ok(PktSendEvent::GetFrames) => {
while let Some((buf, time)) = vdec.more_frames(false) {
- vfsend.send((buf, time)).unwrap();
+ vfsend.send((buf, time)).expect("video frame should be sent");
}
VDEC_STATE.set_state(DecodingState::Waiting);
},
@@ -249,11 +249,11 @@ fn start_video_decoding(width: usize, height: usize, tb_num: u32, tb_den: u32, v
},
Ok(PktSendEvent::End) => {
while vdec.yuv_pool.get_free().is_some() && vdec.rgb_pool.get_free().is_some() {
- let ret = vdec.last_frame();
- if ret.is_none() {
+ if let Some(frm) = vdec.last_frame() {
+ vfsend.send(frm).expect("video frame should be sent");
+ } else {
break;
}
- vfsend.send(ret.unwrap()).unwrap();
}
VDEC_STATE.set_state(DecodingState::End);
break;
@@ -315,7 +315,7 @@ fn output_yuv(yuv_texture: &mut Texture, buf: &NAVideoBuffer<u8>, width: usize,
for (dline, sline) in buffer[coff..].chunks_exact_mut(pitch / 2).take(height/2).zip(usrc.chunks(usstride)) {
dline[..csize].copy_from_slice(&sline[..csize]);
}
- }).unwrap();
+ }).expect("surface should be locked");
}
@@ -424,7 +424,7 @@ impl VideoControl {
while !disp_queue.is_full() {
let is_empty = disp_queue.is_empty();
if let Ok((pic, time)) = self.vfrecv.try_recv() {
- let buf = pic.get_vbuf().unwrap();
+ let buf = pic.get_vbuf().expect("video frame should be of u8 type");
self.do_yuv = buf.get_info().get_format().get_model().is_yuv();
let idx = disp_queue.end;
disp_queue.move_end();
@@ -438,7 +438,7 @@ impl VideoControl {
(&mut dst[..csize]).copy_from_slice(&src[..csize]);
}
true
- }).unwrap();
+ }).expect("surface should be locked");
} else {
output_yuv(&mut frm.yuv_tex, &buf, disp_queue.width, disp_queue.height);
}