[Feature] Support video demo (#1014)
* First commit for video demo
* fix lint
* update
* fix
* add wget in shell
parent a672c0563f
commit dec5bf0976
demo/video_demo.py (new file)
@@ -0,0 +1,111 @@
from argparse import ArgumentParser

import cv2

from mmseg.apis import inference_segmentor, init_segmentor
from mmseg.core.evaluation import get_palette


def main():
    parser = ArgumentParser()
    parser.add_argument('video', help='Video file or webcam id')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='cityscapes',
        help='Color palette used for segmentation map')
    parser.add_argument(
        '--show', action='store_true', help='Whether to show drawn result')
    parser.add_argument(
        '--show-wait-time', default=1, type=int, help='Wait time after imshow')
    parser.add_argument(
        '--output-file', default=None, type=str, help='Output video file path')
    parser.add_argument(
        '--output-fourcc',
        default='MJPG',
        type=str,
        help='Fourcc of the output video')
    parser.add_argument(
        '--output-fps', default=-1, type=int, help='FPS of the output video')
    parser.add_argument(
        '--output-height',
        default=-1,
        type=int,
        help='Frame height of the output video')
    parser.add_argument(
        '--output-width',
        default=-1,
        type=int,
        help='Frame width of the output video')
    parser.add_argument(
        '--opacity',
        type=float,
        default=0.5,
        help='Opacity of painted segmentation map. In (0, 1] range.')
    args = parser.parse_args()

    assert args.show or args.output_file, \
        'At least one output should be enabled.'

    # build the model from a config file and a checkpoint file
    model = init_segmentor(args.config, args.checkpoint, device=args.device)

    # build input video; convert a webcam id like '0' to int so that
    # cv2.VideoCapture opens the camera rather than a file named '0'
    if args.video.isdigit():
        args.video = int(args.video)
    cap = cv2.VideoCapture(args.video)
    assert cap.isOpened()
    input_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    input_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    input_fps = cap.get(cv2.CAP_PROP_FPS)

    # init output video
    writer = None
    output_height = None
    output_width = None
    if args.output_file is not None:
        fourcc = cv2.VideoWriter_fourcc(*args.output_fourcc)
        output_fps = args.output_fps if args.output_fps > 0 else input_fps
        output_height = args.output_height if args.output_height > 0 else int(
            input_height)
        output_width = args.output_width if args.output_width > 0 else int(
            input_width)
        writer = cv2.VideoWriter(args.output_file, fourcc, output_fps,
                                 (output_width, output_height), True)

    # start looping
    try:
        while True:
            flag, frame = cap.read()
            if not flag:
                break

            # run inference on a single frame
            result = inference_segmentor(model, frame)

            # blend raw image and prediction
            draw_img = model.show_result(
                frame,
                result,
                palette=get_palette(args.palette),
                show=False,
                opacity=args.opacity)

            if args.show:
                cv2.imshow('video_demo', draw_img)
                cv2.waitKey(args.show_wait_time)
            if writer:
                if draw_img.shape[0] != output_height or draw_img.shape[
                        1] != output_width:
                    draw_img = cv2.resize(draw_img,
                                          (output_width, output_height))
                writer.write(draw_img)
    finally:
        if writer:
            writer.release()
        cap.release()


if __name__ == '__main__':
    main()
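The per-frame pipeline above boils down to the `init_segmentor` / `inference_segmentor` / `show_result` calls. For reference, here is a minimal single-frame sketch of the same API; the config, checkpoint, and image paths are illustrative (borrowed from the docs example below), not part of this commit:

```python
import cv2

from mmseg.apis import inference_segmentor, init_segmentor
from mmseg.core.evaluation import get_palette

# paths are illustrative; any valid config/checkpoint pair works
model = init_segmentor(
    'configs/cgnet/cgnet_680x680_60k_cityscapes.py',
    'checkpoints/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth',
    device='cuda:0')

frame = cv2.imread('demo/demo.png')  # a single BGR frame
result = inference_segmentor(model, frame)

# blend the prediction over the frame, exactly as the video loop does
blended = model.show_result(
    frame, result, palette=get_palette('cityscapes'), show=False, opacity=0.5)
cv2.imwrite('demo/single_frame_result.png', blended)
```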
@@ -216,3 +216,21 @@ python demo/image_demo.py demo/demo.jpg configs/pspnet/pspnet_r50-d8_512x1024_40
```

A notebook demo can be found in [demo/inference_demo.ipynb](../demo/inference_demo.ipynb).

Now we also provide a demo script to test a single video.

```shell
wget -O demo/demo.mp4 https://user-images.githubusercontent.com/22089207/144212749-44411ef4-b564-4b37-96d4-04bedec629ab.mp4
python demo/video_demo.py ${VIDEO_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} [--device ${DEVICE_NAME}] [--palette ${PALETTE}] \
    [--show] [--show-wait-time ${SHOW_WAIT_TIME}] [--output-file ${OUTPUT_FILE}] [--output-fps ${OUTPUT_FPS}] \
    [--output-height ${OUTPUT_HEIGHT}] [--output-width ${OUTPUT_WIDTH}] [--opacity ${OPACITY}]
```

If `--output-fps`, `--output-height`, or `--output-width` is left at its default of `-1`, the output video inherits the corresponding property of the input.

Examples:

```shell
wget -O demo/demo.mp4 https://user-images.githubusercontent.com/22089207/144212749-44411ef4-b564-4b37-96d4-04bedec629ab.mp4
python demo/video_demo.py demo/demo.mp4 configs/cgnet/cgnet_680x680_60k_cityscapes.py \
    checkpoints/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth \
    --device cuda:0 --palette cityscapes --show
```
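To save the blended video instead of displaying it, the output flags can be combined; a sketch along these lines should work (the output path `demo/demo_result.avi` is only an illustration; MJPG-encoded frames are conventionally stored in an `.avi` container):

```shell
python demo/video_demo.py demo/demo.mp4 configs/cgnet/cgnet_680x680_60k_cityscapes.py \
    checkpoints/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth \
    --device cuda:0 --palette cityscapes \
    --output-file demo/demo_result.avi --output-fourcc MJPG --output-fps 30
```

Omitting `--show` here is fine: the script only asserts that at least one of `--show` and `--output-file` is enabled.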