-
Notifications
You must be signed in to change notification settings - Fork 19
/
video_demo.py
73 lines (70 loc) · 2.23 KB
/
video_demo.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
from feat_extractor import FeatExtractor
from caffe_io import load_video
from caffe_io import save_matrix
import os
import sys
import argparse
import time
import caffe
import numpy as np
def main(argv):
    """Extract CNN features from a list of videos and save one .mat per video.

    Args:
        argv: full argument vector (argv[0] is the program name); the
            remaining entries are parsed with argparse.

    Side effects:
        Reads every path listed in the video_list file, runs the Caffe
        feature extractor on sampled frames, and writes
        <output_dir>/<video-basename>.mat for each video that opens.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'video_list',
        help='Input video list. Put path to video file on each line.')
    parser.add_argument(
        'output_dir',
        help='Output directory.')
    parser.add_argument(
        '--sample_rate',
        type=float,
        default=5.0,
        help='Number of frames sampled per second')
    parser.add_argument(
        '--model_def',
        default='/auto/iris-00/rn/chensun/ThirdParty/caffe_models/vgg_16/VGG_ILSVRC_16_layers_deploy.prototxt',
        help='Model definition file (default VGG16)')
    parser.add_argument(
        '--pretrained_model',
        default='/auto/iris-00/rn/chensun/ThirdParty/caffe_models/vgg_16/VGG_ILSVRC_16_layers.caffemodel',
        help='Model parameter file (default VGG16)')
    parser.add_argument(
        '--layers',
        default='fc6,fc7',
        help='Layers to be extracted, separated by commas')
    parser.add_argument(
        '--cpu',
        action='store_true',
        help='Use CPU if set')
    parser.add_argument(
        '--oversample',
        action='store_true',
        help='Oversample 10 patches per frame if set')
    # FIX: the original called parse_args() with no arguments, silently
    # ignoring the argv parameter and reading sys.argv instead. Parse the
    # vector the caller actually handed us (identical result for the
    # normal main(sys.argv) invocation).
    args = parser.parse_args(argv[1:])

    if args.cpu:
        caffe.set_mode_cpu()
        print('CPU mode')
    else:
        caffe.set_mode_gpu()
        print('GPU mode')

    # store_true already produces a bool, so args.oversample can be passed
    # straight through (the original rebuilt it with a False/if/True dance).
    extractor = FeatExtractor(args.model_def, args.pretrained_model,
                              oversample=args.oversample)
    blobs = args.layers.split(',')

    # Skip blank lines so a trailing newline in the list file does not
    # yield a bogus '' video path.
    with open(args.video_list) as f:
        videos = [line.rstrip() for line in f if line.strip()]

    # Robustness: make sure the output directory exists before the first
    # save_matrix call instead of failing on write.
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)

    for video_file in videos:
        frames = load_video(video_file, args.sample_rate)
        if len(frames) < 1:  # failed to open the video
            continue
        start = time.time()
        feats = extractor.extract_batch(frames, blobs)
        print('%s feature extracted in %f seconds.'
              % (os.path.basename(video_file), time.time() - start))
        # Stack each blob's per-frame features into a single matrix, then
        # save all blobs for this video in one .mat file named after the
        # video's basename (text before the first dot).
        for blob in blobs:
            feats[blob] = np.array(feats[blob])
        stem = os.path.basename(video_file).split('.')[0]
        save_matrix(feats, os.path.join(args.output_dir, '%s.mat' % stem))
    return
# Script entry point: pass the raw argument vector through to main(),
# which parses it with argparse.
if __name__ == '__main__':
    main(sys.argv)