author     Evan Shelhamer <shelhamer@imaginarynumber.net>    2014-08-05 10:14:35 -0700
committer  Evan Shelhamer <shelhamer@imaginarynumber.net>    2014-08-05 23:17:59 -0700
commit     0db94786a7a463fed49825811fac903f1f1fc3c8 (patch)
tree       0354ccd6be7d62ab933edd26204365b69cb46633 /python
parent     4f7726916cb965c79975c44e833e71347fa1822c (diff)
drop np.asarray() in favor of declaration (~1.75x speedup)
Diffstat (limited to 'python')
-rw-r--r--  python/caffe/classifier.py | 17
-rw-r--r--  python/caffe/detector.py   |  7
2 files changed, 16 insertions(+), 8 deletions(-)
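The pattern is the same in both files: instead of building a Python list of per-image arrays and copying it with np.asarray(), the output array is declared up front with np.zeros() and filled in place. Below is a minimal, standalone sketch of the two patterns for comparison; the transform() stub, image sizes, and batch size are placeholders, not Caffe code.

import numpy as np
import timeit

# Stand-in for a per-image transform such as resize_image() or preprocess():
# anything that maps one image to a fixed-shape float32 array.
def transform(im):
    return im.transpose(2, 0, 1).astype(np.float32)   # H x W x C -> C x H x W

images = [np.random.rand(256, 256, 3).astype(np.float32) for _ in range(64)]

def with_asarray():
    # Old pattern: build a temporary Python list, then copy it into a new ndarray.
    return np.asarray([transform(im) for im in images])

def with_declaration():
    # New pattern: declare the output array once and fill it in place.
    out = np.zeros((len(images), 3, 256, 256), dtype=np.float32)
    for ix, im in enumerate(images):
        out[ix] = transform(im)
    return out

print("asarray:    ", timeit.timeit(with_asarray, number=50))
print("declaration:", timeit.timeit(with_declaration, number=50))

The gain comes from skipping the intermediate list and the extra copy that np.asarray() makes; the exact factor varies by machine and shapes, so the ~1.75x figure in the commit message is the committer's measurement, not a guarantee.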
diff --git a/python/caffe/classifier.py b/python/caffe/classifier.py
index 61e916cc..fe471ca1 100644
--- a/python/caffe/classifier.py
+++ b/python/caffe/classifier.py
@@ -60,12 +60,15 @@ class Classifier(caffe.Net):
                     for N images and C classes.
         """
         # Scale to standardize input dimensions.
-        inputs = np.asarray([caffe.io.resize_image(im, self.image_dims)
-                             for im in inputs])
+        input_ = np.zeros((len(inputs),
+            self.image_dims[0], self.image_dims[1], inputs[0].shape[2]),
+            dtype=np.float32)
+        for ix, in_ in enumerate(inputs):
+            input_[ix] = caffe.io.resize_image(in_, self.image_dims)
 
         if oversample:
             # Generate center, corner, and mirrored crops.
-            inputs = caffe.io.oversample(inputs, self.crop_dims)
+            input_ = caffe.io.oversample(input_, self.crop_dims)
         else:
             # Take center crop.
             center = np.array(self.image_dims) / 2.0
@@ -73,11 +76,13 @@ class Classifier(caffe.Net):
                 -self.crop_dims / 2.0,
                 self.crop_dims / 2.0
             ])
-            inputs = inputs[:, crop[0]:crop[2], crop[1]:crop[3], :]
+            input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
 
         # Classify
-        caffe_in = np.asarray([self.preprocess(self.inputs[0], in_)
-                               for in_ in inputs])
+        caffe_in = np.zeros(np.array(input_.shape)[[0,3,1,2]],
+                            dtype=np.float32)
+        for ix, in_ in enumerate(input_):
+            caffe_in[ix] = self.preprocess(self.inputs[0], in_)
         out = self.forward_all(**{self.inputs[0]: caffe_in})
         predictions = out[self.outputs[0]].squeeze(axis=(2,3))
 
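One detail in the classifier hunk above: np.array(input_.shape)[[0,3,1,2]] fancy-indexes the N x H x W x C batch shape into the N x C x H x W layout that preprocess() produces, so the buffer can be declared before the loop. A small standalone illustration; the 10 x 227 x 227 x 3 batch is a placeholder.

import numpy as np

batch = np.zeros((10, 227, 227, 3), dtype=np.float32)   # N x H x W x C placeholder
nchw_shape = np.array(batch.shape)[[0, 3, 1, 2]]         # permute shape to N x C x H x W
caffe_in = np.zeros(nchw_shape, dtype=np.float32)        # declared once, filled per image
assert caffe_in.shape == (10, 3, 227, 227)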
diff --git a/python/caffe/detector.py b/python/caffe/detector.py
index 2fc23db0..f219b610 100644
--- a/python/caffe/detector.py
+++ b/python/caffe/detector.py
@@ -76,8 +76,11 @@ class Detector(caffe.Net):
             window_inputs.append(self.crop(image, window))
 
         # Run through the net (warping windows to input dimensions).
-        caffe_in = np.asarray([self.preprocess(self.inputs[0], window_in)
-                               for window_in in window_inputs])
+        caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
+                            + self.blobs[self.inputs[0]].data.shape[2:],
+                            dtype=np.float32)
+        for ix, window_in in enumerate(window_inputs):
+            caffe_in[ix] = self.preprocess(self.inputs[0], window_in)
         out = self.forward_all(**{self.inputs[0]: caffe_in})
         predictions = out[self.outputs[0]].squeeze(axis=(2,3))
 
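The detector builds its preallocated shape by tuple concatenation instead: the window count and channel count joined with the net input blob's spatial dimensions. A minimal sketch under assumed shapes; the 300 x 300 x 3 windows and the (10, 3, 227, 227) blob shape are placeholders for real Caffe state.

import numpy as np

# Placeholder crops and a placeholder input-blob shape (N x C x H x W).
window_inputs = [np.random.rand(300, 300, 3).astype(np.float32) for _ in range(5)]
blob_data_shape = (10, 3, 227, 227)   # stands in for self.blobs[self.inputs[0]].data.shape

# (num windows, channels) + (H, W) -> the N x C x H x W batch to fill in place.
shape = (len(window_inputs), window_inputs[0].shape[2]) + blob_data_shape[2:]
caffe_in = np.zeros(shape, dtype=np.float32)
assert caffe_in.shape == (5, 3, 227, 227)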