#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
# @date: Mon Oct 29 09:27:59 CET 2012
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import facerec2010
import pyvision
import pickle
import PIL
import numpy
import facereclib
class ImageCrop (facereclib.preprocessing.Preprocessor):
  """This class defines a wrapper for the facerec2010.baseline.lrpca.LRPCA class to be used as an image :py:class:`facereclib.preprocessing.Preprocessor` in the :ref:`FaceRecLib <facereclib>`."""

  def __init__(self, TUNING):
    """Constructor Documentation:

    TUNING
      The tuning for the LRPCA algorithm as taken from the facerec2010.baseline.lrpca.GBU_TUNING
    """
    facereclib.preprocessing.Preprocessor.__init__(self, **TUNING)
    self.m_lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)

  def __call__(self, image, annotations):
    """Preprocesses the image using the facerec2010.baseline.lrpca.LRPCA.preprocess function.

    Requires both 'leye' and 'reye' entries in ``annotations`` (in Bob's
    (y, x) order); raises :py:exc:`ValueError` when either is missing.
    """
    # assure that the eye positions are in the set of annotations
    if annotations is None or 'leye' not in annotations or 'reye' not in annotations:
      raise ValueError("The LRPCA image cropping needs eye positions, but they are not given.")

    # Warning! Left and right eye are mixed up here on purpose:
    # the lrpca preprocess expects left_eye_x < right_eye_x (image-based
    # left/right), while the annotations use the subject's perspective.
    # Additionally, pyvision Points take (x, y), whereas the annotation
    # tuples are in Bob's (y, x) order, hence the [1], [0] index swap.
    tile = self.m_lrpca.preprocess(
        image,
        rect=None,
        leye = pyvision.Point(annotations['reye'][1], annotations['reye'][0]),
        reye = pyvision.Point(annotations['leye'][1], annotations['leye'][0])
    )

    # pyvision uses images in (x,y)-order.
    # To be consistent to the (y,x)-order in Bob, we have to transpose
    return tile.asMatrix2D().transpose().astype(numpy.float64)

  def read_original_data(self, image_file):
    """Reads the original images using functionality from pyvision."""
    # we use pyvision to read the images. Hence, we don't have to struggle with conversion here
    return pyvision.Image(str(image_file))
class Features (facereclib.features.Extractor):
  """This class defines a wrapper for the facerec2010.baseline.lrpca.LRPCA class to be used as a :py:class:`facereclib.feature.Extractor` in the :ref:`FaceRecLib <facereclib>`."""

  def __init__(self, TUNING):
    """Constructor Documentation:

    TUNING
      The tuning for the LRPCA algorithm as taken from the facerec2010.baseline.lrpca.GBU_TUNING
    """
    facereclib.features.Extractor.__init__(self, requires_training=True, split_training_data_by_client=True, **TUNING)
    self.m_lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)

  def _py_image(self, image):
    """Converts the given (y,x)-ordered numpy array to a pyvision image."""
    # A single C-level fromarray call replaces the former per-pixel
    # putpixel loop (quadratic Python-level work); mode "L" requires
    # uint8 data, and astype truncates floats toward zero just like
    # putpixel's implicit int() conversion did.
    # NOTE(review): assumes pixel values lie in [0, 255] — values outside
    # that range wrap under astype(uint8); confirm against the caller.
    pil_image = PIL.Image.fromarray(image.astype(numpy.uint8), mode="L")
    # convert to pyvision image
    return pyvision.Image(pil_image)

  def train(self, image_list, extractor_file):
    """Trains the LRPCA module with the given image list and saves the result into the given extractor file using the pickle module.

    ``image_list`` is a list of per-client lists of preprocessed images;
    the client index within the outer list is used as the client label.
    """
    train_count = 0
    for client_index, client_images in enumerate(image_list):
      for image in client_images:
        # convert the image into a data type that is understood by FaceRec2010
        pyimage = self._py_image(image)
        # append training data to the LRPCA training
        # (the None parameters are due to the fact that preprocessing happened before)
        self.m_lrpca.addTraining(str(client_index), pyimage, None, None, None)
        train_count += 1

    facereclib.utils.info("  -> Training LRPCA with %d images" % train_count)
    self.m_lrpca.train()

    # and write the result to file, which in this case simply uses pickle
    with open(extractor_file, "wb") as f:
      pickle.dump(self.m_lrpca, f)

  def load(self, extractor_file):
    """Loads the trained LRPCA feature extractor from the given extractor file using the pickle module."""
    # NOTE(review): pickle.load executes arbitrary code from the file;
    # only load extractor files from trusted sources.
    # read LRPCA projector
    with open(extractor_file, "rb") as f:
      self.m_lrpca = pickle.load(f)

  def __call__(self, image):
    """Projects the image data using the LRPCA projector and returns a numpy.ndarray."""
    # create pyvision image
    pyimage = self._py_image(image)
    # Projects the data by creating a "FaceRecord"
    face_record = self.m_lrpca.getFaceRecord(pyimage, None, None, None)
    # return the projected data, which is stored as a numpy.ndarray
    return face_record.feature