GestureRecognitionToolkit  Version: 1.0  Revision: 04-03-15
The Gesture Recognition Toolkit (GRT) is a cross-platform, open-source C++ machine learning library for real-time gesture recognition.
SOMQuantizer.cpp
The SOMQuantizer module quantizes the N-dimensional input vector to a 1-dimensional discrete value, using a SelfOrganizingMap as the quantization model. A short usage sketch follows the listing.
/*
 GRT MIT License
 Copyright (c) <2012> <Nicholas Gillian, Media Lab, MIT>

 Permission is hereby granted, free of charge, to any person obtaining a copy of this software
 and associated documentation files (the "Software"), to deal in the Software without restriction,
 including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
 and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
 subject to the following conditions:

 The above copyright notice and this permission notice shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
 LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "SOMQuantizer.h"

namespace GRT{

//Register your module with the FeatureExtraction base class
RegisterFeatureExtractionModule< SOMQuantizer > SOMQuantizer::registerModule("SOMQuantizer");

SOMQuantizer::SOMQuantizer(const UINT numClusters){

    this->numClusters = numClusters;

    classType = "SOMQuantizer";
    featureExtractionType = classType;
    debugLog.setProceedingText("[DEBUG SOMQuantizer]");
    errorLog.setProceedingText("[ERROR SOMQuantizer]");
    warningLog.setProceedingText("[WARNING SOMQuantizer]");
}

SOMQuantizer::SOMQuantizer(const SOMQuantizer &rhs){

    classType = "SOMQuantizer";
    featureExtractionType = classType;
    debugLog.setProceedingText("[DEBUG SOMQuantizer]");
    errorLog.setProceedingText("[ERROR SOMQuantizer]");
    warningLog.setProceedingText("[WARNING SOMQuantizer]");

    //Invoke the equals operator to copy the data from the rhs instance to this instance
    *this = rhs;
}

SOMQuantizer::~SOMQuantizer(){
    //Here you should add any specific code to cleanup your custom feature extraction module if needed
}

SOMQuantizer& SOMQuantizer::operator=(const SOMQuantizer &rhs){
    if(this!=&rhs){
        //Here you should copy any class variables from the rhs instance to this instance
        this->numClusters = rhs.numClusters;
        this->som = rhs.som;
        this->quantizationDistances = rhs.quantizationDistances;

        //Copy the base variables
        copyBaseVariables( (FeatureExtraction*)&rhs );
    }
    return *this;
}

bool SOMQuantizer::deepCopyFrom(const FeatureExtraction *featureExtraction){

    if( featureExtraction == NULL ) return false;

    if( this->getFeatureExtractionType() == featureExtraction->getFeatureExtractionType() ){

        //Cast the feature extraction pointer to a pointer to your custom feature extraction module
        //Then invoke the equals operator
        *this = *(SOMQuantizer*)featureExtraction;

        return true;
    }

    errorLog << "deepCopyFrom(const FeatureExtraction *featureExtraction) - FeatureExtraction Types Do Not Match!" << endl;

    return false;
}

bool SOMQuantizer::computeFeatures(const VectorDouble &inputVector){

    //Run the quantize algorithm
    quantize( inputVector );

    return true;
}

bool SOMQuantizer::reset(){

    //Reset the base class
    FeatureExtraction::reset();

    if( trained ){
        som.reset();
        std::fill(quantizationDistances.begin(),quantizationDistances.end(),0);
    }

    return true;
}

bool SOMQuantizer::clear(){

    //Clear the base class
    FeatureExtraction::clear();

    som.clear();
    quantizationDistances.clear();

    return true;
}

bool SOMQuantizer::saveModelToFile(string filename) const{

    std::fstream file;
    file.open(filename.c_str(), std::ios::out);

    if( !saveModelToFile( file ) ){
        return false;
    }

    file.close();

    return true;
}

bool SOMQuantizer::loadModelFromFile(string filename){

    std::fstream file;
    file.open(filename.c_str(), std::ios::in);

    if( !loadModelFromFile( file ) ){
        return false;
    }

    //Close the file
    file.close();

    return true;
}

bool SOMQuantizer::saveModelToFile(fstream &file) const{

    if( !file.is_open() ){
        errorLog << "saveModelToFile(fstream &file) - The file is not open!" << endl;
        return false;
    }

    //First, you should add a header (with no spaces) e.g.
    file << "SOM_QUANTIZER_FILE_V1.0" << endl;

    //Second, you should save the base feature extraction settings to the file
    if( !saveFeatureExtractionSettingsToFile( file ) ){
        errorLog << "saveFeatureExtractionSettingsToFile(fstream &file) - Failed to save base feature extraction settings to file!" << endl;
        return false;
    }

    file << "QuantizerTrained: " << trained << endl;
    file << "NumClusters: " << numClusters << endl;

    if( trained ){
        file << "SOM: \n";
        if( !som.saveModelToFile( file ) ){
            errorLog << "saveModelToFile(fstream &file) - Failed to save SelfOrganizingMap settings to file!" << endl;
            return false;
        }
    }

    return true;
}

bool SOMQuantizer::loadModelFromFile(fstream &file){

    //Clear any previous model
    clear();

    if( !file.is_open() ){
        errorLog << "loadModelFromFile(fstream &file) - The file is not open!" << endl;
        return false;
    }

    string word;

    //First, you should read and validate the header
    file >> word;
    if( word != "SOM_QUANTIZER_FILE_V1.0" ){
        errorLog << "loadModelFromFile(fstream &file) - Invalid file format!" << endl;
        return false;
    }

    //Second, you should load the base feature extraction settings from the file
    if( !loadFeatureExtractionSettingsFromFile( file ) ){
        errorLog << "loadFeatureExtractionSettingsFromFile(fstream &file) - Failed to load base feature extraction settings from file!" << endl;
        return false;
    }

    file >> word;
    if( word != "QuantizerTrained:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to load QuantizerTrained!" << endl;
        return false;
    }
    file >> trained;

    file >> word;
    if( word != "NumClusters:" ){
        errorLog << "loadModelFromFile(fstream &file) - Failed to load NumClusters!" << endl;
        return false;
    }
    file >> numClusters;

    if( trained ){
        file >> word;
        if( word != "SOM:" ){
            errorLog << "loadModelFromFile(fstream &file) - Failed to load SOM!" << endl;
            return false;
        }

        if( !som.loadModelFromFile( file ) ){
            errorLog << "loadModelFromFile(fstream &file) - Failed to load SelfOrganizingMap settings from file!" << endl;
            return false;
        }

        initialized = true;
        featureDataReady = false;
        quantizationDistances.resize(numClusters,0);
    }

    return true;
}

bool SOMQuantizer::train_(ClassificationData &trainingData){
    MatrixDouble data = trainingData.getDataAsMatrixDouble();
    return train_( data );
}

bool SOMQuantizer::train_(TimeSeriesClassificationData &trainingData){
    MatrixDouble data = trainingData.getDataAsMatrixDouble();
    return train_( data );
}

bool SOMQuantizer::train_(ClassificationDataStream &trainingData){
    MatrixDouble data = trainingData.getDataAsMatrixDouble();
    return train_( data );
}

bool SOMQuantizer::train_(UnlabelledData &trainingData){
    MatrixDouble data = trainingData.getDataAsMatrixDouble();
    return train_( data );
}

bool SOMQuantizer::train_(MatrixDouble &trainingData){

    //Clear any previous model
    clear();

    if( trainingData.getNumRows() == 0 ){
        errorLog << "train_(MatrixDouble &trainingData) - Failed to train quantizer, the training data is empty!" << endl;
        return false;
    }

    //Train the SOM model
    som.setNetworkSize( numClusters );
    som.setNetworkTypology( SelfOrganizingMap::RANDOM_NETWORK );
    som.setAlphaStart( 0.5 );
    som.setAlphaEnd( 0.1 );
    som.setMaxNumEpochs( 1000 );

    if( !som.train_( trainingData ) ){
        errorLog << "train_(MatrixDouble &trainingData) - Failed to train quantizer!" << endl;
        return false;
    }

    //Flag that the feature extraction module is now initialized
    initialized = true;
    trained = true;
    numInputDimensions = trainingData.getNumCols();
    numOutputDimensions = 1; //This is always 1 for the SOMQuantizer
    featureVector.resize(numOutputDimensions,0);
    quantizationDistances.resize(numClusters,0);

    return true;
}

UINT SOMQuantizer::quantize(const double inputValue){
    return quantize( VectorDouble(1,inputValue) );
}

UINT SOMQuantizer::quantize(const VectorDouble &inputVector){

    if( !trained ){
        errorLog << "quantize(const VectorDouble &inputVector) - The quantizer model has not been trained!" << endl;
        return 0;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "quantize(const VectorDouble &inputVector) - The size of the inputVector (" << inputVector.size() << ") does not match that of the quantizer (" << numInputDimensions << ")!" << endl;
        return 0;
    }

    //Pass the input data through the map
    if( !som.predict( inputVector ) ){
        errorLog << "quantize(const VectorDouble &inputVector) - Failed to perform mapping!" << endl;
        return 0;
    }
    quantizationDistances = som.getMappedData();

    //Search for the neuron with the maximum output
    UINT quantizedValue = 0;
    double maxValue = 0;
    for(UINT k=0; k<numClusters; k++){
        if( quantizationDistances[k] > maxValue ){
            maxValue = quantizationDistances[k];
            quantizedValue = k;
        }
    }

    featureVector[0] = quantizedValue;
    featureDataReady = true;

    return quantizedValue;
}

bool SOMQuantizer::getQuantizerTrained() const{
    return trained;
}

UINT SOMQuantizer::getNumClusters() const{
    return numClusters;
}

UINT SOMQuantizer::getQuantizedValue() const{
    return (trained ? static_cast<UINT>(featureVector[0]) : 0);
}

VectorDouble SOMQuantizer::getQuantizationDistances() const{
    return quantizationDistances;
}

SelfOrganizingMap SOMQuantizer::getSelfOrganizingMap() const{
    return som;
}

bool SOMQuantizer::setNumClusters(const UINT numClusters){
    clear();
    this->numClusters = numClusters;
    return true;
}

}//End of namespace GRT
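
A minimal usage sketch for the quantizer defined above, showing training, quantization, and serialization. The file names, the cluster count of 16, and loading the data with ClassificationData::loadDatasetFromCSVFile are illustrative assumptions; the quantizer calls themselves (train_, quantize, saveModelToFile) are the methods defined in the listing.

#include "GRT.h"
#include <iostream>
#include <cstdlib>
using namespace GRT;
using namespace std;

int main(){

    //Load some training data (hypothetical filename, GRT CSV format assumed)
    ClassificationData trainingData;
    if( !trainingData.loadDatasetFromCSVFile( "training_data.csv" ) ){
        cout << "Failed to load training data!" << endl;
        return EXIT_FAILURE;
    }

    //Train a quantizer with 16 clusters (the constructor default is 10)
    SOMQuantizer quantizer( 16 );
    if( !quantizer.train_( trainingData ) ){
        cout << "Failed to train the quantizer!" << endl;
        return EXIT_FAILURE;
    }

    //Quantize the first training sample to a discrete value in the range [0 numClusters-1]
    VectorDouble sample = trainingData[0].getSample();
    UINT quantizedValue = quantizer.quantize( sample );
    cout << "Quantized value: " << quantizedValue << endl;

    //The trained model can be saved and reloaded like any other GRT module (hypothetical filename)
    quantizer.saveModelToFile( "som_quantizer_model.grt" );

    return EXIT_SUCCESS;
}

Because SOMQuantizer is a FeatureExtraction module, the same trained model can also be added to a GRT GestureRecognitionPipeline, in which case computeFeatures() is called on each input vector and the quantized index is exposed as a 1-dimensional feature vector (numOutputDimensions is always 1, as noted in train_).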