openModeller  Version 1.5.0
nn_alg.cpp
#include "nn_alg.hh"
#include "nn.h"

// openModeller's libraries
#include <openmodeller/Sampler.hh>
// This include is only necessary if you want to work with normalized values.
// ScaleNormalizer is one of the available normalizers.
#include <openmodeller/ScaleNormalizer.hh>

#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#include <iostream>

using namespace std;

/****************************************************************/
/********************** Algorithm's Metadata ********************/

#define NUM_PARAM 6

#define HIDDEN_ID "HiddenLayerNeurons"
#define LEARNING_RATE_ID "LearningRate"
#define MOMENTUM_ID "Momentum"
#define CHOICE_ID "Choice"
#define EPOCH_ID "Epoch"
#define MIN_ERROR_ID "MinimunError"

#define NN_LOG_PREFIX "NNAlgorithm: "

// Define all parameters
/******************************/
/*** Algorithm's parameters ***/

static AlgParamMetadata parameters[NUM_PARAM] = {

  // Number of neurons in the hidden layer
  {
    HIDDEN_ID,                               // Id.
    "Number of neurons in the hidden layer", // Name.
    Integer,                                 // Type.
    "Number of neurons in the hidden layer (an additional layer between the input and output layers, not connected externally).", // Overview
    "Number of neurons in the hidden layer (an additional layer between the input and output layers, not connected externally).", // Description.
    1,    // Not zero if the parameter has lower limit.
    1,    // Parameter's lower limit.
    0,    // Not zero if the parameter has upper limit.
    0,    // Parameter's upper limit.
    "14"  // Parameter's typical (default) value.
  },

  // Learning rate
  {
    LEARNING_RATE_ID, // Id.
    "Learning Rate",  // Name.
    Real,             // Type.
    "Learning rate. Training parameter that controls the size of weight and bias changes during learning.", // Overview
    "Learning rate. Training parameter that controls the size of weight and bias changes during learning.", // Description.
    1,     // Not zero if the parameter has lower limit.
    0.0,   // Parameter's lower limit.
    1,     // Not zero if the parameter has upper limit.
    1.0,   // Parameter's upper limit.
    "0.3"  // Parameter's typical (default) value.
  },

  // Momentum
  {
    MOMENTUM_ID, // Id.
    "Momentum",  // Name.
    Real,        // Type.
    "Momentum simply adds a fraction m of the previous weight update to the current one. The momentum parameter is used to prevent the system from converging to a local minimum or saddle point.", // Overview
    "Momentum simply adds a fraction m of the previous weight update to the current one. The momentum parameter is used to prevent the system from converging to a local minimum or saddle point. A high momentum parameter can also help to increase the speed of convergence of the system. However, setting the momentum parameter too high can create a risk of overshooting the minimum, which can cause the system to become unstable. A momentum coefficient that is too low cannot reliably avoid local minima, and can also slow down the training of the system.", // Description.
    1,      // Not zero if the parameter has lower limit.
    0.0,    // Parameter's lower limit.
    1,      // Not zero if the parameter has upper limit.
    1.0,    // Parameter's upper limit.
    "0.05"  // Parameter's typical (default) value.
  },
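
  // Illustrative sketch (not part of the original file): with learning rate
  // eta and momentum m, backpropagation updates each weight roughly as
  //
  //   delta_w(t) = -eta * dE/dw + m * delta_w(t-1)
  //   w(t+1)     = w(t) + delta_w(t)
  //
  // so m controls how much of the previous update is carried into the
  // current one.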

  // Choice
  {
    CHOICE_ID,       // Id.
    "Training type", // Name.
    Integer,         // Type.
    "0 = train by epoch, 1 = train by minimum error", // Overview
    "0 = train by epoch, 1 = train by minimum error", // Description.
    1,   // Not zero if the parameter has lower limit.
    0,   // Parameter's lower limit.
    1,   // Not zero if the parameter has upper limit.
    1,   // Parameter's upper limit.
    "1"  // Parameter's typical (default) value.
  },

  // Epoch
  {
    EPOCH_ID, // Id.
    "Epoch",  // Name.
    Integer,  // Type.
    "Maximum number of training epochs: training stops once the number of iterations exceeds this value. When training by minimum error, this is the upper bound on the number of iterations.", // Overview
    "Maximum number of training epochs: training stops once the number of iterations exceeds this value. When training by minimum error, this is the upper bound on the number of iterations.", // Description.
    1,         // Not zero if the parameter has lower limit.
    1,         // Parameter's lower limit.
    0,         // Not zero if the parameter has upper limit.
    0,         // Parameter's upper limit.
    "5000000"  // Parameter's typical (default) value.
  },

  // Minimum Error
  {
    MIN_ERROR_ID,    // Id.
    "Minimum Error", // Name.
    Real,            // Type.
    "Minimum mean square error of the epoch.", // Overview
    "Minimum mean square error of the epoch. Square root of the sum of squared differences between the network targets and actual outputs divided by the number of patterns (only for training by minimum error).", // Description.
    1,      // Not zero if the parameter has lower limit.
    0.0,    // Parameter's lower limit.
    1,      // Not zero if the parameter has upper limit.
    0.5,    // Parameter's upper limit.
    "0.01"  // Parameter's typical (default) value.
  },

};
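
// Illustrative formula (an assumption based on the parameter description
// above): with P training patterns, target t_p and network output o_p,
//
//   error = sqrt( ( sum_{p=1..P} (t_p - o_p)^2 ) / P )
//
// Training by minimum error stops once this value drops below the
// minimum-error parameter.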


// Define the algorithm
/************************************/
/*** Algorithm's general metadata ***/

static AlgMetadata metadata = {

  "ANN",                       // Id.
  "Artificial Neural Network", // Name.
  "0.2",                       // Version.

  // Overview
  "Artificial neural networks are made up of interconnecting artificial neurons (programming constructs that mimic the properties of biological neurons). Artificial neural networks may either be used to gain an understanding of biological neural networks, or for solving artificial intelligence problems without necessarily creating a model of a real biological system. Content retrieved from Wikipedia on the 6th of May, 2008: http://en.wikipedia.org/wiki/Neural_network",

  // Description.
  "An artificial neural network (ANN), also called a simulated neural network (SNN) or commonly just neural network (NN), is an interconnected group of artificial neurons that uses a mathematical or computational model for information processing based on a connectionistic approach to computation. In most cases an ANN is an adaptive system that changes its structure based on external or internal information that flows through the network. In more practical terms, neural networks are non-linear statistical data modeling or decision making tools. They can be used to model complex relationships between inputs and outputs or to find patterns in data. Content retrieved from Wikipedia on the 6th of May, 2008: http://en.wikipedia.org/wiki/Neural_network",

  "Chopra, Paras, modified by Alex Oshika Avilla and Fabrício Augusto Rodrigues", // Algorithm author.
  "", // Bibliography.

  "Alex Oshika Avilla, Fabricio Augusto Rodrigues", // Code author.
  "alex.avilla [at] poli . usp .br, fabricio.rodrigues [at] poli . usp .br", // Code author's contact.

  0, // Does not accept categorical data.
  0, // Does not need (pseudo)absence points.

  NUM_PARAM, // Algorithm's parameters.
  parameters
};


/****************************************************************/
/****************** Algorithm's factory function ****************/
/*
 * Needed code to link this to openModeller
 */

OM_ALG_DLL_EXPORT
AlgorithmImpl *
algorithmFactory()
{
  return new NNAlgorithm();
}

OM_ALG_DLL_EXPORT
AlgMetadata const *
algorithmMetadata()
{
  return &metadata;
}
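
// Note (not in the original file): openModeller loads each algorithm as a
// plugin library and resolves these two exported symbols to instantiate the
// algorithm and to read its metadata, which is why both functions must be
// exported with OM_ALG_DLL_EXPORT.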


/*********************************************/
/*************** NN algorithm ****************/

/*******************/
/*** constructor ***/

NNAlgorithm::NNAlgorithm() :
  AlgorithmImpl( &metadata ),
  _done( false ),
  _num_layers( 0 )
{
  // Normalize from 0.0 to 1.0
  _normalizerPtr = new ScaleNormalizer( 0.0, 1.0, true );
}
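
// Note (assumption, not in the original file): rescaling the environmental
// inputs to [0,1] matches the (0,1) range of the sigmoid-style activations
// typically used by this kind of network, which keeps training numerically
// well behaved.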


/******************/
/*** destructor ***/

NNAlgorithm::~NNAlgorithm()
{
}


/**************************/
/*** need Normalization ***/
int
NNAlgorithm::needNormalization()
{
  if ( _samp->numAbsence() == 0 ) {

    // It will be necessary to generate pseudo-absences, so do not waste
    // time normalizing things, because normalization should ideally consider
    // all training points (including pseudo-absences). In this specific case,
    // normalization will take place in initialize().
    return 0;
  }

  return 1;
}


/******************/
/*** initialize ***/
/* Initialize the model specifying a threshold / cutoff point.
 * This is optional (model dependent).
 * @note This method is inherited from the Algorithm class
 * @return 0 on error
 */
int
NNAlgorithm::initialize()
{

  // NEURONS OF THE HIDDEN LAYER
  if ( ! getParameter( HIDDEN_ID, &_nn_parameter.hid ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" HIDDEN_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.hid < 1 ) {
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %d\n", _nn_parameter.hid );
    return 0;
  }


  // LEARNING RATE
  if ( ! getParameter( LEARNING_RATE_ID, &_nn_parameter.learning_rate ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" LEARNING_RATE_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.learning_rate < 0.0 || _nn_parameter.learning_rate > 1.0 ) {
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %f\n", _nn_parameter.learning_rate );
    return 0;
  }


  // MOMENTUM
  if ( ! getParameter( MOMENTUM_ID, &_nn_parameter.momentum ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" MOMENTUM_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.momentum < 0 || _nn_parameter.momentum >= 1 ) {
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %f\n", _nn_parameter.momentum );
    return 0;
  }


  // CHOICE
  if ( ! getParameter( CHOICE_ID, &_nn_parameter.choice ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" CHOICE_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.choice != 0 && _nn_parameter.choice != 1 ) {
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %d\n", _nn_parameter.choice );
    return 0;
  }


  // EPOCH
  if ( ! getParameter( EPOCH_ID, &_nn_parameter.epoch ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" EPOCH_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.epoch < 1 ) {

    // Note: epoch is a double, so %.0f (the original %d was undefined behavior)
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %.0f\n", _nn_parameter.epoch );
    return 0;
  }


  // MINIMUM ERROR
  if ( _nn_parameter.choice == 1 ) {

    if ( ! getParameter( MIN_ERROR_ID, &_nn_parameter.minimum_error ) ) {

      Log::instance()->error( NN_LOG_PREFIX "Parameter '" MIN_ERROR_ID "' not passed.\n" );
      return 0;
    }

    if ( _nn_parameter.minimum_error < 0.0 || _nn_parameter.minimum_error > 0.5 ) {

      Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %f\n", _nn_parameter.minimum_error );
      return 0;
    }
  }


  // Check the number of layers
  _num_layers = _samp->numIndependent();


  // Check the number of presences
  num_presences = _samp->numPresence();

  if ( num_presences == 0 ) {

    Log::instance()->warn( NN_LOG_PREFIX "No presence points inside the mask!\n" );
    return 0;
  }

  // Load presence points.
  presences = _samp->getPresences();


  // Check the number of absences
  num_absences = _samp->numAbsence();

  if ( num_absences == 0 ) {

    Log::instance()->debug( NN_LOG_PREFIX "Generating pseudo-absences.\n" );

    num_absences = num_presences;

    absences = new OccurrencesImpl( presences->label(), presences->coordSystem() );


    for ( int i = 0; i < num_absences; ++i ) {

      OccurrencePtr oc = _samp->getPseudoAbsence();
      absences->insert( oc );
    }

    // Compute normalization with all points
    SamplerPtr mySamplerPtr = createSampler( _samp->getEnvironment(), presences, absences );


    // Note: the generated pseudo-absences may fall outside the map.
    _normalizerPtr->computeNormalization( mySamplerPtr );
    setNormalization( _samp );
    absences->normalize( _normalizerPtr );
  }
  else {

    Log::instance()->debug( NN_LOG_PREFIX "Using absence points provided.\n" );

    // should be normalized already
    absences = _samp->getAbsences();
  }

  // Presence Points
  vector_input.resize( num_presences + num_absences ); // initialize vector_input
  for ( int i = 0; i < (num_presences + num_absences); ++i ) {
    vector_input[i].resize( _num_layers, 0 );
  }

  vector_output.resize( num_presences + num_absences ); // initialize vector_output
  for ( int i = 0; i < (num_presences + num_absences); ++i ) {
    vector_output[i].resize( 1, 0 );
  }

  for ( int j = 0; j < num_presences; j++ ) {

    Sample env_data = (*presences)[j]->environment();

    for ( int i = 0; i < _num_layers; i++ ) {

      vector_input[j][i] = (double)env_data[i];
    }
  }

  // Absence Points
  for ( int j = 0; j < num_absences; j++ ) {

    Sample env_data = (*absences)[j]->environment();

    for ( int i = 0; i < _num_layers; i++ ) {

      vector_input[j + num_presences][i] = (double)env_data[i];
    }
  }


  _nn_parameter.pattern = num_presences + num_absences;

  _nn_parameter.outp = 1;


  // Presence points
  for ( int j = 0; j < num_presences; j++ ) {

    for ( int i = 0; i < _nn_parameter.outp; i++ ) { // Always i=0 because there is one output

      vector_output[j][i] = 1.00000000; // Target for a presence point
    }
  }

  // Absence points
  for ( int j = num_presences; j < _nn_parameter.pattern; j++ ) {

    for ( int i = 0; i < _nn_parameter.outp; i++ ) { // Always i=0 because there is one output

      vector_output[j][i] = 0.00000000; // Target for an absence point
    }
  }


  // Initialize the variables of the Neural Network that will be used during training
  int layers[3];

  layers[0] = _num_layers;
  layers[1] = _nn_parameter.hid;
  layers[2] = 1; // or layers[2] = _nn_parameter.outp = 1;

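  // Network topology (as set above): one input neuron per environmental
  // layer, one user-configurable hidden layer, and a single output neuron
  // whose activation is interpreted as habitat suitability.
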
  _nn_parameter.inp = _num_layers;

  network.SetData( _nn_parameter.learning_rate, layers, 3 );

  network.RandomizeWB();

  amount_epoch = 1;

  Log::instance()->debug( NN_LOG_PREFIX "\n\n\nStarting Training... " );


  // Debugs of the features of the Neural Network
  Log::instance()->debug( NN_LOG_PREFIX "\n\n*** Features of the neural network's algorithm ***" );
  Log::instance()->debug( NN_LOG_PREFIX "\nNumber of presences: %d", num_presences );
  if ( num_absences != 0 ) {
    Log::instance()->debug( NN_LOG_PREFIX "\nNumber of absences: %d", num_absences );
  }
  else {
    Log::instance()->debug( NN_LOG_PREFIX "\nNumber of pseudo-absences: %d", num_presences );
  }
  Log::instance()->debug( NN_LOG_PREFIX "\nInput neurons: %d", _nn_parameter.inp );
  Log::instance()->debug( NN_LOG_PREFIX "\nHidden neurons: %d", _nn_parameter.hid );
  Log::instance()->debug( NN_LOG_PREFIX "\nOutput neuron: %d", _nn_parameter.outp );
  Log::instance()->debug( NN_LOG_PREFIX "\nLearning patterns: %d", _nn_parameter.pattern );
  Log::instance()->debug( NN_LOG_PREFIX "\nLearning rate: %f", _nn_parameter.learning_rate );
  Log::instance()->debug( NN_LOG_PREFIX "\nMomentum: %f", _nn_parameter.momentum );
  Log::instance()->debug( NN_LOG_PREFIX "\nEpochs: %.0f", _nn_parameter.epoch );
  if ( _nn_parameter.choice == 1 ) {
    Log::instance()->debug( NN_LOG_PREFIX "\nMinimum mean square error: %f", _nn_parameter.minimum_error );
  }


  return 1;
}


/***************/
/*** iterate ***/
int
NNAlgorithm::iterate()
{

  /* TRAINING */
  /*******************************************/

  // Training by epoch
  if ( _nn_parameter.choice == 0 ) {

    if ( amount_epoch != (unsigned long)_nn_parameter.epoch + 1 ) {

      for ( int j = 0; j < _nn_parameter.pattern; j++ ) {

        network.Train( vector_input[j], vector_output[j], j, _nn_parameter.pattern, _nn_parameter.momentum );
      }

      network.trainingEpoch( amount_epoch, _nn_parameter.epoch, _nn_parameter.pattern );
    }

    else _done = true; // Training ends
  }


  // Training by minimum error
  if ( _nn_parameter.choice == 1 ) {

    // Check if the number of iterations exceeded the limit
    if ( amount_epoch > (unsigned long)_nn_parameter.epoch ) {

      _done = true; // Training ends
      Log::instance()->warn( NN_LOG_PREFIX "Exceeded maximum number of iterations.\n\n" );
    }

    for ( int j = 0; j < _nn_parameter.pattern; j++ ) {

      network.Train( vector_input[j], vector_output[j], j, _nn_parameter.pattern, _nn_parameter.momentum );
    }

    converged = network.trainingMinimumError( _nn_parameter.pattern, _nn_parameter.minimum_error );

    // In case it converged
    if ( converged == 1 ) {

      _done = true; // Training ends
      Log::instance()->info( NN_LOG_PREFIX "Final number of epochs: %lu\n\n", amount_epoch );
    }
  }


  amount_epoch++;

  _progress = network.getProgress();

  return 1;
}
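
// Note (not in the original file): openModeller drives training by calling
// iterate() repeatedly until done() returns non-zero; each call above runs
// one full pass (epoch) over all training patterns.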


/*******************/
/*** getProgress ***/
float
NNAlgorithm::getProgress() const
{

  if ( done() )
    return 1.0;
  else
    return _progress;
}


/************/
/*** done ***/
int
NNAlgorithm::done() const
{

  if ( _done == true )
    Log::instance()->debug( NN_LOG_PREFIX "\n\nEnding Training. " );

  return _done;
}


// Returns the occurrence probability at the environment conditions x
/*****************/
/*** get Value ***/
Scalar
NNAlgorithm::getValue( const Sample& x ) const
{
  vector< vector<double> > env_input; // [1][_num_layers]
  env_input.resize( 1 );
  env_input[0].resize( _num_layers );

  for ( int i = 0; i < _num_layers; i++ ) {

    env_input[0][i] = (double)x[i];
  }

  double *output;

  network.SetInputs( env_input[0] ); // Load the values of each layer

  output = network.GetOutput();

  return (Scalar)*output;
}
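
// Note (not in the original file): during projection, openModeller calls
// getValue() once per raster cell with the normalized environmental sample x;
// the single network output in [0, 1] is used directly as the predicted
// suitability value.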



/***************************************************************/
/****************** configuration ******************************/
void
NNAlgorithm::_getConfiguration( ConfigurationPtr& config ) const
{
  // Avoid serialization when something went wrong
  if ( ! _done ) {
    return;
  }

  // These two lines create a new XML element called "NNAlgorithm"
  ConfigurationPtr model_config( new ConfigurationImpl( "NNAlgorithm" ) );
  config->addSubsection( model_config );

  model_config->addNameValue( "NumLayers", _num_layers );
  model_config->addNameValue( "HiddenLayerNeurons", _nn_parameter.hid );


  int *layers = new int[3];

  layers[0] = _num_layers;
  layers[1] = _nn_parameter.hid;
  layers[2] = 1; // or layers[2] = _nn_parameter.outp = 1;


  size_t size_weight;

  size_weight = (_num_layers * _nn_parameter.hid) + _nn_parameter.hid;
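  // Weight count check: _num_layers * hid input-to-hidden connections plus
  // hid * 1 hidden-to-output connections, matching the loop below.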

  // Write weights

  ConfigurationPtr weight_config( new ConfigurationImpl( "Weight" ) );

  model_config->addSubsection( weight_config );

  double *weight_values = new double[size_weight];


  int siz = 0;

  for ( int i = 0; i < 3; i++ ) {

    for ( int j = 0; j < layers[i]; j++ ) {

      if ( i != 2 ) { // The last layer has no outgoing weights

        for ( int k = 0; k < layers[i+1]; k++ ) {

          weight_values[siz] = network.getWeight( i, j, k ); // Get weights
          siz++;
        }
      }
    }
  }

  weight_config->addNameValue( "Values", weight_values, size_weight );


  // Write biases
  size_t size_bias;

  size_bias = _nn_parameter.hid + 1;

  ConfigurationPtr bias_config( new ConfigurationImpl( "Bias" ) );

  model_config->addSubsection( bias_config );

  double *bias_values = new double[size_bias];


  siz = 0;

  for ( int i = 0; i < 3; i++ ) {

    for ( int j = 0; j < layers[i]; j++ ) {

      if ( i != 0 ) { // The input layer has no biases

        bias_values[siz] = network.getBias( i, j ); // Get biases
        siz++;
      }
    }
  }

  bias_config->addNameValue( "ValuesBias", bias_values, size_bias );


  delete[] weight_values;
  delete[] bias_values;
  delete[] layers;
}
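
// Serialization sketch (illustrative, not in the original file) of the model
// produced above:
//
//   <NNAlgorithm NumLayers="..." HiddenLayerNeurons="...">
//     <Weight Values="w0 w1 ..."/>
//     <Bias ValuesBias="b0 b1 ..."/>
//   </NNAlgorithm>
//
// Whether each name/value pair is written as an attribute or a nested element
// depends on openModeller's Configuration implementation.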


void
NNAlgorithm::_setConfiguration( const ConstConfigurationPtr& config )
{
  ConstConfigurationPtr model_config = config->getSubsection( "NNAlgorithm", false );

  if ( ! model_config ) {
    return;
  }

  _num_layers = model_config->getAttributeAsInt( "NumLayers", 0 );
  _nn_parameter.hid = model_config->getAttributeAsInt( "HiddenLayerNeurons", 14 );


  int *layers = new int[3]; // was "new int (3)", which allocates a single int

  layers[0] = _num_layers;
  layers[1] = _nn_parameter.hid;
  layers[2] = 1; // or layers[2] = _nn_parameter.outp = 1;

  // Recreate the network structure; the random weights and biases set here
  // are overwritten with the deserialized values below.
  network.SetData( _nn_parameter.learning_rate, layers, 3 );

  network.RandomizeWB();


  // Weight
  ConstConfigurationPtr weight_config = model_config->getSubsection( "Weight", false );

  if ( ! weight_config ) {

    return;
  }

  std::vector<double> weight_values = weight_config->getAttributeAsVecDouble( "Values" );


  unsigned int siz = 0;

  for ( int i = 0; i < 3; i++ ) {

    for ( int j = 0; j < layers[i]; j++ ) {

      if ( i != 2 ) { // The last layer has no outgoing weights

        for ( int k = 0; k < layers[i+1]; k++ ) {

          network.setWeight( i, j, k, weight_values[siz] ); // Set weights
          siz++;
        }
      }
    }
  }


  // Bias
  ConstConfigurationPtr bias_config = model_config->getSubsection( "Bias", false );

  if ( ! bias_config ) {

    return;
  }

  std::vector<double> bias_values = bias_config->getAttributeAsVecDouble( "ValuesBias" );


  siz = 0;

  for ( int i = 0; i < 3; i++ ) {

    for ( int j = 0; j < layers[i]; j++ ) {

      if ( i != 0 ) { // The input layer has no biases

        network.setBias( i, j, bias_values[siz] ); // Set biases
        siz++;
      }
    }
  }

  delete[] layers;

  _done = true;
}
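
// Note (not in the original file): _setConfiguration() rebuilds the network
// topology and then restores the serialized weights and biases, so a saved
// model can be projected with getValue() without retraining; setting
// _done = true marks the deserialized model as ready.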