openModeller  Version 1.4.0
nn_alg.cpp
#include "nn_alg.hh"
#include "nn.h"

// openModeller's libraries
#include <openmodeller/Sampler.hh>
// This include is only necessary if you want to work with normalized values.
// ScaleNormalizer is one of the available normalizers.
#include <openmodeller/ScaleNormalizer.hh>

#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#include <iostream>

using namespace std;
/****************************************************************/
/********************** Algorithm's Metadata ********************/

#define NUM_PARAM 6

#define HIDDEN_ID        "HiddenLayerNeurons"
#define LEARNING_RATE_ID "LearningRate"
#define MOMENTUM_ID      "Momentum"
#define CHOICE_ID        "Choice"
#define EPOCH_ID         "Epoch"
#define MIN_ERROR_ID     "MinimunError" // misspelling kept: this is the parameter id used by callers

#define NN_LOG_PREFIX "NNAlgorithm: "

// Define all parameters
/******************************/
/*** Algorithm's parameters ***/

static AlgParamMetadata parameters[NUM_PARAM] = {

  // Number of neurons in the hidden layer
  {
    HIDDEN_ID,                               // Id.
    "Number of neurons in the hidden layer", // Name.
    Integer,                                 // Type.
    "Number of neurons in the hidden layer (additional layer to the input and output layers, not connected externally).", // Overview.
    "Number of neurons in the hidden layer (additional layer to the input and output layers, not connected externally).", // Description.
    1,    // Not zero if the parameter has lower limit.
    1,    // Parameter's lower limit.
    0,    // Not zero if the parameter has upper limit.
    0,    // Parameter's upper limit.
    "14"  // Parameter's typical (default) value.
  },

  // Learning rate
  {
    LEARNING_RATE_ID,         // Id.
    "Learning Rate",          // Name.
    Real,                     // Type.
    "Learning rate. Training parameter that controls the size of weight and bias changes during learning.", // Overview.
    "Learning rate. Training parameter that controls the size of weight and bias changes during learning.", // Description.
    1,    // Not zero if the parameter has lower limit.
    0.0,  // Parameter's lower limit.
    1,    // Not zero if the parameter has upper limit.
    1.0,  // Parameter's upper limit.
    "0.3" // Parameter's typical (default) value.
  },

  // Momentum
  {
    MOMENTUM_ID, // Id.
    "Momentum",  // Name.
    Real,        // Type.
    "Momentum simply adds a fraction m of the previous weight update to the current one. The momentum parameter is used to prevent the system from converging to a local minimum or saddle point.", // Overview.
    "Momentum simply adds a fraction m of the previous weight update to the current one. The momentum parameter is used to prevent the system from converging to a local minimum or saddle point. A high momentum parameter can also help to increase the speed of convergence of the system. However, setting the momentum parameter too high can create a risk of overshooting the minimum, which can cause the system to become unstable. A momentum coefficient that is too low cannot reliably avoid local minima, and can also slow down the training of the system.", // Description.
    1,     // Not zero if the parameter has lower limit.
    0.0,   // Parameter's lower limit.
    1,     // Not zero if the parameter has upper limit.
    1.0,   // Parameter's upper limit.
    "0.05" // Parameter's typical (default) value.
  },
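
  // For reference, backpropagation with momentum typically updates each
  // weight as (standard textbook form; the exact rule used here lives in
  // nn.h and may differ in detail):
  //
  //   delta_w(t) = -learning_rate * dE/dw + momentum * delta_w(t-1)
  //
  // i.e. a fraction of the previous weight change is added to the current one.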

  // Choice
  {
    CHOICE_ID,       // Id.
    "Training type", // Name.
    Integer,         // Type.
    "0 = train by epoch, 1 = train by minimum error", // Overview.
    "0 = train by epoch, 1 = train by minimum error", // Description.
    1,  // Not zero if the parameter has lower limit.
    0,  // Parameter's lower limit.
    1,  // Not zero if the parameter has upper limit.
    1,  // Parameter's upper limit.
    "1" // Parameter's typical (default) value.
  },

  // Epoch
  {
    EPOCH_ID,  // Id.
    "Epoch",   // Name.
    Integer,   // Type.
    "Number of training epochs. Training stops once the number of iterations exceeds this value. When training by minimum error, it acts as the maximum number of iterations.", // Overview.
    "Number of training epochs. Training stops once the number of iterations exceeds this value. When training by minimum error, it acts as the maximum number of iterations.", // Description.
    1,        // Not zero if the parameter has lower limit.
    1,        // Parameter's lower limit.
    0,        // Not zero if the parameter has upper limit.
    0,        // Parameter's upper limit.
    "5000000" // Parameter's typical (default) value.
  },

  // Minimum error
  {
    MIN_ERROR_ID,    // Id.
    "Minimum Error", // Name.
    Real,            // Type.
    "Minimum mean square error of the epoch.", // Overview.
    "Minimum mean square error of the epoch: square root of the sum of squared differences between the network targets and actual outputs, divided by the number of patterns (only for training by minimum error).", // Description.
    1,     // Not zero if the parameter has lower limit.
    0.0,   // Parameter's lower limit.
    1,     // Not zero if the parameter has upper limit.
    0.5,   // Parameter's upper limit.
    "0.01" // Parameter's typical (default) value.
  },
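
  // One reading of the description above (the exact expression is computed
  // in nn.h): the per-epoch error compared against this parameter is roughly
  //
  //   error = sqrt( sum over patterns of (target - output)^2 ) / pattern
  //
  // and training by minimum error stops once this value drops below the
  // parameter.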

};


// Define the algorithm
/************************************/
/*** Algorithm's general metadata ***/

static AlgMetadata metadata = {

  "ANN",                       // Id.
  "Artificial Neural Network", // Name.
  "0.2",                       // Version.

  // Overview.
  "Artificial neural networks are made up of interconnecting artificial neurons (programming constructs that mimic the properties of biological neurons). Artificial neural networks may either be used to gain an understanding of biological neural networks, or for solving artificial intelligence problems without necessarily creating a model of a real biological system. Content retrieved from Wikipedia on the 6th of May, 2008: http://en.wikipedia.org/wiki/Neural_network",

  // Description.
  "An artificial neural network (ANN), also called a simulated neural network (SNN) or commonly just neural network (NN), is an interconnected group of artificial neurons that uses a mathematical or computational model for information processing based on a connectionist approach to computation. In most cases an ANN is an adaptive system that changes its structure based on external or internal information that flows through the network. In more practical terms, neural networks are non-linear statistical data modeling or decision making tools. They can be used to model complex relationships between inputs and outputs or to find patterns in data. Content retrieved from Wikipedia on the 6th of May, 2008: http://en.wikipedia.org/wiki/Neural_network",

  "Chopra, Paras, modified by Alex Oshika Avilla and Fabrício Augusto Rodrigues", // Algorithm author.
  "", // Bibliography.

  "Alex Oshika Avilla, Fabricio Augusto Rodrigues", // Code author.
  "alex.avilla [at] poli . usp .br, fabricio.rodrigues [at] poli . usp .br", // Code author's contact.

  0, // Does not accept categorical data.
  0, // Does not need (pseudo)absence points.

  NUM_PARAM, // Number of parameters.
  parameters // Parameter metadata.
};


/****************************************************************/
/****************** Algorithm's factory function ****************/
/*
 * Code needed to link this algorithm to openModeller.
 */

OM_ALG_DLL_EXPORT
AlgorithmImpl *
algorithmFactory()
{
  return new NNAlgorithm();
}

OM_ALG_DLL_EXPORT
AlgMetadata const *
algorithmMetadata()
{
  return &metadata;
}
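
// These two exported functions are the plugin entry points: openModeller
// loads this algorithm as a dynamic library and resolves them to obtain the
// metadata and to create instances. A rough sketch of the client side
// (illustrative only, not the actual loader code):
//
//   AlgMetadata const *meta = algorithmMetadata(); // inspect id and parameters
//   AlgorithmImpl *alg = algorithmFactory();       // create a new instance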


/*********************************************/
/*************** NN algorithm ****************/

/*******************/
/*** constructor ***/
NNAlgorithm::NNAlgorithm() :
  AlgorithmImpl(&metadata),
  _done( false ),
  _num_layers( 0 )
{
  // Normalize from 0.0 to 1.0
  _normalizerPtr = new ScaleNormalizer( 0.0, 1.0, true );
}


/******************/
/*** destructor ***/
NNAlgorithm::~NNAlgorithm()
{
}


/**************************/
/*** need Normalization ***/
int NNAlgorithm::needNormalization()
{
  if ( _samp->numAbsence() == 0 ) {

    // It will be necessary to generate pseudo-absences, so do not waste
    // time normalizing things, because normalization should ideally consider
    // all training points (including pseudo-absences). In this specific case,
    // normalization will take place in initialize().
    return 0;
  }

  return 1;
}
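
// (This assumes the framework calls needNormalization() before initialize(),
// so returning 0 merely defers normalization to initialize(); that calling
// order is an expectation about the caller, not enforced in this file.)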


/******************/
/*** initialize ***/
/* Initialize the algorithm: read and validate parameters, assemble the
 * training set and set up the network.
 * @note This method is inherited from the Algorithm class.
 * @return 0 on error
 */
int
NNAlgorithm::initialize()
{

  // NEURONS OF THE HIDDEN LAYER
  if ( ! getParameter( HIDDEN_ID, &_nn_parameter.hid ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" HIDDEN_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.hid < 1 ) {
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %d\n", _nn_parameter.hid );
    return 0;
  }


  // LEARNING RATE
  if ( ! getParameter( LEARNING_RATE_ID, &_nn_parameter.learning_rate ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" LEARNING_RATE_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.learning_rate < 0 || _nn_parameter.learning_rate > 1 ) {
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %f\n", _nn_parameter.learning_rate );
    return 0;
  }


  // MOMENTUM
  if ( ! getParameter( MOMENTUM_ID, &_nn_parameter.momentum ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" MOMENTUM_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.momentum < 0 || _nn_parameter.momentum >= 1 ) {
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %f\n", _nn_parameter.momentum );
    return 0;
  }


  // CHOICE
  if ( ! getParameter( CHOICE_ID, &_nn_parameter.choice ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" CHOICE_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.choice != 0 && _nn_parameter.choice != 1 ) {
    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %d\n", _nn_parameter.choice );
    return 0;
  }


  // EPOCH
  if ( ! getParameter( EPOCH_ID, &_nn_parameter.epoch ) ) {

    Log::instance()->error( NN_LOG_PREFIX "Parameter '" EPOCH_ID "' not passed.\n" );
    return 0;
  }

  if ( _nn_parameter.epoch < 1 ) {

    Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %.0f\n", _nn_parameter.epoch );
    return 0;
  }


  // MINIMUM ERROR
  if ( _nn_parameter.choice == 1 ) {

    if ( ! getParameter( MIN_ERROR_ID, &_nn_parameter.minimum_error ) ) {

      Log::instance()->error( NN_LOG_PREFIX "Parameter '" MIN_ERROR_ID "' not passed.\n" );
      return 0;
    }

    if ( _nn_parameter.minimum_error < 0 || _nn_parameter.minimum_error > 0.5 ) {

      Log::instance()->warn( NN_LOG_PREFIX "Parameter out of range: %f\n", _nn_parameter.minimum_error );
      return 0;
    }
  }


  // Number of input neurons: one per environmental variable.
  // (Despite the name, _num_layers holds the input dimension, not a layer count.)
  _num_layers = _samp->numIndependent();


  // Check the number of presences
  num_presences = _samp->numPresence();

  if ( num_presences == 0 ) {

    Log::instance()->warn( NN_LOG_PREFIX "No presence points inside the mask!\n" );
    return 0;
  }

  // Load presence points.
  presences = _samp->getPresences();


  // Check the number of absences
  num_absences = _samp->numAbsence();

  if ( num_absences == 0 ) {

    Log::instance()->debug( NN_LOG_PREFIX "Generating pseudo-absences.\n" );

    num_absences = num_presences;

    absences = new OccurrencesImpl( presences->label(), presences->coordSystem() );

    for ( int i = 0; i < num_absences; ++i ) {

      OccurrencePtr oc = _samp->getPseudoAbsence();
      absences->insert( oc );
    }

    // Compute normalization with all points (presences and pseudo-absences).
    SamplerPtr mySamplerPtr = createSampler( _samp->getEnvironment(), presences, absences );

    // Note: pseudo-absence generation may yield points outside the map extent.
    _normalizerPtr->computeNormalization( mySamplerPtr );
    setNormalization( _samp );
    absences->normalize( _normalizerPtr );
  }
  else {

    Log::instance()->debug( NN_LOG_PREFIX "Using absence points provided.\n" );

    // These should already be normalized.
    absences = _samp->getAbsences();
  }

  // Allocate the training matrices: one input row and one output row per point.
  vector_input.resize( num_presences + num_absences );
  vector_output.resize( num_presences + num_absences );

  for ( int i = 0; i < (num_presences + num_absences); ++i ) {
    vector_input[i].resize( _num_layers, 0 );
    vector_output[i].resize( 1, 0 );
  }

  // Presence points: copy the environmental values of each presence sample.
  for ( int j = 0; j < num_presences; j++ ) {

    Sample env_data = (*presences)[j]->environment();

    for ( int i = 0; i < _num_layers; i++ ) {

      vector_input[j][i] = (double)env_data[i];
    }
  }

  // Absence points: copy the environmental values of each absence sample.
  for ( int j = 0; j < num_absences; j++ ) {

    Sample env_data = (*absences)[j]->environment();

    for ( int i = 0; i < _num_layers; i++ ) {

      vector_input[j+num_presences][i] = (double)env_data[i];
    }
  }


  _nn_parameter.pattern = num_presences + num_absences;

  _nn_parameter.outp = 1;


  // Presence points: target output is 1.
  for ( int j = 0; j < num_presences; j++ ) {

    for ( int i = 0; i < _nn_parameter.outp; i++ ) { // i is always 0: there is a single output

      vector_output[j][i] = 1.0;
    }
  }

  // Absence points: target output is 0.
  for ( int j = num_presences; j < _nn_parameter.pattern; j++ ) {

    for ( int i = 0; i < _nn_parameter.outp; i++ ) { // i is always 0: there is a single output

      vector_output[j][i] = 0.0;
    }
  }
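
  // At this point the training set is fully assembled:
  //
  //   vector_input[0 .. num_presences-1]        -> presence samples
  //   vector_input[num_presences .. pattern-1]  -> (pseudo-)absence samples
  //   vector_output[p][0]                       -> 1.0 for presences, 0.0 for absences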


  // Initialize the neural network variables used during training.
  int layers[3];

  layers[0] = _num_layers;        // input neurons
  layers[1] = _nn_parameter.hid;  // hidden neurons
  layers[2] = 1;                  // output neuron (= _nn_parameter.outp)

  network.SetData( _nn_parameter.learning_rate, layers, 3 );
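
  // SetData is assumed to configure the topology (input -> hidden -> output)
  // and the learning rate, and RandomizeWB to seed weights and biases with
  // random values before training; both are implemented in nn.h, and these
  // descriptions are inferred from their usage here.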

  network.RandomizeWB();

  _nn_parameter.inp = _num_layers;

  amount_epoch = 1;

  Log::instance()->debug( NN_LOG_PREFIX "\n\n\nStarting Training... " );


  // Debug output describing the neural network's configuration.
  Log::instance()->debug( NN_LOG_PREFIX "\n\n***  Features of the neural network's algorithm  ***" );
  Log::instance()->debug( NN_LOG_PREFIX "\nNumber of presences: %d", num_presences );
  if ( num_absences != 0 ) {
    Log::instance()->debug( NN_LOG_PREFIX "\nNumber of absences: %d", num_absences );
  }
  else {
    Log::instance()->debug( NN_LOG_PREFIX "\nNumber of pseudo-absences: %d", num_presences );
  }
  Log::instance()->debug( NN_LOG_PREFIX "\nInput neurons: %d", _nn_parameter.inp );
  Log::instance()->debug( NN_LOG_PREFIX "\nHidden neurons: %d", _nn_parameter.hid );
  Log::instance()->debug( NN_LOG_PREFIX "\nOutput neuron: %d", _nn_parameter.outp );
  Log::instance()->debug( NN_LOG_PREFIX "\nLearning patterns: %d", _nn_parameter.pattern );
  Log::instance()->debug( NN_LOG_PREFIX "\nLearning rate: %f", _nn_parameter.learning_rate );
  Log::instance()->debug( NN_LOG_PREFIX "\nMomentum: %f", _nn_parameter.momentum );
  Log::instance()->debug( NN_LOG_PREFIX "\nChoice: %d", _nn_parameter.choice );
  Log::instance()->debug( NN_LOG_PREFIX "\nEpochs: %.0f", _nn_parameter.epoch );
  if ( _nn_parameter.choice == 1 ) {
    Log::instance()->debug( NN_LOG_PREFIX "\nMinimum mean square error: %f", _nn_parameter.minimum_error );
  }


  return 1;
}


/***************/
/*** iterate ***/
int
NNAlgorithm::iterate()
{

  /*                 TRAINING                */
  /*******************************************/

  // Training by epoch
  if ( _nn_parameter.choice == 0 ) {

    if ( amount_epoch <= (unsigned long)_nn_parameter.epoch ) {

      for ( int j = 0; j < _nn_parameter.pattern; j++ ) {

        network.Train( vector_input[j], vector_output[j], j, _nn_parameter.pattern, _nn_parameter.momentum );
      }

      network.trainingEpoch( amount_epoch, _nn_parameter.epoch, _nn_parameter.pattern );
    }
    else {
      _done = true; // Training ends
    }
  }


  // Training by minimum error
  if ( _nn_parameter.choice == 1 ) {

    // Check whether the number of iterations exceeded the limit
    if ( amount_epoch > _nn_parameter.epoch ) {

      _done = true; // Training ends
      Log::instance()->warn( NN_LOG_PREFIX "Exceeded maximum number of iterations.\n\n" );
    }

    for ( int j = 0; j < _nn_parameter.pattern; j++ ) {

      network.Train( vector_input[j], vector_output[j], j, _nn_parameter.pattern, _nn_parameter.momentum );
    }

    converged = network.trainingMinimumError( _nn_parameter.pattern, _nn_parameter.minimum_error );

    // If the network converged
    if ( converged == 1 ) {

      _done = true; // Training ends
      Log::instance()->info( NN_LOG_PREFIX "Final number of the epoch: %lu\n\n", amount_epoch );
    }
  }


  amount_epoch++;

  _progress = network.getProgress();

  return 1;
}
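
// openModeller drives training incrementally: iterate() is expected to be
// called repeatedly, with done() and getProgress() consulted between calls,
// until done() returns non-zero.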


/*******************/
/*** getProgress ***/
float
NNAlgorithm::getProgress() const
{

  if ( done() )
    return 1.0;
  else
    return _progress;
}


/************/
/*** done ***/
int
NNAlgorithm::done() const
{

  if ( _done )
     Log::instance()->debug( NN_LOG_PREFIX "\n\nEnding Training. " );

  return _done;
}


// Returns the occurrence probability at the environmental conditions x.
/*****************/
/*** get Value ***/
Scalar
NNAlgorithm::getValue( const Sample& x ) const
{
  // One input value per environmental layer.
  vector<double> env_input( _num_layers );

  for ( int i = 0; i < _num_layers; i++ ) {

    env_input[i] = (double)x[i];
  }

  network.SetInputs( env_input ); // Load the input layer values

  double *output = network.GetOutput();

  return (Scalar)*output;
}
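
// Illustrative use of getValue() (hypothetical snippet; the framework
// normally performs the call, after normalizing the Sample with the
// ScaleNormalizer installed in the constructor):
//
//   Sample env = ...;                    // one value per environmental layer
//   Scalar prob = alg->getValue( env );  // network output, nominally in [0, 1]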



/***************************************************************/
/****************** configuration ******************************/
void
NNAlgorithm::_getConfiguration( ConfigurationPtr& config ) const
{
  // Avoid serialization when something went wrong
  if ( ! _done ) {
    return;
  }

  // These two lines create a new XML element called "NNAlgorithm"
  ConfigurationPtr model_config( new ConfigurationImpl("NNAlgorithm") );
  config->addSubsection( model_config );

  model_config->addNameValue( "NumLayers", _num_layers );
  model_config->addNameValue( "HiddenLayerNeurons", _nn_parameter.hid );


  int *layers = new int[3];

  layers[0] = _num_layers;        // input neurons
  layers[1] = _nn_parameter.hid;  // hidden neurons
  layers[2] = 1;                  // output neuron (= _nn_parameter.outp)


  // Total number of weights: input->hidden plus hidden->output (one output neuron).
  size_t size_weight = (_num_layers * _nn_parameter.hid) + _nn_parameter.hid;

  // Write weights

  ConfigurationPtr weight_config( new ConfigurationImpl( "Weight" ) );

  model_config->addSubsection( weight_config );

  double *weight_values = new double[size_weight];


  int siz = 0;

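  // Weights are flattened in (layer, source neuron, destination neuron)
  // order: all input->hidden weights first, then all hidden->output weights.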
  for ( int i = 0; i < 3; i++ ) {

    for ( int j = 0; j < layers[i]; j++ ) {

      if ( i != 2 ) { // The last layer has no outgoing weights

        for ( int k = 0; k < layers[i+1]; k++ ) {

          weight_values[siz] = network.getWeight( i, j, k ); // Get weight
          siz++;
        }
      }
    }
  }

  weight_config->addNameValue( "Values", weight_values, size_weight );


  // Write biases: one per hidden neuron plus one for the output neuron.
  size_t size_bias = _nn_parameter.hid + 1;

  ConfigurationPtr bias_config( new ConfigurationImpl( "Bias" ) );

  model_config->addSubsection( bias_config );

  double *bias_values = new double[size_bias];


  siz = 0;

  for ( int i = 0; i < 3; i++ ) {

    for ( int j = 0; j < layers[i]; j++ ) {

      if ( i != 0 ) { // The input layer has no biases

        bias_values[siz] = network.getBias( i, j ); // Get bias
        siz++;
      }
    }
  }

  bias_config->addNameValue( "ValuesBias", bias_values, size_bias );


  delete[] weight_values;
  delete[] bias_values;
  delete[] layers;
}
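
// The serialized model therefore has roughly the following shape (sketch
// only; the exact attribute encoding is handled by ConfigurationImpl):
//
//   <NNAlgorithm NumLayers="..." HiddenLayerNeurons="...">
//     <Weight Values="w0 w1 ... wN"/>
//     <Bias ValuesBias="b0 b1 ... bM"/>
//   </NNAlgorithm>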


void
NNAlgorithm::_setConfiguration( const ConstConfigurationPtr& config )
{
  ConstConfigurationPtr model_config = config->getSubsection( "NNAlgorithm", false );

  if ( ! model_config ) {
    return;
  }

  _num_layers = model_config->getAttributeAsInt( "NumLayers", 0 );
  _nn_parameter.hid = model_config->getAttributeAsInt( "HiddenLayerNeurons", 14 );


  int layers[3];

  layers[0] = _num_layers;        // input neurons
  layers[1] = _nn_parameter.hid;  // hidden neurons
  layers[2] = 1;                  // output neuron (= _nn_parameter.outp)

  // Note: the learning rate is not serialized, so its value here only
  // matters if the network is trained further after deserialization.
  network.SetData( _nn_parameter.learning_rate, layers, 3 );

  network.RandomizeWB();



  // Weights
  ConstConfigurationPtr weight_config = model_config->getSubsection( "Weight", false );

  if ( ! weight_config ) {

    return;
  }

  std::vector<double> weight_values = weight_config->getAttributeAsVecDouble( "Values" );


  unsigned int siz = 0;

  for ( int i = 0; i < 3; i++ ) {

    for ( int j = 0; j < layers[i]; j++ ) {

      if ( i != 2 ) { // The last layer has no outgoing weights

        for ( int k = 0; k < layers[i+1]; k++ ) {

          network.setWeight( i, j, k, weight_values[siz] ); // Set weight
          siz++;
        }
      }
    }
  }


  // Biases
  ConstConfigurationPtr bias_config = model_config->getSubsection( "Bias", false );

  if ( ! bias_config ) {

    return;
  }

  std::vector<double> bias_values = bias_config->getAttributeAsVecDouble( "ValuesBias" );


  siz = 0;

  for ( int i = 0; i < 3; i++ ) {

    for ( int j = 0; j < layers[i]; j++ ) {

      if ( i != 0 ) { // The input layer has no biases

        network.setBias( i, j, bias_values[siz] ); // Set bias
        siz++;
      }
    }
  }

  _done = true;
}