openModeller  Version 1.5.0
nn.h
#ifndef _LIBNN_H
#define _LIBNN_H

#ifdef __cplusplus
extern "C" {
#endif

#ifndef WIN32
  #include <sys/time.h>
  #include <sys/resource.h>
#else
  #include <time.h>
#endif


#include <ctype.h>
#include <float.h>
#include <string.h>
#include <stdarg.h>


struct nn_parameter
{
  // General variables
  int inp;
  int hid;
  int outp;
  int pattern;
  double learning_rate;
  double momentum;

  // Choice variable: training by epoch or training by minimum error
  int choice;

  // Variable specific to training by epoch
  double epoch;

  // Variable specific to training by minimum error
  double minimum_error;
};
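
/* A hypothetical example of filling the parameter block above. The values,
 * and the assumption that choice selects 0 = train by epoch / 1 = train by
 * minimum error, are illustrative, not documented defaults:
 *
 *   nn_parameter p;
 *   p.inp = 2;  p.hid = 3;  p.outp = 1;  // 2-3-1 topology
 *   p.pattern = 4;                       // number of training patterns
 *   p.learning_rate = 0.5;
 *   p.momentum = 0.9;
 *   p.choice = 0;                        // assumed: train by epoch
 *   p.epoch = 1000;
 *   p.minimum_error = 0.01;
 */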


/*
 * Neural Network was based on the algorithm created by Paras Chopra.
 * Email: paras1987@gmail.com
 * Web: www.paraschopra.com
 *
 * Changes made to the algorithm are marked with A&F at the beginning of the comment.
 * Amended by Alex Oshika Avilla & Fabricio Augusto Rodrigues
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>   // For tanh
#include <vector>   // For the vector<double> parameters used below


/*A&F*/// returns a double in the range -1.0 to 1.0
#define RANDOM_CLAMP (((double)rand()-(double)rand())/10000000)

/*A&F*/// returns a double between 0 and 1
#define RANDOM_NUM ((double)rand()/(10000000+1))

using std::vector; // vector<double> is used by SetInputs() and Train()


/*A&F*/class Dendrite{

 public:

  // Weights of the neuron
  double d_weight;
  double d_weight_ancient; // Previous weight, kept for the momentum term
  double d_weight_actual;  // Current weight, kept for the momentum term
  unsigned long d_points_to; // The index of the neuron of the next layer to which it points

  Dendrite(double weight = 0.0, double weight_ancient = 0.0, double weight_actual = 0.0, unsigned long points_to = 0){ // Constructor

   d_weight = weight;
   d_weight_ancient = weight_ancient;
   d_weight_actual = weight_actual;
   d_points_to = points_to; // Give it an initial value
  }
};


class Neuron{

 public:

  unsigned long n_ID; // ID of a particular neuron in a layer; used to find a particular neuron in an array
  double n_value; // Value which the neuron is currently holding
  double n_bias;  // Bias of the neuron
  double n_delta; // Used in backpropagation; backprop specific

  Dendrite *Dendrites; // Dendrites

  // Constructor assigning initial values
  Neuron(unsigned long ID = 0, double value = 0.0, double bias = 0.0){

   n_ID = ID;
   n_value = value;
   n_bias = bias;
   n_delta = 0.0;
  }

  void SetDendrites(unsigned long dendrite){ // Allocate the given number of dendrites for this neuron

   Dendrites = new Dendrite[dendrite];

   for(unsigned long i = 0; i < dendrite; i++){

    Dendrites[i].d_points_to = i; // Initialize the dendrite to attach to the next layer
   }
  }
};


class Layer{

 public:

  Neuron *Neurons; // Pointer to array of neurons

  /*Layer(int size = 1){ // Size is no. of neurons in it

   Neurons = new Neuron [size];
  } */

  void Initialize(int size){ // Initialize the layer

   Neurons = new Neuron [size];
  }

  ~Layer(){ // Destructor deletes the neuron array from memory

   delete [] Neurons; // Array form of delete, to match new Neuron[size]
  }

  Neuron GetNeuron(int index){ // Return the neuron at index

   return Neurons[index];
  }

  void SetNeuron(Neuron neuron, int index){ // Set the neuron at index

   Neurons[index] = neuron;
  }
};


/*A&F*/class Network { // The real neural network

 public:

  double net_learning_rate; // Learning rate of the network
  Layer *Layers; // The layers of the network
  int net_tot_layers; // Number of layers
  double *net_inputs; // Input array
  double *net_outputs; // Output array
  int *net_layers; // Array which holds the number of neurons in each layer
  //double GetRand(void);

  double *square_error; // Squared error of each training pattern
  double *mean_square_error; // Accumulated sum over each epoch

  float progress; // Percent of training completed

  Network(){
   // Blank constructor
  }


  int SetData(double learning_rate, int layers[], int tot_layers){ // Function to set various parameters of the net

   if(tot_layers < 2) return(-1); // Return an error if the total no. of layers < 2,
                                  // because input and output layers are necessary

   net_learning_rate = learning_rate;

   net_layers = new int [tot_layers]; // Initialize the layers array

   Layers = new Layer[tot_layers];

   for(int i = 0; i < tot_layers; i++){

    net_layers[i] = layers[i];

    Layers[i].Initialize(layers[i]); // Initialize each layer with the specified size
   }

   net_inputs = new double[layers[0]];

   net_outputs = new double[layers[tot_layers-1]];

   net_tot_layers = tot_layers;

   return 0;
  }


  void SetInputs(vector<double> inputs) const{ // Function to set the inputs

   for(int i = 0; i < net_layers[0]; i++){

    Layers[0].Neurons[i].n_value = inputs[i];
   }
  }


  void RandomizeWB(void){ // Randomize weights and biases

   int i, j, k;

   for(i = 0; i < net_tot_layers; i++){

    for(j = 0; j < net_layers[i]; j++){

     if(i != net_tot_layers-1){ // The last layer does not require weights

      Layers[i].Neurons[j].SetDendrites(net_layers[i+1]); // Initialize the dendrites

      for(k = 0; k < net_layers[i+1]; k++){

       Layers[i].Neurons[j].Dendrites[k].d_weight = 0.000000; // Start the weight at zero
       Layers[i].Neurons[j].Dendrites[k].d_weight_ancient = 0.000000; // Start the previous weight at zero
       Layers[i].Neurons[j].Dendrites[k].d_weight_actual = GetRand(); // Start the current weight at a random value
      }
     }

     if(i != 0){ // The first layer does not need biases

      Layers[i].Neurons[j].n_bias = GetRand();
     }
    }
   }
  }


  double * GetOutput(void) const{ // Gives the output of the net

   double *outputs;
   int i, j, k;

   outputs = new double[net_layers[net_tot_layers-1]]; // Temporary output array

   for(i = 1; i < net_tot_layers; i++){

    for(j = 0; j < net_layers[i]; j++){

     Layers[i].Neurons[j].n_value = 0;

     for(k = 0; k < net_layers[i-1]; k++){

      Layers[i].Neurons[j].n_value = Layers[i].Neurons[j].n_value + Layers[i-1].Neurons[k].n_value * Layers[i-1].Neurons[k].Dendrites[j].d_weight; // Multiply and add all the inputs
     }

     Layers[i].Neurons[j].n_value = Layers[i].Neurons[j].n_value + Layers[i].Neurons[j].n_bias; // Add the bias

     Layers[i].Neurons[j].n_value = Limiter(Layers[i].Neurons[j].n_value); // Squash the value
    }
   }

   for(i = 0; i < net_layers[net_tot_layers-1]; i++){

    outputs[i] = Layers[net_tot_layers-1].Neurons[i].n_value;
   }

   return outputs; // Return the outputs
  }


  /******************************************************************/
  // Used by _GetConfiguration and _SetConfiguration in the library nn_alg.cpp

  double getWeight(int i, int j, int k) const{ // Get a weight

   return Layers[i].Neurons[j].Dendrites[k].d_weight;
  }


  void setWeight(int i, int j, int k, double w){ // Set a weight

   Layers[i].Neurons[j].Dendrites[k].d_weight = w;
  }


  double getBias(int i, int j) const{ // Get a bias

   return Layers[i].Neurons[j].n_bias;
  }


  void setBias(int i, int j, double b){ // Set a bias

   Layers[i].Neurons[j].n_bias = b;
  }
  /******************************************************************/


  void Update(void){ // Just a dummy function

   double *temp; // Temporary pointer
   temp = GetOutput();
   //GetOutput();
   delete[] temp;
  }


  /*void SetOutputs(double outputs[]){ // Set the values of the output layer

   for(unsigned long i = 0; i < net_layers[net_tot_layers-1]; i++){

    Layers[net_tot_layers-1].Neurons[i].n_value = outputs[i]; // Set the value
   }
  } */


  double Limiter(double value) const{ // Squashing function: the sigmoid limits the value to (0, 1)

   //return tanh(value); // To use the tanh function instead (range -1 to 1)
   return (1.0/(1+exp(-value))); // To use the sigmoid function
  }
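
  /* Illustrative values of the sigmoid above (not from the source):
   * Limiter(0.0) = 0.5, Limiter(2.0) is about 0.88, Limiter(-2.0) is about 0.12,
   * and the output approaches 1 (or 0) as the input grows large and positive (or negative). */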


  double GetRand(void){ // Return a random number in the range -1 to 1, using the time to seed srand

   int seconds;

#ifndef WIN32
   struct timeval time;
   gettimeofday( &time, (struct timezone *)NULL );
   seconds = (int)time.tv_usec;
#else
   time_t timer = time( NULL );
   struct tm *tblock = localtime(&timer);
   seconds = tblock->tm_sec;
#endif

   int seed = int(seconds + 100*RANDOM_CLAMP + 100*RANDOM_NUM);
   //srand(seconds);
   srand(seed);

#ifdef _GLIBCPP_HAVE_DRAND48
   srand48(seed);
#endif

   return ((RANDOM_CLAMP+RANDOM_NUM)/400);
  }


  double SigmaWeightDelta( unsigned long layer_no, unsigned long neuron_no){ // Calculate the sum of weight * delta for neuron neuron_no of layer layer_no. Used in backpropagation; both indices may be zero.

   double result = 0.0;

   for(int i = 0; i < net_layers[layer_no+1]; i++){ // Go through all the neurons in the next layer

    result = result + Layers[layer_no].Neurons[neuron_no].Dendrites[i].d_weight * Layers[layer_no+1].Neurons[i].n_delta; // Compute the summation
   }

   return result;
  }


/*
 For the output layer:

  Delta = (TargetO - ActualO) * ActualO * (1 - ActualO)
  Weight = Weight + LearningRate * Delta * Input

 For hidden layers:

  Delta = ActualO * (1 - ActualO) * Summation(Weight_from_current_to_next * Delta_of_next)
  Weight = Weight + LearningRate * Delta * Input
*/
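
/* A worked instance of the output-layer rule above, using illustrative
 * numbers (not from the source): with TargetO = 1.0, ActualO = 0.6 and
 * LearningRate = 0.5, Delta = (1.0 - 0.6) * 0.6 * (1 - 0.6) = 0.096, so a
 * weight fed by an input of 0.8 changes by 0.5 * 0.096 * 0.8 = 0.0384. */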


  void setError(int max_pattern){ // Function to reset the errors

   mean_square_error = new double;
   *mean_square_error = 0.000000;

   square_error = new double [max_pattern];

   for(int i = 0; i < max_pattern; i++){

    square_error[i] = 0.000000;
   }
  }


  void addError(int max_pattern){ // Function to add the errors

   for(int i = 0; i < max_pattern; i++){

    *mean_square_error = *mean_square_error + square_error[i]; // Accumulate the squared error of each pattern
   }

   delete[] square_error;
  }


  int Train(vector<double> inputs, vector<double> outputs, int number_pattern, int max_pattern, double momentum){ // The standard backprop learning algorithm

   int i, j, k;

   double *Target = new double;
   double *Delta = new double;
   double *Actual = new double;
   double *error = new double;


   SetInputs(inputs); // Set the inputs

   Update(); // Update all the values

   //SetOutputs(outputs); // Set the outputs


   if(number_pattern == 0){

    setError(max_pattern);
   }


   for(i = (net_tot_layers-1); i > 0; i--){ // Go from the last layer to the first layer

    for(j = 0; j < net_layers[i]; j++){ // Go through every neuron

     if(i == (net_tot_layers-1)){ // The output layer needs special attention

      (*Target) = outputs[j]; // Target value
      (*Actual) = Layers[i].Neurons[j].n_value; // Actual value

      (*Delta) = ((*Target) - (*Actual)) * (*Actual) * (1 - (*Actual)); // Compute the error

      Layers[i].Neurons[j].n_delta = (*Delta); // Store the delta


      for(k = 0; k < net_layers[i-1]; k++){

       Layers[i-1].Neurons[k].Dendrites[j].d_weight = ( Layers[i-1].Neurons[k].Dendrites[j].d_weight_actual) + ( net_learning_rate * (*Delta) * Layers[i-1].Neurons[k].n_value) + (momentum * ( Layers[i-1].Neurons[k].Dendrites[j].d_weight_actual - Layers[i-1].Neurons[k].Dendrites[j].d_weight_ancient)); // Calculate the new weight

       Layers[i-1].Neurons[k].Dendrites[j].d_weight_ancient = Layers[i-1].Neurons[k].Dendrites[j].d_weight_actual; // Shift the weight history for the momentum term
       Layers[i-1].Neurons[k].Dendrites[j].d_weight_actual = Layers[i-1].Neurons[k].Dendrites[j].d_weight;
      }

      Layers[i].Neurons[j].n_bias = Layers[i].Neurons[j].n_bias + (*Delta) * net_learning_rate * 1; // n_value is always 1 for the bias


      *error = ((*Target) - (*Actual));
     }
     else { // Hidden layers

      // Hidden neurons have no target value
      (*Actual) = Layers[i].Neurons[j].n_value; // Actual value

      (*Delta) = (*Actual) * (1 - (*Actual)) * SigmaWeightDelta(i,j); // Compute the error

      Layers[i].Neurons[j].n_delta = (*Delta); // Store the delta, needed by SigmaWeightDelta when there is more than one hidden layer


      for(k = 0; k < net_layers[i-1]; k++){

       Layers[i-1].Neurons[k].Dendrites[j].d_weight = ( Layers[i-1].Neurons[k].Dendrites[j].d_weight_actual) + ( net_learning_rate * (*Delta) * Layers[i-1].Neurons[k].n_value) + (momentum * ( Layers[i-1].Neurons[k].Dendrites[j].d_weight_actual - Layers[i-1].Neurons[k].Dendrites[j].d_weight_ancient)); // Calculate the new weight

       Layers[i-1].Neurons[k].Dendrites[j].d_weight_ancient = Layers[i-1].Neurons[k].Dendrites[j].d_weight_actual; // Shift the weight history for the momentum term
       Layers[i-1].Neurons[k].Dendrites[j].d_weight_actual = Layers[i-1].Neurons[k].Dendrites[j].d_weight;
      }


      if(i != 0){ // The input layer does not have a bias

       Layers[i].Neurons[j].n_bias = Layers[i].Neurons[j].n_bias + (*Delta) * net_learning_rate * 1; // n_value is always 1 for the bias
      }
     }
    }
   }


   square_error[number_pattern] = (*error) * (*error);


   if(number_pattern == (max_pattern - 1)){

    addError(max_pattern);
   }


   delete Target;
   delete Actual;
   delete Delta;
   delete error;


   return 0;
  }


  // Used for training by epoch
  void trainingEpoch( unsigned long actual_epoch, double epoch_total, int patterns){

   *mean_square_error = sqrt(*mean_square_error/patterns);

   progress = (float)(1.0-(epoch_total - (actual_epoch+1.0))/(epoch_total));

   delete mean_square_error;
  }


  // Used for training by minimum error
  int trainingMinimumError( int patterns, double min_error){

   int converg = 0;

   *mean_square_error = sqrt(*mean_square_error/patterns);

   if(*mean_square_error < min_error){

    converg = 1;
    progress = 1;
   }
   else{

    progress = (float)(1.0-(*mean_square_error - min_error)/(*mean_square_error));
   }

   delete mean_square_error;

   return converg;
  }


  // Percent of training completed
  float getProgress(){

   return progress;
  }


  ~Network(){ } //delete Layers; }

};


#ifdef __cplusplus
}
#endif

#endif /* _LIBNN_H */
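
A minimal usage sketch of the Network class above, assuming a 2-3-1 topology trained on XOR-style data; the layer sizes, learning rate, momentum and epoch count are illustrative, not values taken from openModeller:

 #include <vector>
 #include "nn.h"

 int main(){

  int layers[3] = {2, 3, 1}; // neurons in the input, hidden and output layers

  Network net;
  net.SetData(0.5, layers, 3); // learning rate 0.5
  net.RandomizeWB();           // random initial weights and biases

  std::vector<double> in(2), out(1);

  for(int e = 0; e < 1000; e++){ // train by epoch

   for(int p = 0; p < 4; p++){ // the 4 rows of the XOR truth table

    in[0] = p & 1;
    in[1] = (p >> 1) & 1;
    out[0] = (in[0] != in[1]) ? 1.0 : 0.0;

    net.Train(in, out, p, 4, 0.9); // momentum 0.9
   }

   net.trainingEpoch(e, 1000.0, 4); // update progress and reset the epoch error
  }

  double *res = net.GetOutput(); // output for the last pattern fed in
  delete[] res;                  // caller owns the returned array

  return 0;
 }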