forked from peter-ch/MultiNEAT
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathNeuralNetwork.h
More file actions
229 lines (184 loc) · 7.04 KB
/
NeuralNetwork.h
File metadata and controls
229 lines (184 loc) · 7.04 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
#ifndef _PHENOTYPE_H
#define _PHENOTYPE_H
///////////////////////////////////////////////////////////////////////////////////////////
// MultiNEAT - Python/C++ NeuroEvolution of Augmenting Topologies Library
//
// Copyright (C) 2012 Peter Chervenski
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program. If not, see < http://www.gnu.org/licenses/ >.
//
// Contact info:
//
// Peter Chervenski < spookey@abv.bg >
// Shane Ryan < shane.mcdonald.ryan@gmail.com >
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// File: NeuralNetwork.h
// Description: Definition for the phenotype data structures.
///////////////////////////////////////////////////////////////////////////////
#ifdef USE_BOOST_PYTHON
#include <boost/python.hpp>
#include <boost/python/numeric.hpp>
#include <boost/python/tuple.hpp>
#include <math.h>
#include <cmath>
namespace py = boost::python;
#endif
#include <vector>
#include "Genes.h"
namespace NEAT
{
class Connection
{
public:
    unsigned short int m_source_neuron_idx;    // index of the neuron feeding this connection
    unsigned short int m_target_neuron_idx;    // index of the neuron receiving the signal
    double m_weight;                           // connection weight
    double m_signal;                           // weight * input signal
    bool m_recur_flag;                         // recurrence flag, used for display purposes
                                               // only - may be ignored
    // Hebbian learning parameters
    // (unused when there is no lifetime learning)
    double m_hebb_rate;
    double m_hebb_pre_rate;

    // Equality comparison (necessary for boost::python).
    // Two connections are considered equal when they link the same pair of
    // neurons; the weight and the recurrence flag are deliberately ignored.
    bool operator==(Connection const& other) const
    {
        return (m_source_neuron_idx == other.m_source_neuron_idx) &&
               (m_target_neuron_idx == other.m_target_neuron_idx);
    }
};
class Neuron
{
public:
    double m_activesum;   // accumulated synaptic input
    double m_activation;  // synaptic input after passing through the activation function
    double m_a, m_b, m_timeconst, m_bias; // misc parameters
    double m_membrane_potential;          // used in leaky integrator mode
    ActivationFunction m_activation_function_type;

    // coordinates, used for displaying and such
    double m_x, m_y, m_z;
    double m_sx, m_sy, m_sz;
    std::vector<double> m_substrate_coords;
    double m_split_y;
    NeuronType m_type;

    // the sensitivity matrix of this neuron (for RTRL learning)
    std::vector< std::vector< double > > m_sensitivity_matrix;

    // Equality comparison (necessary for boost::python).
    // NOTE(review): m_split_y is compared with exact floating-point
    // equality - assumed intentional (values are copied, not computed);
    // verify if a tolerance is ever needed.
    bool operator==(Neuron const& other) const
    {
        return (m_type == other.m_type) &&
               (m_split_y == other.m_split_y) &&
               (m_activation_function_type == other.m_activation_function_type);
    }
};
class NeuralNetwork
{
/////////////////////
// RTRL variables
double m_total_error;
// Always the size of m_connections
std::vector<double> m_total_weight_change;
/////////////////////
// returns the index if that connection exists or -1 otherwise
int ConnectionExists(int a_to, int a_from);
public:
unsigned int m_num_inputs, m_num_outputs;
std::vector<Connection> m_connections; // array size - number of connections
std::vector<Neuron> m_neurons;
NeuralNetwork(bool a_Minimal); // if given false, the constructor will create a standard XOR network topology.
NeuralNetwork();
void InitRTRLMatrix(); // initializes the sensitivity cube for RTRL learning.
// assumes that neuron and connection data are already initialized
void ActivateFast(); // assumes unsigned sigmoids everywhere.
void Activate(); // any activation functions are supported
void ActivateUseInternalBias(); // like Activate() but uses m_bias as well
void ActivateLeaky(double step); // activates in leaky integrator mode
void RTRL_update_gradients();
void RTRL_update_error(double a_target);
void RTRL_update_weights(); // performs the backprop step
// Hebbian learning
void Adapt(Parameters& a_Parameters);
void Flush(); // clears all activations
void FlushCube(); // clears the sensitivity cube
void Input(std::vector<double>& a_Inputs);
#ifdef USE_BOOST_PYTHON
void Input_python_list(py::list& a_Inputs);
void Input_numpy(py::numeric::array& a_Inputs);
#endif
std::vector<double> Output();
// accessor methods
void AddNeuron(const Neuron& a_n) { m_neurons.push_back( a_n ); }
void AddConnection(const Connection& a_c) { m_connections.push_back( a_c ); }
Connection GetConnectionByIndex(unsigned int a_idx) const
{
return m_connections[a_idx];
}
Neuron GetNeuronByIndex(unsigned int a_idx) const
{
return m_neurons[a_idx];
}
void SetInputOutputDimentions(const unsigned short a_i, const unsigned short a_o)
{
m_num_inputs = a_i;
m_num_outputs = a_o;
}
unsigned int NumInputs() const
{
return m_num_inputs;
}
unsigned int NumOutputs() const
{
return m_num_outputs;
}
// clears the network and makes it a minimal one
void Clear()
{
m_neurons.clear();
m_connections.clear();
m_total_weight_change.clear();
SetInputOutputDimentions(0, 0);
}
double GetConnectionLenght(Neuron source, Neuron target)
{ double dist = 0.0;
for (unsigned int i = 0; i < source.m_substrate_coords.size(); i++)
{
dist += (target.m_substrate_coords[i] - source.m_substrate_coords[i]) *
(target.m_substrate_coords[i] - source.m_substrate_coords[i] );
}
return dist;
}
double GetTotalConnectionLength()
{
return m_connections.size();
}
// one-shot save/load
void Save(const char* a_filename);
bool Load(const char* a_filename);
// save/load from already opened files for reading/writing
void Save(FILE* a_file);
bool Load(std::ifstream& a_DataFile);
};
}; // namespace NEAT
#endif