Commit ddf34a3

Rename neuron::batchSize to numberOfUses
1 parent: 0570d8c

6 files changed: +28 −31 lines changed
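Reading the hunks below, the renamed field never described a training batch: the new comment in Neuron.hpp defines it as the number of times a single neuron is evaluated to produce one output of its layer. That count is 1 for fully connected, recurrent, and GRU layers, and numberOfKernelsPerFilter for convolution layers, where the same filter neuron is reused at every kernel position, so numberOfUses is the more accurate name.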

include/snn/neural_network/layer/LayerFactory.hpp

Lines changed: 11 additions & 14 deletions
@@ -18,7 +18,7 @@ extern auto Input(TInt... sizeOfInput) -> LayerModel
             .numberOfNeurons = 0,
             .numberOfOutputs = 0,
             .neuron = {.numberOfInputs = 0,
-                       .batchSize = 0,
+                       .numberOfUses = 0,
                        .numberOfWeights = 0,
                        .bias = 0,
                        .activationFunction = activation::identity},
@@ -40,7 +40,7 @@ auto FullyConnected(int numberOfNeurons, activation activation = activation::sig
             .numberOfNeurons = numberOfNeurons,
             .numberOfOutputs = -1,
             .neuron = {.numberOfInputs = -1,
-                       .batchSize = -1,
+                       .numberOfUses = -1,
                        .numberOfWeights = -1,
                        .bias = 1.0F,
                        .activationFunction = activation},
@@ -61,7 +61,7 @@ auto Recurrence(int numberOfNeurons, activation activation = activation::tanh, T
             .numberOfNeurons = numberOfNeurons,
             .numberOfOutputs = -1,
             .neuron = {.numberOfInputs = -1,
-                       .batchSize = -1,
+                       .numberOfUses = -1,
                        .numberOfWeights = -1,
                        .bias = 1.0F,
                        .activationFunction = activation},
@@ -84,7 +84,7 @@ auto GruLayer(int numberOfNeurons, TOptimizer... optimizers) -> LayerModel
             .neuron =
                 {
                     .numberOfInputs = -1,
-                    .batchSize = -1,
+                    .numberOfUses = -1,
                     .numberOfWeights = -1,
                     .bias = 1.0F,
                     .activationFunction = activation::tanh,
@@ -106,7 +106,7 @@ auto MaxPooling(int kernelSize) -> LayerModel
             .numberOfNeurons = 0,
             .numberOfOutputs = -1,
             .neuron = {.numberOfInputs = 0,
-                       .batchSize = 0,
+                       .numberOfUses = 0,
                        .numberOfWeights = 0,
                        .bias = 0.0F,
                        .activationFunction = activation::identity},
@@ -128,7 +128,7 @@ auto LocallyConnected(int numberOfLocallyConnected, int kernelSize, activation a
             .numberOfNeurons = -1,
             .numberOfOutputs = -1,
             .neuron = {.numberOfInputs = -1,
-                       .batchSize = -1,
+                       .numberOfUses = -1,
                        .numberOfWeights = -1,
                        .bias = almostZero,
                        .activationFunction = activation},
@@ -149,14 +149,11 @@ auto Convolution(int numberOfConvolution, int kernelSize, activation activation
             .numberOfInputs = -1,
             .numberOfNeurons = 1,
             .numberOfOutputs = -1,
-            .neuron =
-                {
-                    .numberOfInputs = -1,
-                    .batchSize = -1,
-                    .numberOfWeights = -1,
-                    .bias = bias,
-                    .activationFunction = activation,
-                },
+            .neuron = {.numberOfInputs = -1,
+                       .numberOfUses = -1,
+                       .numberOfWeights = -1,
+                       .bias = bias,
+                       .activationFunction = activation},
             .numberOfFilters = numberOfConvolution,
             .numberOfKernels = -1,
             .numberOfKernelsPerFilter = -1,
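For orientation, a minimal sketch of how these factory functions might be combined. The snn::LayerModel spelling, the std::vector aggregation, and the default arguments assumed for Convolution are illustrative assumptions, not part of this commit.

// Hypothetical usage sketch (not from this commit): building a list of layer
// descriptions with the factory functions changed above.
#include <vector>
#include "snn/neural_network/layer/LayerFactory.hpp"

auto describeNetwork() -> std::vector<snn::LayerModel>
{
    using namespace snn;
    return {
        Input(28, 28, 1),                        // input layer: neuron.numberOfUses stays 0
        Convolution(8, 3),                       // assumes default activation/bias; numberOfUses is set later
        FullyConnected(10, activation::sigmoid), // numberOfUses becomes 1 in LayerFactory::build
    };
}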

include/snn/neural_network/layer/neuron/Neuron.hpp

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,7 @@ class Neuron

 protected:
     int numberOfInputs{};
-    int batchSize{};
+    int numberOfUses{}; // Represents the number of times the neuron is used for one output of a layer.
     std::vector<float> weights;
     float bias{};

@@ -65,7 +65,7 @@ void Neuron::serialize(Archive& archive, [[maybe_unused]] const uint32_t version
     archive.template register_type<StochasticGradientDescent>();
     archive& this->optimizer;
     archive& this->numberOfInputs;
-    archive& this->batchSize;
+    archive& this->numberOfUses;
     archive& this->weights;
     archive& this->bias;
     archive& this->activationFunction;

include/snn/neural_network/layer/neuron/NeuronModel.hpp

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ namespace snn
 struct NeuronModel
 {
     int numberOfInputs = -1;
-    int batchSize = -1;
+    int numberOfUses = -1;
     int numberOfWeights = -1;
     float bias = 1.0F;
     activation activationFunction{};
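Because NeuronModel is a plain aggregate, the rename only matters where the field is referred to by name (designated initializers and member access); positional braced initialization, as used for the three gates in GatedRecurrentUnit.cpp further down, is unaffected as long as the field order stays the same. A small sketch, assuming the activation enum is reachable as snn::activation:

#include "snn/neural_network/layer/neuron/NeuronModel.hpp"

// Illustrative values only; the field order matches the struct above.
const snn::NeuronModel byName{.numberOfInputs = 4,
                              .numberOfUses = 1,
                              .numberOfWeights = 5,
                              .bias = 1.0F,
                              .activationFunction = snn::activation::sigmoid};

// Positional form, as in the GatedRecurrentUnit constructor: the second slot
// now fills numberOfUses instead of batchSize, but the call site is unchanged.
const snn::NeuronModel byPosition{4, 1, 5, 1.0F, snn::activation::sigmoid};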

src/neural_network/layer/LayerFactory.cpp

Lines changed: 7 additions & 7 deletions
@@ -97,21 +97,21 @@ inline auto LayerFactory::build(LayerModel& model, std::vector<int>& shapeOfInpu
                 throw InvalidArchitectureException("Input of layer has size of 0.");
             }
             model.neuron.numberOfInputs = model.numberOfInputs;
-            model.neuron.batchSize = 1;
+            model.neuron.numberOfUses = 1;
             model.neuron.numberOfWeights = model.neuron.numberOfInputs + 1; // for the bias
             model.numberOfOutputs = model.numberOfNeurons;
             return std::make_unique<FullyConnected>(model, optimizer);

         case recurrence:
             model.neuron.numberOfInputs = model.numberOfInputs;
-            model.neuron.batchSize = 1;
+            model.neuron.numberOfUses = 1;
             model.neuron.numberOfWeights = model.neuron.numberOfInputs + 2;
             model.numberOfOutputs = model.numberOfNeurons;
             return std::make_unique<Recurrence>(model, optimizer);

         case gruLayer:
             model.neuron.numberOfInputs = model.numberOfInputs;
-            model.neuron.batchSize = 1;
+            model.neuron.numberOfUses = 1;
             model.neuron.numberOfWeights = model.neuron.numberOfInputs + 2;
             model.numberOfOutputs = model.numberOfNeurons;
             return std::make_unique<GruLayer>(model, optimizer);
@@ -168,7 +168,7 @@ inline auto LayerFactory::build(LayerModel& model, std::vector<int>& shapeOfInpu
             model.numberOfKernels = model.numberOfNeurons;
             model.numberOfKernelsPerFilter = model.numberOfKernels / model.numberOfFilters;
             model.neuron.numberOfInputs = model.kernelSize * model.shapeOfInput[C];
-            model.neuron.batchSize = 1;
+            model.neuron.numberOfUses = 1;
             model.neuron.numberOfWeights = model.neuron.numberOfInputs + 1;
             model.numberOfOutputs = model.numberOfNeurons;
             return std::make_unique<LocallyConnected1D>(model, optimizer);
@@ -185,7 +185,7 @@ inline auto LayerFactory::build(LayerModel& model, std::vector<int>& shapeOfInpu
             model.numberOfKernels = model.numberOfNeurons;
             model.numberOfKernelsPerFilter = model.numberOfKernels / model.numberOfFilters;
             model.neuron.numberOfInputs = model.kernelSize * model.kernelSize * model.shapeOfInput[C];
-            model.neuron.batchSize = 1;
+            model.neuron.numberOfUses = 1;
             model.neuron.numberOfWeights = model.neuron.numberOfInputs + 1;
             model.numberOfOutputs = model.numberOfNeurons;
             return std::make_unique<LocallyConnected2D>(model, optimizer);
@@ -213,7 +213,7 @@ inline auto LayerFactory::build(LayerModel& model, std::vector<int>& shapeOfInpu
                 computeNumberOfKernelsForConvolution1D(model.numberOfFilters, model.shapeOfInput);
             model.numberOfKernelsPerFilter = model.numberOfKernels / model.numberOfFilters;
             model.neuron.numberOfInputs = model.kernelSize * model.shapeOfInput[C];
-            model.neuron.batchSize = model.numberOfKernelsPerFilter;
+            model.neuron.numberOfUses = model.numberOfKernelsPerFilter;
             model.neuron.numberOfWeights = model.neuron.numberOfInputs + 1;
             model.numberOfOutputs = model.numberOfNeurons;
             return std::make_unique<Convolution1D>(model, optimizer);
@@ -230,7 +230,7 @@ inline auto LayerFactory::build(LayerModel& model, std::vector<int>& shapeOfInpu
                 computeNumberOfKernelsForConvolution2D(model.numberOfFilters, model.shapeOfInput);
             model.numberOfKernelsPerFilter = model.numberOfKernels / model.numberOfFilters;
             model.neuron.numberOfInputs = model.kernelSize * model.kernelSize * model.shapeOfInput[C];
-            model.neuron.batchSize = model.numberOfKernelsPerFilter;
+            model.neuron.numberOfUses = model.numberOfKernelsPerFilter;
             model.neuron.numberOfWeights = model.neuron.numberOfInputs + 1;
             model.numberOfOutputs = model.numberOfNeurons;
             return std::make_unique<Convolution2D>(model, optimizer);
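The two convolution branches are the only places where numberOfUses ends up greater than 1: one shared neuron per filter is re-applied at every kernel position. A worked example, where the concrete numbers are illustrative assumptions rather than values from this diff:

// Assumed example for the Convolution2D case: 4 filters, with
// computeNumberOfKernelsForConvolution2D yielding 144 kernel positions in total.
int numberOfFilters = 4;
int numberOfKernels = 144;                                        // assumed, depends on input shape
int numberOfKernelsPerFilter = numberOfKernels / numberOfFilters; // 36 positions per filter
int numberOfUses = numberOfKernelsPerFilter;                      // each filter neuron runs 36 times per forward pass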

src/neural_network/layer/neuron/GatedRecurrentUnit.cpp

Lines changed: 3 additions & 3 deletions
@@ -7,11 +7,11 @@ namespace snn::internal
 {
 GatedRecurrentUnit::GatedRecurrentUnit(NeuronModel model, std::shared_ptr<NeuralNetworkOptimizer> optimizer)
     : numberOfInputs(model.numberOfInputs),
-      resetGate({model.numberOfInputs, model.batchSize, model.numberOfWeights, model.bias, activation::sigmoid},
+      resetGate({model.numberOfInputs, model.numberOfUses, model.numberOfWeights, model.bias, activation::sigmoid},
                 optimizer),
-      updateGate({model.numberOfInputs, model.batchSize, model.numberOfWeights, model.bias, activation::sigmoid},
+      updateGate({model.numberOfInputs, model.numberOfUses, model.numberOfWeights, model.bias, activation::sigmoid},
                 optimizer),
-      outputGate({model.numberOfInputs, model.batchSize, model.numberOfWeights, model.bias, activation::tanh},
+      outputGate({model.numberOfInputs, model.numberOfUses, model.numberOfWeights, model.bias, activation::tanh},
                 optimizer)
 {
 }

src/neural_network/layer/neuron/Neuron.cpp

Lines changed: 4 additions & 4 deletions
@@ -8,7 +8,7 @@ namespace snn::internal
 {
 Neuron::Neuron(NeuronModel model, std::shared_ptr<NeuralNetworkOptimizer> optimizer)
     : numberOfInputs(model.numberOfInputs),
-      batchSize(model.batchSize),
+      numberOfUses(model.numberOfUses),
       bias(model.bias),
       activationFunction(model.activationFunction),
       optimizer(std::move(optimizer))
@@ -79,9 +79,9 @@ void Neuron::resetLearningVariables()
 {
     this->deltaWeights.assign(this->weights.size(), 0.0F);
     this->errors.assign(this->numberOfInputs, 0.0F);
-    this->lastInputs.initialize(this->batchSize, this->numberOfInputs);
-    this->lastError.initialize(this->batchSize);
-    this->lastSum.initialize(this->batchSize);
+    this->lastInputs.initialize(this->numberOfUses, this->numberOfInputs);
+    this->lastError.initialize(this->numberOfUses);
+    this->lastSum.initialize(this->numberOfUses);
 }

 auto Neuron::operator==(const Neuron& neuron) const -> bool
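With the rename, resetLearningVariables also reads more naturally: lastInputs keeps one row of numberOfInputs values per use of the neuron, and lastError and lastSum keep one entry per use, so a convolution neuron applied numberOfKernelsPerFilter times presumably caches a separate input vector, error, and pre-activation sum for each position during backpropagation.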
