Thanks for the reply @xchoo
Yes! Below is demonstration source code together with the network's response.
The code is very simple, but it is already enough to show the problems I mentioned, and the same thing happens if we add more neurons to the layers. In this case, the input node of the network is the derivative of a force waveform from an FSR sensor. The metrics produced by this simple code are very good, but I can't understand how NengoDL handles the ensemble parameters internally!
Code:
import datetime
import warnings

import numpy as np
import tensorflow as tf

import nengo
import nengo_dl
# Simulation parameters
sim_run = 80    # Simulation time (s)
s_time = 0.005  # Sample time (s)
s_freq = 200    # Sample frequency (Hz)
# Import training data
data = np.genfromtxt(
    'C:\\localdata\\traindata.txt',
    delimiter=' ')
f = data.T[0, 0:40000]      # FSR force
slip = data.T[1, 0:40000]   # Ground truth
f_der = data.T[3, 0:40000]  # Force derivative
# Import test data
dataTest = np.genfromtxt(
    'C:\\localdata\\testdata.txt',
    delimiter=' ')
f_test = dataTest.T[0, 0:40000]      # FSR force
slip_test = dataTest.T[1, 0:40000]   # Ground truth
f_der_test = dataTest.T[3, 0:40000]  # Force derivative
# General parameters for the ensembles
ens_params2 = dict(
    dimensions=1,
    neuron_type=nengo.LIF(tau_rc=0.02, tau_ref=0.002, min_voltage=0, amplitude=1),
)
warnings.simplefilter("ignore")
with nengo.Network(seed=0) as model:
    # Input node presenting one data sample per timestep
    inpt = nengo.Node(output=nengo.processes.PresentInput(f_der, s_time))
    # First ensemble
    SNN1 = nengo.Ensemble(1, **ens_params2)
    # Connect everything together
    nengo.Connection(inpt, SNN1)
    # Probe the input and the output; snn1_p is the output probe
    inpt_p = nengo.Probe(inpt)
    snn1_p = nengo.Probe(SNN1)
    # Set all the connection synapses to None (no filtering)
    for conn in model.all_connections:
        conn.synapse = None
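    # Note: with every synapse set to None and the simulator dt equal to the
    # PresentInput presentation time, each input sample is processed in exactly
    # one simulation timestep, so training sees a one-step feedforward mapping.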
with model:
    # Reshape the training data
    inp_dr = np.reshape(f_der, (f_der.size, 1, 1))
    inp_slip = np.reshape(slip, (slip.size, 1, 1))
    print('-------- Shape of training inputs --------')
    print('Shape inp_dr: ', inp_dr.shape)
    print('Shape inp_slip: ', inp_slip.shape)
    # Create the training data dictionaries
    train_inputs = {inpt: inp_dr}
    train_targets = {snn1_p: inp_slip}
    # Reshape the test data
    inp_dr_test = np.reshape(f_der_test, (f_der_test.size, 1, 1))
    inp_slip_test = np.reshape(slip_test, (slip_test.size, 1, 1))
    print('\n')
    print('-------- Shape of test inputs --------')
    print('Shape inp_dr_test: ', inp_dr_test.shape)
    print('Shape inp_slip_test: ', inp_slip_test.shape)
    # Create the test data dictionaries
    test_inputs = {inpt: inp_dr_test}
    test_targets = {snn1_p: inp_slip_test}
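    # NengoDL's fit/evaluate expect data shaped (n_batch_items, n_steps,
    # n_dims), so the (40000, 1, 1) reshape treats every sample as an
    # independent one-timestep example rather than as one long sequence.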

def test_mse(y_true, y_pred):
    # Mean squared error over all elements, used for evaluation
    return tf.reduce_mean(tf.square(y_pred - y_true))

# The number of simultaneous inputs that will be passed through the network.
minibatch_size = 32
with nengo_dl.Simulator(model, minibatch_size=minibatch_size, seed=0, dt=s_time) as sim:
    # Get SNN1 parameters with sim.data[] before training
    snn1_data = sim.data[SNN1]
    # Get SNN1 parameters with sim.get_nengo_params() before training
    paramsBeforeSNN1 = sim.get_nengo_params([SNN1])
    # Configure the model for evaluation before training
    sim.compile(loss={snn1_p: test_mse},
                metrics={snn1_p: [tf.keras.metrics.Accuracy(),
                                  tf.keras.metrics.AUC(),
                                  tf.keras.metrics.FalseNegatives(),
                                  tf.keras.metrics.FalsePositives(),
                                  tf.keras.metrics.TrueNegatives(),
                                  tf.keras.metrics.TruePositives(),
                                  tf.keras.metrics.Recall(),
                                  tf.keras.metrics.Precision()]})
    # Verbosity mode: 0 = silent, 1 = progress bar, 2 = one line per epoch
    evaluate_b = sim.evaluate(test_inputs, test_targets, verbose=0)

    # Network name and save path, defined before the if/else so that
    # load_params still works when do_training is False
    nameNet = "1-neuronNFP-F-5ep"
    nameNetSv = "./logs/1-neuronNFP-F-5ep"
    ep = 5  # Number of training epochs

    # If do_training is True, train the network;
    # if False, load previously trained parameters instead
    do_training = True
    if do_training:
        print('\n')
        print("#### Training ####")
        sim.compile(optimizer=tf.optimizers.Adam(0.01),
                    loss={snn1_p: tf.losses.mse},
                    metrics={snn1_p: [tf.keras.metrics.Accuracy()]})
        # TensorBoard callbacks
        log_dir = "logs/fit/" + nameNet + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        tb_keras_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
        tb_nengo_callback = nengo_dl.callbacks.NengoSummaries(log_dir, sim, [SNN1, SNN1.neurons])
        # Train the model
        history = sim.fit(train_inputs, train_targets, epochs=ep,
                          callbacks=[tb_keras_callback, tb_nengo_callback])
        # Save the trained parameters to file
        sim.save_params(nameNetSv)
    else:
        # Load previously trained parameters
        sim.load_params(nameNetSv)

    # Configure the model for evaluation after training
    sim.compile(loss={snn1_p: test_mse},
                metrics={snn1_p: [tf.keras.metrics.Accuracy(),
                                  tf.keras.metrics.AUC(),
                                  tf.keras.metrics.FalseNegatives(),
                                  tf.keras.metrics.FalsePositives(),
                                  tf.keras.metrics.TrueNegatives(),
                                  tf.keras.metrics.TruePositives(),
                                  tf.keras.metrics.Recall(),
                                  tf.keras.metrics.Precision()]})
    evaluate_a = sim.evaluate(test_inputs, test_targets, verbose=0)
    # Get SNN1 parameters with sim.get_nengo_params() after training
    paramsAfterSNN1 = sim.get_nengo_params([SNN1])
    print('#### Neural Network: ', nameNet, ' ####')
    print('\n')
    print('#### Metrics Data Before Training ####')
    print('MSE: ', evaluate_b["loss"])
    print('Accuracy: ', evaluate_b["probe_1_accuracy"])
    print('AUC: ', evaluate_b["probe_1_auc"])
    print('False Negatives: ', evaluate_b["probe_1_false_negatives"])
    print('False Positives: ', evaluate_b["probe_1_false_positives"])
    print('True Negatives: ', evaluate_b["probe_1_true_negatives"])
    print('True Positives: ', evaluate_b["probe_1_true_positives"])
    print('Recall: ', evaluate_b["probe_1_recall"])
    print('Precision: ', evaluate_b["probe_1_precision"])
    print('\n')
    print('#### Metrics Data After Training ####')
    print('MSE: ', evaluate_a["loss"])
    print('Accuracy: ', evaluate_a["probe_1_accuracy"])
    print('AUC: ', evaluate_a["probe_1_auc_1"])
    print('False Negatives: ', evaluate_a["probe_1_false_negatives_1"])
    print('False Positives: ', evaluate_a["probe_1_false_positives_1"])
    print('True Negatives: ', evaluate_a["probe_1_true_negatives_1"])
    print('True Positives: ', evaluate_a["probe_1_true_positives_1"])
    print('Recall: ', evaluate_a["probe_1_recall_1"])
    print('Precision: ', evaluate_a["probe_1_precision_1"])
    print('\n')
    print('################### LAYER 1 #####################')
    print('#### Parameters Layer1 Before Training (sim.data) ####')
    print('Encoders: ', snn1_data.encoders)
    print('Intercepts: ', snn1_data.intercepts)
    print('Max Rates: ', snn1_data.max_rates)
    print('Scaled Encoders: ', snn1_data.scaled_encoders)
    print('Gain: ', snn1_data.gain)
    print('Bias: ', snn1_data.bias)
    print('\n')
    print('#### Parameters Layer1 After Training (sim.data) ####')
    print('Encoders: ', sim.data[SNN1].encoders)
    print('Intercepts: ', sim.data[SNN1].intercepts)
    print('Max Rates: ', sim.data[SNN1].max_rates)
    print('Scaled Encoders: ', sim.data[SNN1].scaled_encoders)
    print('Gain: ', sim.data[SNN1].gain)
    print('Bias: ', sim.data[SNN1].bias)
    print('\n')
    print('#### Parameters Layer1 Before Training (get_nengo_params) ####')
    print('Encoders: ', paramsBeforeSNN1[0]['encoders'])
    print('Intercepts: ', paramsBeforeSNN1[0]['intercepts'])
    print('Max Rates: ', paramsBeforeSNN1[0]['max_rates'])
    print('Normalize encoders: ', paramsBeforeSNN1[0]['normalize_encoders'])
    print('Gain: ', paramsBeforeSNN1[0]['gain'])
    print('Bias: ', paramsBeforeSNN1[0]['bias'])
    print('\n')
    print('#### Parameters Layer1 After Training (get_nengo_params) ####')
    print('Encoders: ', paramsAfterSNN1[0]['encoders'])
    print('Intercepts: ', paramsAfterSNN1[0]['intercepts'])
    print('Max Rates: ', paramsAfterSNN1[0]['max_rates'])
    print('Normalize encoders: ', paramsAfterSNN1[0]['normalize_encoders'])
    print('Gain: ', paramsAfterSNN1[0]['gain'])
    print('Bias: ', paramsAfterSNN1[0]['bias'])
Response of the training:
#### Neural Network: 1-neuronNFP-F-5ep ####
#### Metrics Data Before Training ####
MSE: 0.0018431361531838775
Accuracy: 0.992133617401123
AUC: 0.9989702701568604
False Negatives: 0.0
False Positives: 19.0
True Negatives: 9207.0
True Positives: 54.0
Recall: 1.0
Precision: 0.7397260069847107
#### Metrics Data After Training ####
MSE: 0.001441666390746832
Accuracy: 0.993534505367279
AUC: 0.9996747970581055
False Negatives: 0.0
False Positives: 6.0
True Negatives: 9220.0
True Positives: 54.0
Recall: 1.0
Precision: 0.8999999761581421
################### LAYER 1 #####################
#### Parameters Layer1 Before Training (sim.data) ####
Encoders: [[1.]]
Intercepts: [0.4569144]
Max Rates: [346.5095]
Scaled Encoders: [[40.654743]]
Gain: [40.654743]
Bias: [-17.575737]
#### Parameters Layer1 After Training (sim.data) ####
Encoders: [[1.]]
Intercepts: [0.5316207]
Max Rates: [326.63724]
Scaled Encoders: [[39.16842]]
Gain: [39.16842]
Bias: [-19.822742]
#### Parameters Layer1 Before Training (get_nengo_params) ####
Encoders: [[1.]]
Intercepts: Uniform(low=-1.0, high=0.9)
Max Rates: Uniform(low=200, high=400)
Normalize encoders: False
Gain: [40.654743]
Bias: [-17.575737]
#### Parameters Layer1 After Training (get_nengo_params) ####
Encoders: [[0.9634403]]
Intercepts: Uniform(low=-1.0, high=0.9)
Max Rates: Uniform(low=200, high=400)
Normalize encoders: False
Gain: [40.654743]
Bias: [-19.822742]
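One observation from the numbers above that may be relevant: get_nengo_params appears to fold the trained weights into the encoders while reporting the pre-training gain, so that gain * encoders reproduces the trained scaled_encoders from sim.data. A quick sanity check, using only the values printed in this run:

import numpy as np

gain_before = 40.654743      # gain reported by get_nengo_params (unchanged)
enc_after = 0.9634403        # trained encoder from get_nengo_params
scaled_enc_after = 39.16842  # trained scaled encoder from sim.data

# gain * encoder should reproduce the trained scaled encoder
print(np.isclose(gain_before * enc_after, scaled_enc_after, atol=1e-4))  # True

If that relationship holds in general (I am inferring it from this single run and the NengoDL documentation), the dictionaries returned by get_nengo_params can be passed straight back into a plain Nengo ensemble, e.g. nengo.Ensemble(1, 1, **paramsAfterSNN1[0]), to recreate the trained network.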