Hello Nengo Community,

I hope you all are doing well.

I am trying to optimize the fire rates during the training using regularization as explained in the MNIST example. However, when I add the regularization in the training, my accuracy drops to 0. Here is my code…

with nengo.Network(seed=0) as net:

# set some default parameters for the neurons that will make

# the training progress more smoothly

net.config[nengo.Ensemble].max_rates = nengo.dists.Uniform(250, 400)

net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])

net.config[nengo.Connection].synapse = None

neuron_type = nengo.LIF(amplitude = 0.01)

# this is an optimization to improve the training speed,

# since we won’t require stateful behaviour in this example

nengo_dl.configure_settings(stateful=False)`# the input node that will be used to feed in input images inp = nengo.Node(np.zeros(1 * 256)) x = nengo_dl.Layer(tf.keras.layers.Dense(256))(inp, shape_in=(256)) x = nengo_dl.Layer(neuron_type)(x) out_Dense_1 = nengo.Probe(x, label="out_Dense_1") # linear readout out = nengo_dl.Layer(tf.keras.layers.Dense(units=no_of_classess))(x) out_p = nengo.Probe(out, label="out_p") out_p_filt = nengo.Probe(out, synapse=0.1, label="out_p_filt")`

minibatch_size = 100

sim = nengo_dl.Simulator(net, minibatch_size=minibatch_size)

X_train = X_train[:, None, :]

Y_train = Y_train[:, None, None]

n_steps = 25 # timesteps.

X_test = np.tile(X_test[:, None, :], (1, n_steps, 1))

Y_test = np.tile(Y_test[:, None, None], (1, n_steps, 1))def classification_accuracy(y_true, y_pred):

return tf.metrics.sparse_categorical_accuracy(y_true[:, -1], y_pred[:, -1])sim.compile(loss={out_p_filt: classification_accuracy})

print(

“Accuracy before training:”,

sim.evaluate(X_test, {out_p_filt: Y_test}, verbose=0)[“loss”],

)

target_rate = 250

sim.compile(

optimizer=tf.optimizers.Adam(0.011),

loss={

out_p: tf.losses.SparseCategoricalCrossentropy(from_logits=True),

out_Dense_1: tf.losses.mse,

},

loss_weights={out_p: 1, out_Dense_1: 1e-3},

)

sim.fit(X_train, {out_p: Y_train,

out_Dense_1: np.ones((Y_train.shape[0], 1, out_Dense_1.size_in))* target_rate,}

, verbose=1,epochs=10)print(

“Accuracy after training:”,

sim.evaluate(X_test, {out_p_filt: Y_test}, verbose=1)[“loss”],

)

What I am trying to achieve is to have a firing rate of around let’s say 250 Hz. This makes the accuracy drops to 0.

Could someone please let me know where I am making the mistake?

Thank you so much for your answer.