Hi!

I am new to Nengo and am currently trying to build a 4-layer SNN for classification in Nengo core. The goal is to use this network for classification with a small number of classes (2-4). I am trying to learn from the NengoExtras single-layer MNIST example, but I am having difficulties. My attempted code is below. Any help would be very much appreciated. Thank you!

```
# --- imports (matplotlib, nengo, and nengo_extras are third-party)
import matplotlib.pyplot as plt
import numpy as np
from numpy import random
from nengo_extras.data import load_mnist, one_hot_from_labels
from nengo_extras.matplotlib import tile
from nengo_extras.vision import Gabor, Mask
# Fixed seed so the random encoders generated later are reproducible.
rng = np.random.RandomState(9)
import nengo
# X_*: flattened images, one row per example; y_*: integer class labels.
(X_train, y_train), (X_test, y_test) = load_mnist()
X_train = 2 * X_train - 1 # normalize to -1 to 1
X_test = 2 * X_test - 1 # normalize to -1 to 1
# One-hot training targets, one column per class (10 MNIST classes).
T_train = one_hot_from_labels(y_train, classes=10)
# --- set up network parameters
n_vis = X_train.shape[1]  # input dimensionality (pixels per image)
n_out = T_train.shape[1]  # number of output classes
# input layer
n_in = 784  # number of neurons in the input ensemble
# Shared ensemble parameters: rate-mode LIF neurons whose decoders are
# evaluated on the training images themselves.
ens_params = dict(
eval_points=X_train,
neuron_type=nengo.LIFRate(),
intercepts=nengo.dists.Choice([0.1]),
max_rates=nengo.dists.Choice([100]),
)
# Regularised least-squares solver shared by all decoded connections.
solver = nengo.solvers.LstsqL2(reg=0.01)
with nengo.Network(seed=3) as model:
    # --- layer 1: n_in LIFRate neurons encoding the n_vis-dimensional image
    a = nengo.Ensemble(n_in, n_vis, **ens_params)
    v = nengo.Node(size_in=n_vis)
    # identity decode of the represented image
    conn1 = nengo.Connection(a, v, synapse=None, solver=solver)

    # --- layer 2: fixed random projection n_vis -> 64, re-encoded by A
    # (the original network never connected the layers together)
    A = nengo.Ensemble(784, 64)
    nengo.Connection(v, A, synapse=None,
                     transform=rng.normal(size=(64, n_vis)) / np.sqrt(n_vis))
    v1 = nengo.Node(size_in=64)
    conn2 = nengo.Connection(A, v1, synapse=None, solver=solver)

    # --- layer 3: 64 -> 64 feed-through into B
    B = nengo.Ensemble(64, 64)
    nengo.Connection(v1, B, synapse=None)
    v2 = nengo.Node(size_in=64)
    # synapse=None and the regularised solver, consistent with conn1/conn2/conn4
    conn3 = nengo.Connection(B, v2, synapse=None, solver=solver)

    # --- output layer: random projection 64 -> n_out class dimensions
    C = nengo.Ensemble(64, n_out)
    nengo.Connection(v2, C, synapse=None,
                     transform=rng.normal(size=(n_out, 64)) / np.sqrt(64))
    v3 = nengo.Node(size_in=n_out)
    conn4 = nengo.Connection(C, v3, synapse=None, solver=solver)

    # NOTE(review): every connection above decodes the identity, so the
    # network is wired end-to-end but not yet a trained classifier. As in
    # the single-layer MNIST example, one connection's decoders must be
    # solved against the one-hot targets (eval_points=..., function=T_train)
    # for classification to happen — confirm the intended training scheme.
    # The original line `solver = nengo.solvers.Solver(weights=True)` was
    # removed: nengo.solvers.Solver is an abstract base class, and the
    # assignment came after every connection was created, so it had no effect.
# Building the simulator solves all connection decoders, which is all the
# static (tuning-curve based) evaluation below needs. Nothing in the model
# is probed, so the original sim.run(10) — ten seconds of simulated time —
# did no useful work and was removed.
with nengo.Simulator(model) as sim:
    pass
def get_outs(sim, images):
    """Return the decoded output-layer values for a batch of images.

    Parameters
    ----------
    sim : nengo.Simulator
        A built simulator; ``sim.data`` holds the solved decoders.
    images : array, one flattened image per row
        Inputs fed to the ensemble as static evaluation points.

    Returns
    -------
    array with one row per example and one column per output dimension.
    """
    # NOTE(review): `tuning_curves(..., inputs=images)` only matches an
    # ensemble whose dimensionality equals the image size (ensemble `a`).
    # The original also computed activities/decodes for a, A, and B into
    # npd1..npd3 but never used them — that dead code has been removed.
    # To evaluate the full layer chain, each layer's decoded output (after
    # the next connection's transform) must be fed as `inputs` to the next
    # ensemble — TODO confirm the intended evaluation scheme.
    _, acts = nengo.utils.ensemble.tuning_curves(C, sim, inputs=images)
    # Decode the activities with the output connection's solved decoders.
    return np.dot(acts, sim.data[conn4].weights.T)
def get_error(sim, images, labels):
    """Return a boolean array that is True for each misclassified example."""
    # The predicted class is the output dimension with the largest decoded
    # value; compare it against the true integer labels.
    predicted = np.argmax(get_outs(sim, images), axis=1)
    return predicted != labels
def print_error(sim):
    """Print the train and test misclassification rates as percentages."""
    rates = tuple(
        100 * get_error(sim, data, labels).mean()
        for data, labels in ((X_train, y_train), (X_test, y_test))
    )
    print("Train/test error: %0.2f%%, %0.2f%%" % rates)
#not sure if we need to use encoders, and if we must, then how to setup these?
#do we setup one encoder or need one for each layer?
# NOTE(review): encoders are sampled and fixed when nengo.Simulator(model)
# is built. These assignments happen AFTER the simulator above was created,
# so they have no effect on `sim` or on print_error(sim) below — set
# `ens.encoders` inside the `with model:` block, before building, for them
# to matter. Each ensemble needs its own (n_neurons, dimensions) encoders.
encodersa = rng.normal(size=(n_in, 28 * 28))
a.encoders = encodersa
encodersA = rng.normal(size=(784, 8 * 8))
A.encoders = encodersA
encodersB = rng.normal(size=(64, 8 * 8))
B.encoders = encodersB
encodersC = rng.normal(size=(64, 10))
C.encoders = encodersC
# NOTE(review): tile() displays each encoder row as an image. A's encoders
# are 64-dimensional, so reshaping them to 28x28 mixes rows together —
# 8x8 would match. C's 10-d encoders have no natural image layout at all;
# the (5, 2) reshape is arbitrary. Verify the intended visualisations.
tile(a.encoders.reshape(-1, 28, 28), rows=4, cols=6, grid=True)
tile(A.encoders.reshape(-1, 28, 28), rows=4, cols=6, grid=True)
tile(B.encoders.reshape(-1, 8, 8), rows=4, cols=6, grid=True)
tile(C.encoders.reshape(-1, 5, 2), rows=4, cols=6, grid=True)
print_error(sim)
```