Oops, I just realised that my second code block was the same as the first one, a copy-and-paste failure. It was supposed to be the ensembles version; I had only posted it to check I wasn't adding another activation function by mistake.
In general, the ensembles code below produces an end image with more background noise than the DL version (seen above), almost as if the DL version is running an extra ReLU somewhere. After the code I've added a quick sketch of how I look at the filtered output.
import numpy as np
import nengo
import nengo_dl

seed = 0  # placeholder; the actual seed value is defined elsewhere in my script

with nengo.Network(seed=seed) as net:
    # set up some default parameters to match the Keras defaults
    # net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
    # net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])
    # net.config[nengo.Connection].synapse = None
    net.config[nengo.Connection].transform = nengo_dl.dists.Glorot()
    net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([1000])
    net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
    net.config[nengo.Connection].synapse = None

    NN_neuron_type = nengo.SpikingRectifiedLinear(amplitude=0.001)

    # this is an optimization to improve the training speed,
    # since we won't require stateful behaviour in this example
    nengo_dl.configure_settings(stateful=False)

    # input node, same as before
    inp = nengo.Node(output=np.ones(7999))

    # add the first dense layer (raw neurons, so each unit is 1-dimensional)
    hidden = nengo.Ensemble(1024, 1, neuron_type=NN_neuron_type).neurons
    nengo.Connection(inp, hidden)

    # second dense layer
    hidden2 = nengo.Ensemble(512, 1, neuron_type=NN_neuron_type).neurons
    nengo.Connection(hidden, hidden2)

    # third dense layer
    hidden3 = nengo.Ensemble(254, 1, neuron_type=NN_neuron_type).neurons
    nengo.Connection(hidden2, hidden3)

    # fourth dense layer
    hidden4 = nengo.Ensemble(4096, 1, neuron_type=NN_neuron_type).neurons
    nengo.Connection(hidden3, hidden4)

    # add the linear output layer (using nengo.Node since there is
    # no nonlinearity)
    out = nengo.Node(size_in=4096)
    nengo.Connection(hidden4, out)

    # add probes to collect the raw and filtered output
    out_p = nengo.Probe(out)
    out_p_filt = nengo.Probe(out, synapse=0.1, label="out_p_filt")
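This is roughly how I then run the ensembles network and look at the filtered output to see the background noise I mentioned. It's just a sketch, not part of the network definition above: the step count, the commented-out parameter file path, and the 64x64 reshape of the 4096-d output are placeholders, so adjust them to whatever your own run actually uses.

import matplotlib.pyplot as plt

n_steps = 100  # placeholder run length

with nengo_dl.Simulator(net) as sim:
    # sim.load_params("./trained_params")  # hypothetical path to saved weights
    sim.run_steps(n_steps)

# nengo_dl probe data has shape (minibatch_size, n_steps, 4096)
filtered = sim.data[out_p_filt][0]

# assuming the 4096-d output is a 64x64 image; change to the real shape
plt.imshow(filtered[-1].reshape(64, 64))
plt.title("ensembles version, filtered output (last timestep)")
plt.show()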