Discrepancy in Accuracy between nengo.RectifiedLinear() and nengo.SpikingRectifiedLinear() Activations

I recently ran into a puzzling result while working on a Bengali handwritten-character recognition task, similar to MNIST but with more classes (50 in total). When I used the nengo.RectifiedLinear() activation in run_network() with n_steps=10, I achieved a promising 85% accuracy. However, when I switched to nengo.SpikingRectifiedLinear(), the accuracy dropped to a mere 1%.
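
For reference, the two experiments boil down to these calls (run_network() is defined further down; apart from activation and n_steps I kept the defaults):

run_network(activation=nengo.RectifiedLinear(), n_steps=10)  # ~85% accuracy
run_network(activation=nengo.SpikingRectifiedLinear(), n_steps=10)  # ~1% accuracy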

The CNN model (imports included for completeness):

import matplotlib.pyplot as plt
import nengo
import nengo_dl
import numpy as np
import tensorflow as tf

# input
inp = tf.keras.Input(shape=(28, 28, 1))

# convolutional layers

conv0 = tf.keras.layers.Conv2D(
    filters=5,
    kernel_size=2,
    activation=tf.nn.relu,
    use_bias=False,
)(inp)

conv1 = tf.keras.layers.Conv2D(
    filters=10,
    kernel_size=2,
    activation=tf.nn.relu,
    use_bias=False,
)(conv0)

conv2 = tf.keras.layers.Conv2D(
    filters=20,
    kernel_size=2,
    activation=tf.nn.relu,
    use_bias=False,
)(conv1)

conv3 = tf.keras.layers.Conv2D(
    filters=40,
    kernel_size=2,
    activation=tf.nn.relu,
    use_bias=False,
)(conv2)

# fully connected layers

flatten = tf.keras.layers.Flatten()(conv3)
dense0 = tf.keras.layers.Dense(units=256, activation="relu", use_bias=False)(flatten)
dense1 = tf.keras.layers.Dense(units=512, activation="relu", use_bias=False)(dense0)

dense = tf.keras.layers.Dense(units=50, use_bias=False)(dense1)

model = tf.keras.Model(inputs=inp, outputs=dense)
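
For completeness, the weights loaded from params_file in run_network() below were saved from a rate-based NengoDL training run along these lines (a minimal sketch, assuming train_images/train_labels arrays already shaped for NengoDL; my actual training code differs only in details):

# convert the rate-based model, train it, and save the parameters
converter = nengo_dl.Converter(model)
with nengo_dl.Simulator(converter.net, minibatch_size=200) as sim:
    sim.compile(
        optimizer=tf.optimizers.Adam(0.001),
        loss={
            converter.outputs[dense]: tf.losses.SparseCategoricalCrossentropy(
                from_logits=True
            )
        },
    )
    sim.fit(
        {converter.inputs[inp]: train_images},
        {converter.outputs[dense]: train_labels},
        epochs=2,
    )
    sim.save_params("Bengali_CNN_params")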

The conversion code is:

def run_network(
    activation,
    params_file="Bengali_CNN_params",
    n_steps=30,
    scale_firing_rates=1,
    synapse=None,
    n_test=3000,
):
    # convert the keras model to a nengo network
    nengo_converter = nengo_dl.Converter(
        model,
        swap_activations={tf.nn.relu: activation},
        scale_firing_rates=scale_firing_rates,
        synapse=synapse,
    )

    # get input/output objects
    nengo_input = nengo_converter.inputs[inp]
    nengo_output = nengo_converter.outputs[dense]

    # add a probe to the first convolutional layer to record activity.
    # we'll only record from a subset of neurons, to save memory.
    sample_neurons = np.linspace(
        0,
        np.prod(conv0.shape[1:]),
        1000,
        endpoint=False,
        dtype=np.int32,
    )
    with nengo_converter.net:
        conv0_probe = nengo.Probe(nengo_converter.layers[conv0][sample_neurons])

    # repeat inputs for some number of timesteps
    tiled_test_images = np.tile(test_images[:n_test], (1, n_steps, 1))
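    # (assumption: test_images has already been flattened to shape
    # (n_images, 1, 28 * 28), as in the NengoDL Keras-to-SNN example, so
    # tiling along axis 1 presents the same image at all n_steps timesteps)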

    # set some options to speed up simulation
    with nengo_converter.net:
        nengo_dl.configure_settings(stateful=False)

    # build network, load in trained weights, run inference on test images
    with nengo_dl.Simulator(
        nengo_converter.net, minibatch_size=10, progress_bar=False
    ) as nengo_sim:
        nengo_sim.load_params(params_file)
        data = nengo_sim.predict({nengo_input: tiled_test_images})

    # compute accuracy on test data, using the output of the network
    # on the last timestep
    predictions = np.argmax(data[nengo_output][:, -1], axis=-1)
    accuracy = (predictions == test_labels[:n_test, 0, 0]).mean()
    print(f"Test accuracy: {100 * accuracy:.2f}%")

    # plot the results
    for ii in range(3):
        plt.figure(figsize=(12, 4))

        plt.subplot(1, 3, 1)
        plt.title("Input image")
        plt.imshow(test_images[ii, 0].reshape((28, 28)), cmap="gray")
        plt.axis("off")

        plt.subplot(1, 3, 2)
        scaled_data = data[conv0_probe][ii] * scale_firing_rates
        if isinstance(activation, nengo.SpikingRectifiedLinear):
            scaled_data *= 0.001
            rates = np.sum(scaled_data, axis=0) / (n_steps * nengo_sim.dt)
            plt.ylabel("Number of spikes")
        else:
            rates = scaled_data
            plt.ylabel("Firing rates (Hz)")
        plt.xlabel("Timestep")
        plt.title(
            f"Neural activities (conv0 mean={rates.mean():.1f} Hz, "
            f"max={rates.max():.1f} Hz)"
        )
        plt.plot(scaled_data)

        plt.subplot(1, 3, 3)
        plt.title("Output predictions")
        plt.plot(tf.nn.softmax(data[nengo_output][ii]))
        plt.legend([str(j) for j in range(50)], loc="upper left")  # 50 classes, not 10
    plt.xlabel("Timestep")
    plt.ylabel("Probability")

    plt.tight_layout()

I'm eager to understand why such a large accuracy gap appears between these two activation functions. Could anyone shed some light on the likely causes of this outcome? Any insights or suggestions would be greatly appreciated!

Thank you in advance for your assistance.