NengoDL converter

Thanks so much to both @msanch35 and @zerone!!
That was exactly my problem; now if I scale the firing rate to something like 10k I get actual spiking activity.

I had taken a while off the project until I had more time to implement a few things to try to get it working, but thanks to the magic of the forum it solved itself! (or rather, someone else solved it for me) :smiley:

Now I might actually get to the LIF neuron stage.
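
In case it helps anyone else who finds this thread, the change boils down to passing a large scale_firing_rates to the converter. Here is a rough sketch of what that looks like; the tiny model and the exact numbers are placeholders, not my actual network:

```
import nengo
import nengo_dl
import tensorflow as tf

# placeholder Keras model standing in for the real network
inp = tf.keras.layers.Input(shape=(784,))
hidden = tf.keras.layers.Dense(64, activation=tf.nn.relu)(inp)
out = tf.keras.layers.Dense(10)(hidden)
model = tf.keras.Model(inputs=inp, outputs=out)

# convert to a spiking network; scale_firing_rates scales neuron inputs up
# (and outputs back down) so that spikes actually occur within a short
# simulation window
converter = nengo_dl.Converter(
    model,
    swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},
    scale_firing_rates=10000,  # the "10k" scaling mentioned above
)
```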


Hi @Eric,

I am using NengoDL version 3.3.0

I am having the same issue with:
nengo.exceptions.SimulationError: Number of saved parameters in ./my-model (5) != number of variables in the model (4)

My network is a custom network with 3 hidden layers of 50 neurons each.
I've tried the suggested solutions #1 and #3, but neither works with my network architecture.
I am trying to avoid rewriting code as suggested in approach #2.

Do you have any additional suggestions for my case?

Best Regards,
Alex

Hi @alex.hex,

Are you sure that your model has the same architecture as the one you used to save my-model? The other likely cause of this error is that your model changed between when you created my-model and the current version of your script, so that the number of parameter tensors is actually different.

Otherwise, I can't think of any reason why you would get this error. If you can reduce the problem to a specific, reproducible test case, you can post it as a bug report on NengoDL issues.
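
If it helps with debugging, here is a quick way to compare the two counts directly. This is only a sketch: net stands in for whatever network you built, and ./my-model is the path you passed to save_params (if I recall correctly, save_params writes a .npz archive next to that path):

```
import numpy as np
import nengo_dl

# count the weight tensors in the current model
with nengo_dl.Simulator(net) as sim:  # net = your existing network (placeholder)
    print("variables in the model:", len(sim.keras_model.weights))

# count the arrays that save_params wrote to disk
with np.load("./my-model.npz") as saved:
    print("saved parameters:", len(saved.files))
```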

Hi Eric,
I don't know the reason, but when I convert Darknet-19 to an SNN I get the error "SimulationError: Number of saved parameters in keras_to_snn_params (41) != number of variables in the model (44)" if synapse=0.05 (or any other non-zero value), and no error for synapse=None, even if I turn off the converter's graph simplifications:

```
with converter.net:
    nengo_dl.configure_settings(simplifications=[])
```

Here is my full script:

```
# imports and data loading (MNIST is assumed here, to match the
# (28, 28, 1) input shape and the 10-unit output layer)
from urllib.request import urlretrieve

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

import nengo
import nengo_dl

(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# flatten the images and add a (length-1) time dimension, as the simulator expects
train_images = train_images.reshape((train_images.shape[0], 1, -1))
train_labels = train_labels.reshape((train_labels.shape[0], 1, -1))
test_images = test_images.reshape((test_images.shape[0], 1, -1))
test_labels = test_labels.reshape((test_labels.shape[0], 1, -1))

# Darknet-19-style model definition
inp = tf.keras.layers.Input(shape=(28, 28, 1))
conv0 = tf.keras.layers.Conv2D(filters=8, kernel_size=3, padding="same", activation=tf.nn.relu)(inp)
pool0 = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)(conv0)
conv1 = tf.keras.layers.Conv2D(filters=16, kernel_size=3, padding="same", activation=tf.nn.relu)(pool0)
pool1 = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)(conv1)
conv2 = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation=tf.nn.relu)(pool1)
conv3 = tf.keras.layers.Conv2D(filters=16, kernel_size=1, padding="same", activation=tf.nn.relu)(conv2)
conv4 = tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding="same", activation=tf.nn.relu)(conv3)
pool2 = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)(conv4)
conv5 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation=tf.nn.relu)(pool2)
conv6 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, padding="same", activation=tf.nn.relu)(conv5)
conv7 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation=tf.nn.relu)(conv6)
pool3 = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)(conv7)
conv8 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding="same", activation=tf.nn.relu)(pool3)
conv9 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, padding="same", activation=tf.nn.relu)(conv8)
conv10 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding="same", activation=tf.nn.relu)(conv9)
conv11 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, padding="same", activation=tf.nn.relu)(conv10)
conv12 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding="same", activation=tf.nn.relu)(conv11)
# pool4 = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2, padding="same")(conv12)
conv13 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding="same", activation=tf.nn.relu)(conv12)
conv14 = tf.keras.layers.Conv2D(filters=128, kernel_size=1, padding="same", activation=tf.nn.relu)(conv13)
conv15 = tf.keras.layers.Conv2D(filters=356, kernel_size=3, padding="same", activation=tf.nn.relu)(conv14)
conv16 = tf.keras.layers.Conv2D(filters=128, kernel_size=1, padding="same", activation=tf.nn.relu)(conv15)
conv17 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding="same", activation=tf.nn.relu)(conv16)
# note: conv18 takes conv16 as input, so conv17 is never used downstream
conv18 = tf.keras.layers.Conv2D(filters=250, kernel_size=1, padding="same", activation=tf.nn.relu)(conv16)
pool5 = tf.keras.layers.GlobalAveragePooling2D()(conv18)
flatten = tf.keras.layers.Flatten()(pool5)
dense = tf.keras.layers.Dense(units=10)(flatten)

model = tf.keras.Model(inputs=inp, outputs=dense)
model.summary()

converter = nengo_dl.Converter(model)

do_training = True
if do_training:
    with converter.net:
        nengo_dl.configure_settings(simplifications=[])
    with nengo_dl.Simulator(converter.net, minibatch_size=200) as sim:
        # run training
        sim.compile(
            optimizer=tf.optimizers.Adam(0.001),
            loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=[tf.metrics.sparse_categorical_accuracy],
        )
        sim.fit(
            {converter.inputs[inp]: train_images},
            {converter.outputs[dense]: train_labels},
            validation_data=(
                {converter.inputs[inp]: test_images},
                {converter.outputs[dense]: test_labels},
            ),
            epochs=2,
        )
       

        # save the parameters to file
        sim.save_params("./keras_to_snn_params")
else:
    # download pretrained weights
    urlretrieve(
        "https://drive.google.com/uc?export=download&"
        "id=1lBkR968AQo__t8sMMeDYGTQpBJZIs2_T",
        "keras_to_snn_params.npz",
    )
    print("Loaded pretrained weights")

def run_network(
    activation,
    params_file="keras_to_snn_params",
    n_steps=120,
    scale_firing_rates=1,
    synapse=None,
    n_test=400,
):
    # convert the keras model to a nengo network
    nengo_converter = nengo_dl.Converter(
        model,
        swap_activations={tf.nn.relu: activation},
        scale_firing_rates=scale_firing_rates,
        synapse=synapse,
    )

    # get input/output objects
    nengo_input = nengo_converter.inputs[inp]
    nengo_output = nengo_converter.outputs[dense]
    with nengo_converter.net:
        nengo_dl.configure_settings(simplifications=[])
    

    # add a probe to the first convolutional layer to record activity.
    # we'll only record from a subset of neurons, to save memory.
    sample_neurons = np.linspace(
        0,
        np.prod(conv0.shape[1:]),
        1000,
        endpoint=False,
        dtype=np.int32,
    )
    with nengo_converter.net:
        conv0_probe = nengo.Probe(nengo_converter.layers[conv0][sample_neurons])
        
   
    # repeat inputs for some number of timesteps
    tiled_test_images = np.tile(test_images[:n_test], (1, n_steps, 1))

    # set some options to speed up simulation
    with nengo_converter.net:
        nengo_dl.configure_settings(stateful=False)

    # build network, load in trained weights, run inference on test images
    with nengo_dl.Simulator(
        nengo_converter.net, minibatch_size=10, progress_bar=False
    ) as nengo_sim:
        # debugging: count how many weight tensors the built model expects,
        # to compare against the number saved in the params file
        params = list(nengo_sim.keras_model.weights)
        print(len(params))
        nengo_sim.load_params(params_file)
        data = nengo_sim.predict({nengo_input: tiled_test_images})

    # compute accuracy on test data, using output of network on
    # last timestep
    predictions = np.argmax(data[nengo_output][:, -1], axis=-1)
    accuracy = (predictions == test_labels[:n_test, 0, 0]).mean()
    print(f"Test accuracy: {100 * accuracy:.2f}%")

    # plot the results
    for ii in range(3):
        plt.figure(figsize=(12, 4))

        plt.subplot(1, 3, 1)
        plt.title("Input image")
        plt.imshow(test_images[ii, 0].reshape((28, 28)), cmap="gray")
        plt.axis("off")

        plt.subplot(1, 3, 2)
        scaled_data = data[conv0_probe][ii] * scale_firing_rates
        if isinstance(activation, nengo.SpikingRectifiedLinear):
            scaled_data *= 0.001
            rates = np.sum(scaled_data, axis=0) / (n_steps * nengo_sim.dt)
            plt.ylabel("Number of spikes")
        else:
            rates = scaled_data
            plt.ylabel("Firing rates (Hz)")
        plt.xlabel("Timestep")
        plt.title(
            f"Neural activities (conv0 mean={rates.mean():.1f} Hz, "
            f"max={rates.max():.1f} Hz)"
        )
        plt.plot(scaled_data)

        plt.subplot(1, 3, 3)
        plt.title("Output predictions")
        plt.plot(tf.nn.softmax(data[nengo_output][ii]))
        plt.legend([str(j) for j in range(10)], loc="upper left")
        plt.xlabel("Timestep")
        plt.ylabel("Probability")

        plt.tight_layout()

for s in [0.1, 0.02, 0.07]:
    print(f"Synapse={s:.3f}")
    run_network(
        activation=nengo.SpikingRectifiedLinear(),
        n_steps=10,
        synapse=s,
    )
    plt.show()
```
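
One rough check that might narrow this down (a sketch only, reusing the model defined above): build the converter with and without a synapse and compare how many weight tensors each built model reports.

```
# sketch: does the synaptic filter change the number of variables in the built model?
for syn in [None, 0.05]:
    check_converter = nengo_dl.Converter(
        model,
        swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},
        synapse=syn,
    )
    with nengo_dl.Simulator(check_converter.net, progress_bar=False) as check_sim:
        print(f"synapse={syn}: {len(check_sim.keras_model.weights)} weight tensors")
```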

This has been addressed here. :smiley: