How to print out the connection weights when a TensorNode is used?

Hello,

I am getting started with TensorNodes for adding TensorFlow layers to Nengo networks. To that end, I have modified the MNIST example to include a custom layer implemented as a callable class:

import gzip
import pickle
from urllib.request import urlretrieve
import zipfile

import nengo
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

import nengo_dl


class ConvLayer:
    def __init__(self, n_channels, kernel_size, shape_in):
        self.n_channels = n_channels  # number of convolutional filters
        self.kernel_size = kernel_size  # convolutional filter size
        self.shape_in = shape_in

    def pre_build(self, shape_in, shape_out):
        self.n_mini = shape_in[0]  # minibatch size
        self.size_out = shape_out[1]  # output dimensionality

    def __call__(self, t, x):
        # reshape input signal to image shape
        image = tf.reshape(x, (self.n_mini, self.shape_in[0], self.shape_in[1], self.shape_in[2]))

        # apply convolutional layer
        conv = tf.layers.conv2d(image, filters=self.n_channels, kernel_size=self.kernel_size)

        return tf.reshape(conv, (self.n_mini, -1))


urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz",
            "mnist.pkl.gz")
with gzip.open("mnist.pkl.gz") as f:
    train_data, _, test_data = pickle.load(f, encoding="latin1")
train_data = list(train_data)
test_data = list(test_data)
for data in (train_data, test_data):
    one_hot = np.zeros((data[0].shape[0], 10))
    one_hot[np.arange(data[0].shape[0]), data[1]] = 1
    data[1] = one_hot

for i in range(3):
    plt.figure()
    plt.imshow(np.reshape(train_data[0][i], (28, 28)),
               cmap="gray")
    plt.axis('off')
    plt.title(str(np.argmax(train_data[1][i])))


with nengo.Network() as net:
    # set some default parameters for the neurons that will make
    # the training progress more smoothly
    net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
    net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
    neuron_type = nengo.LIF(amplitude=0.01)

    # we'll make all the nengo objects in the network
    # non-trainable. we could train them if we wanted, but they don't
    # add any representational power. note that this doesn't affect
    # the internal components of tensornodes, which will always be
    # trainable or non-trainable depending on the code written in
    # the tensornode.
    nengo_dl.configure_settings(trainable=False)

    # the input node that will be used to feed in input images
    inp = nengo.Node([0] * 28 * 28)
    out_p_inp = nengo.Probe(inp)

    # add the first convolutional layer
    x = nengo_dl.TensorNode(
        ConvLayer(32, 3, (28, 28, 1)),
        size_in=28 * 28 * 1, size_out=26 * 26 * 32)
    conn = nengo.Connection(inp, x, synapse=None)
    out_p_c1 = nengo.Probe(x)

    # apply the neural nonlinearity
    x = nengo_dl.tensor_layer(x, neuron_type)
    out_p_nlc1 = nengo.Probe(x)

    # add another convolutional layer
    x = nengo_dl.tensor_layer(
        x, tf.layers.conv2d, shape_in=(26, 26, 32),
        filters=64, kernel_size=3)
    x = nengo_dl.tensor_layer(x, neuron_type)

    # add a pooling layer
    x = nengo_dl.tensor_layer(
        x, tf.layers.average_pooling2d, shape_in=(24, 24, 64),
        pool_size=2, strides=2)

    # another convolutional layer
    x = nengo_dl.tensor_layer(
        x, tf.layers.conv2d, shape_in=(12, 12, 64),
        filters=128, kernel_size=3)
    x = nengo_dl.tensor_layer(x, neuron_type)
    #out_p_nlc3 = nengo.Probe(x)

    # another pooling layer
    x = nengo_dl.tensor_layer(
        x, tf.layers.average_pooling2d, shape_in=(10, 10, 128),
        pool_size=2, strides=2)

    # linear readout
    x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)

    # we'll create two different output probes, one with a filter
    # (for when we're simulating the network over time and
    # accumulating spikes), and one without (for when we're
    # training the network using a rate-based approximation)
    out_p = nengo.Probe(x)
    out_p_filt = nengo.Probe(x, synapse=0.1)

minibatch_size = 200
sim = nengo_dl.Simulator(net, minibatch_size=minibatch_size)

# add the single timestep to the training data
train_data = {inp: train_data[0][:, None, :],
              out_p: train_data[1][:, None, :]}

# when testing our network with spiking neurons we will need to run it
# over time, so we repeat the input/target data for a number of
# timesteps. we're also going to reduce the number of test images, just
# to speed up this example.
n_steps = 30
test_data = {
    inp: np.tile(test_data[0][:minibatch_size*2, None, :],
                 (1, n_steps, 1)),
    out_p_filt: np.tile(test_data[1][:minibatch_size*2, None, :],
                        (1, n_steps, 1))}


def objective(outputs, targets):
    return tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=outputs, labels=targets)

opt = tf.train.RMSPropOptimizer(learning_rate=0.001)


def classification_error(outputs, targets):
    return 100 * tf.reduce_mean(
        tf.cast(tf.not_equal(tf.argmax(outputs[:, -1], axis=-1),
                             tf.argmax(targets[:, -1], axis=-1)),
                tf.float32))

print("error before training: %.2f%%" % sim.loss(
    test_data, {out_p_filt: classification_error}))
do_training = True
if do_training:
    # run training
    sim.train(train_data, opt, objective={out_p: objective}, n_epochs=10)

    # save the parameters to file
    sim.save_params("./mnist_params")
else:
    # download pretrained weights
    urlretrieve(
        "https://drive.google.com/uc?export=download&"
        "id=1u9JyNuRxQDUcFgkRnI1qfJVFMdnGRsjI",
        "mnist_params.zip")
    with zipfile.ZipFile("mnist_params.zip") as f:
        f.extractall()

    # load parameters
    sim.load_params("./mnist_params")

print("error after training: %.2f%%" % sim.loss(
    test_data, {out_p_filt: classification_error}))


sim.run_steps(n_steps, data={inp: test_data[inp][:minibatch_size]})

print(sim.data[conn].weights)

To inspect the trained connection weights between the input and the convolutional layer, I want to print them out. However, print(sim.data[conn].weights) (the last line of the code) only shows a single value fixed to 1.0 instead of a large matrix. How can I access the connection weights correctly?

Using tf.layers makes it kind of hard to get the weights, because the weight creation is hidden inside the tf.layers function, and the weights aren’t directly accessible from the conv object that gets returned (this is one of the main reasons that TensorFlow moved away from that approach in TensorFlow 2.0). You have to look up the weights by name, like

conv = tf.layers.conv2d(
    image, filters=self.n_channels, kernel_size=self.kernel_size)
self.weights = tf.get_default_graph().get_tensor_by_name(
    conv.name.split("/")[-2] + "/kernel:0")
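
(One caveat: the exact tensor name depends on TensorFlow's automatic name scoping; with several such layers the scopes become conv2d, conv2d_1, and so on, so this kind of name lookup can be fragile.)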

Note that this approach also has the drawback that your weights are created inside the __call__ function (rather than in the pre_build stage), which will cause problems if you're doing other things, like using unroll_simulation != 1. In general, I'd recommend creating the weights yourself in pre_build, and then applying them directly with a function like tf.nn.convolution. Something like

def pre_build(self, shape_in, shape_out):
    self.n_mini = shape_in[0]  # minibatch size
    # create the kernel ourselves (glorot is just one initializer choice)
    self.weights = tf.Variable(tf.glorot_uniform_initializer()(
        (self.kernel_size, self.kernel_size,
         self.shape_in[2], self.n_channels)))

def __call__(self, t, x):
    image = tf.reshape(x, (self.n_mini, self.shape_in[0],
                           self.shape_in[1], self.shape_in[2]))
    conv = tf.nn.convolution(image, filter=self.weights, padding="VALID")
    return tf.reshape(conv, (self.n_mini, -1))
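
Creating the variable in pre_build matters because the build invokes __call__ once per unrolled step, so a variable created inside __call__ would get duplicated when unroll_simulation != 1; pre_build, by contrast, runs exactly once.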

Either way, once you have the weights stored in the TensorNode class (self.weights), you can print them out via

with nengo_dl.Simulator(...) as sim:
    ...
    print(sim.sess.run(my_tensor_node.tensor_func.weights))
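
For example, since x is reassigned layer by layer in your network, you would keep a separate handle on the TensorNode (conv_node below is a hypothetical name). A minimal sketch, assuming the pre_build version of ConvLayer above:

# inside the `with nengo.Network() as net:` block
conv_node = nengo_dl.TensorNode(
    ConvLayer(32, 3, (28, 28, 1)),
    size_in=28 * 28 * 1, size_out=26 * 26 * 32)

# after building the network
with nengo_dl.Simulator(net, minibatch_size=minibatch_size) as sim:
    sim.train(train_data, opt, objective={out_p: objective}, n_epochs=10)
    # sess.run evaluates the variable and returns a NumPy array of
    # shape (kernel_size, kernel_size, in_channels, n_channels)
    kernel = sim.sess.run(conv_node.tensor_func.weights)
    print(kernel.shape)  # (3, 3, 1, 32)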

Thanks, Daniel. Now everything works for me.

So, just to deepen my understanding of Nengo: if I use a TensorNode as in the following code:

    # the input node that will be used to feed in input images
    inp = nengo.Node([0] * 28 * 28)
    out_p_inp = nengo.Probe(inp)

    # add the first convolutional layer
    x = nengo_dl.TensorNode(
        ConvLayer(32, 3, (28, 28, 1)),
        size_in=28 * 28 * 1, size_out=26 * 26 * 32)
    conn = nengo.Connection(inp, x, synapse=None)

then the conn object creates a connection with weights fixed to 1 between inp and x, rather than a tensor of trained weights (those are hidden inside the class object). Is my understanding correct?

Yes, that’s exactly right. You effectively have two different sets of weights: one inside the ConvLayer class and one on the Connection. That’s why, in the examples, we often set the Connection weights to be non-trainable when using TensorNodes (since we’re already creating the parameters we want inside the TensorNode).
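
If you only want to freeze that one connection (rather than marking everything non-trainable with configure_settings(trainable=False) as in your script), nengo_dl also supports a per-object setting. A minimal sketch, assuming the per-object trainable config described in the nengo_dl documentation:

with nengo.Network() as net:
    # expose the "trainable" option without setting a network-wide default
    nengo_dl.configure_settings(trainable=None)

    inp = nengo.Node([0] * 28 * 28)
    x = nengo_dl.TensorNode(
        ConvLayer(32, 3, (28, 28, 1)),
        size_in=28 * 28 * 1, size_out=26 * 26 * 32)
    conn = nengo.Connection(inp, x, synapse=None)

    # freeze only the identity connection; the ConvLayer's own weights
    # (created inside the TensorNode) are still trained
    net.config[conn].trainable = False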