[possible bug] Nengo_dl does not optimise encoders

Hey, I am trying to train some networks with nengo_dl and noticed something odd: even though the encoders should be optimized during training according to the docs (Optimizing a NengoDL model — NengoDL documentation), they do not change.
When running the code below, you can see that the biases do change between epochs, but the encoders do not. I tried various settings of the trainable config option (see the lines marked NOTE THIS) but couldn't get them to change. Furthermore, the encoders don't seem to be counted in sim.keras_model.trainable_weights either (see the check after the code).
Code to reproduce:

# Adapted from https://www.nengo.ai/nengo-dl/examples/tensorflow-models.html
# Some setup
import numpy as np
import tensorflow as tf
import nengo
import nengo_dl
seed = 0
np.random.seed(seed)
tf.random.set_seed(seed)
(train_images, train_labels), (
    test_images,
    test_labels,
) = tf.keras.datasets.fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_images = train_images.reshape((train_images.shape[0], -1))
test_images = test_images.reshape((test_images.shape[0], -1))
class_names = [
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]
num_classes = len(class_names)
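# nengo_dl expects data shaped (batch_size, n_steps, dimensions), so add a single-step time axis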
train_images = train_images[:, None, :]
train_labels = train_labels[:, None, None]
test_images = test_images[:, None, :]
test_labels = test_labels[:, None, None]


# MODEL: HERE IT GETS INTERESTING
with nengo.Network(seed=seed) as net:
    net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
    net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])
    net.config[nengo.Connection].synapse = None
    net.config[nengo.Connection].transform = nengo_dl.dists.Glorot()
    nengo_dl.configure_settings(trainable=True) # <<<< NOTE THIS
    nengo_dl.configure_settings(lif_smoothing=0.1) # necessary to train biases (smooths the LIF gradient)

    # placeholder input node; nengo_dl overrides this output with the data
    # passed to sim.fit / sim.evaluate
    inp = nengo.Node(output=np.ones(28 * 28))

    # ensemble to train
    hidden = nengo.Ensemble(5, 1, neuron_type=nengo.LIF())
    net.config[hidden].trainable = True # <<<< NOTE THIS
    nengo.Connection(inp, hidden.neurons)


    out = nengo.Node(size_in=num_classes)
    nengo.Connection(hidden.neurons, out)

    # add a probe to collect output
    out_p = nengo.Probe(out)

with net:
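    # stateful=True preserves simulator state between calls; use_loop=False
    # builds the model without the symbolic simulation loop (fine here,
    # since the data only has a single timestep)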
    nengo_dl.configure_settings(stateful=True, use_loop=False)

with nengo_dl.Simulator(net, minibatch_size=20) as sim:
    sim.compile(
        optimizer=tf.optimizers.Adam(),
        loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )
    for i in range(5):
        sim.fit(train_images, train_labels, epochs=1)
        # the biases change between epochs, but the encoders stay the same
        print(sim.data[hidden].bias)
        print(sim.data[hidden].encoders)

    print(
        "Test accuracy:",
        sim.evaluate(test_images, test_labels, verbose=0)["probe_accuracy"],
    )
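
For reference, this is one way to list the weights that Keras actually registers as trainable (just iterating over the standard trainable_weights attribute of the simulator's underlying Keras model); the encoders never show up here:

with nengo_dl.Simulator(net, minibatch_size=20) as sim:
    for w in sim.keras_model.trainable_weights:
        print(w.name, w.shape)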


Am I doing something wrong / missing something? Thanks for the help :smiley: