Hi, @tbekolay.

I have one more question. I have tried to build a multivariate CNN model with Nengo, training on 7 historical samples (input: a 3-by-2 window, output: a real number). As you recommended, I set the objective function to MSE. I trained with n_epochs = 1000, but the test result is very different from what I expected (y_test = 206.0161). Could you point out what I missed in the code below, which I wrote based on your tutorial?

=========================================================

```
import nengo
import tensorflow as tf
import numpy as np
import nengo_dl
import matplotlib.pyplot as plt
# Number of historical time steps in each training window.
n_steps = 3

# Both input series in one table: row k holds (10*(k+1), 10*(k+1) + 5),
# i.e. [10, 15], [20, 25], ..., [100, 105].
series = 10 * np.arange(1, 11)[:, None]
pairs = np.concatenate([series, series + 5], axis=1)

# Seven sliding windows of n_steps consecutive rows each -> shape (7, 3, 2).
x_train = np.stack([pairs[i:i + n_steps] for i in range(7)])
print(x_train.shape)

# Two parallel input series per time step.
n_features = x_train.shape[2]

# Regression targets: 65, 85, ..., 185 (one real number per window).
y_train = np.arange(65, 186, 20)

# Held-out window (rows 8-10 of the series) and its expected output.
x_test = pairs[None, 7:7 + n_steps]
y_test = np.array([206.0161])
with nengo.Network() as net:
    # Pin the neuron parameters so every ensemble behaves identically.
    net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
    net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])

    # NOTE(review): amplitude=0.01 scales each spike's contribution by 0.01,
    # and spiking LIF neurons barely fire within a single training timestep —
    # both plausibly contribute to the test prediction being far off target.
    # Worth trying a rate neuron type (or larger amplitude) to confirm.
    neuron_type = nengo.LIF(amplitude=0.01)

    # Presumably marks the Nengo-level parameters as non-trainable so that
    # only the tf.layers variables inside the tensor layers are optimized —
    # this mirrors the tutorial; verify against the NengoDL docs.
    nengo_dl.configure_settings(trainable=False)

    # Input node fed one flattened (n_steps, n_features) window per step.
    inp = nengo.Node([0] * n_steps * n_features)

    # conv1d over the 3-step window (kernel_size == n_steps, so a single
    # output position with 5 filters), then the neuron nonlinearity, then a
    # linear readout producing the scalar prediction.
    x = nengo_dl.tensor_layer(inp, tf.layers.conv1d,
                              shape_in=(n_steps, n_features),
                              filters=5, kernel_size=3)
    x = nengo_dl.tensor_layer(x, neuron_type)
    x = nengo_dl.tensor_layer(x, tf.layers.dense, units=1)

    # Raw output (used for training) and a filtered copy (used to read out
    # the spiking network's prediction at test time).
    out_p = nengo.Probe(x)
    out_p_filt = nengo.Probe(x, synapse=0.1)

minibatch_size = 1
sim = nengo_dl.Simulator(net, minibatch_size=minibatch_size)
# Each training pair is presented for a single simulation timestep, with the
# (3, 2) window flattened into one 6-d input vector.
train_data = {
    inp: x_train.reshape((len(x_train), 1, -1)),
    out_p: y_train.reshape((len(y_train), 1, 1)),
}

# NOTE(review): n_steps is reused here — above it was the window length (3);
# from this point on it is the simulation run length (50).
n_steps = 50

# Hold the test window (and its target) constant over all 50 steps so the
# filtered probe has time to settle.
test_data = {
    inp: np.tile(x_test.reshape((len(x_test), 1, -1)), (1, n_steps, 1)),
    out_p_filt: np.tile(y_test.reshape((len(y_test), 1, 1)), (1, n_steps, 1)),
}
def objective(outputs, targets):
    """Mean squared error between the probe outputs and the target values."""
    # Body was unindented in the pasted snippet (SyntaxError); restored here.
    return tf.losses.mean_squared_error(labels=targets, predictions=outputs)
# Minimize the MSE on the unfiltered probe with RMSProp.
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001)
sim.train(train_data, optimizer, objective={out_p: objective}, n_epochs=1000)

# Present the held-out window for n_steps (50) simulation steps.
test_inputs = test_data[inp][:minibatch_size]
sim.run_steps(n_steps, data={inp: test_inputs})

# Final-step prediction from the raw probe, then from the filtered probe.
print(sim.data[out_p][0][-1])
print(sim.data[out_p_filt][0][-1])
sim.close()
```