# Model mRNA production

Hi,
I have tried to model mRNA production. In short, the rate equation for production is:
d(mRNA)/dt = RNAp*alpha - mRNA*tau, where alpha is the production rate and tau is the degradation rate. RNAp is the input to the neuron.

The problem is that when I change alpha, the amplitude of mRNA does not change; moreover, changing alpha makes J decrease, so overall d_mrna remains the same.

This is my code:

import numpy as np
import matplotlib.pyplot as plt
import random
import nengo
from nengo.dists import Uniform

# Random binary lactose input signal: 100 samples, each 0 or 1.
# (The pasted loop had lost its indentation; a comprehension avoids the issue.)
Lactose = [random.randint(0, 1) for _ in range(100)]

CAP = Lactose;
# Model parameters for the two Hill-function binding stages.
cAMP = 10     # cAMP concentration
kr = 1
beta1 = 0.005  # basal (leak) activity of the CAP/cAMP term
Km = 2         # half-saturation constant, CAP/cAMP binding
m = 1          # Hill coefficient, CAP/cAMP binding
beta2 = 0.005  # basal (leak) activity of the RNAp term
Kd = 10        # half-saturation constant, RNAp binding
n = 1          # Hill coefficient, RNAp binding

# Stage 1: CAP + cAMP bind the DNA (Hill function) and attract RNAp.
complexx = [
    cap * ((cAMP / Km) ** m + beta1) / ((cAMP / Km) ** m + 1)
    for cap in CAP
]

# Stage 2: RNAp binds the DNA, again modelled as a Hill function.
# Built directly as a list of 1-element lists (one sample per Node output),
# replacing the original's redundant round trip through np.array.
RNAp = [
    [((c / Kd) ** n + beta2) / ((c / Kd) ** n + 1)]
    for c in complexx
]
print(RNAp)

# Define a custom neuron type for computing mRNA

class MRNANeuron(nengo.neurons.NeuronType):
    """Custom neuron type whose output is an mRNA concentration.

    Integrates d(mRNA)/dt = alpha * J - tau * mRNA with a forward Euler
    step, clipping the concentration at zero.

    NOTE(review): alpha and tau are hard-coded below.  Nengo solves each
    neuron's gain and bias from its max_rates/intercepts by running this
    very rate model, so scaling alpha up is presumably compensated by a
    smaller solved gain (smaller J), leaving the probed amplitude
    unchanged -- confirm against NeuronType.gain_bias().
    """

    def step(self, dt, J, output):
        alpha = 1  # production rate of mRNA
        tau = 2    # degradation RATE (1/s); the code multiplies by it,
                   # so it is not a time constant despite the old comment

        # Forward-Euler update of the mRNA concentration.
        d_mrna = alpha * J - output * tau
        output[:] += dt * d_mrna

        # A concentration cannot go below 0.
        output[output < 0] = 0

# Parameters
dt = 0.01  # simulation time step (s)

# Define the model
model = nengo.Network()

with model:
    # Input node: RNAp concentration, holding one precomputed sample per
    # second of simulated time (clamped to the last sample afterwards).
    rnap_node = nengo.Node(
        lambda t: RNAp[int(t)] if int(t) < len(RNAp) else RNAp[-1]
    )

    # Single-"neuron" ensemble whose state is the mRNA concentration.
    mrna_ensemble = nengo.Ensemble(
        1,
        dimensions=1,
        neuron_type=MRNANeuron(),
        intercepts=nengo.dists.Uniform(0.01, 0.01),
    )
    # max_rates=nengo.dists.Uniform(100, 100))  # would pin the neuron's max firing rate

    # Feed RNAp straight into the ensemble, with no synaptic filtering.
    nengo.Connection(rnap_node, mrna_ensemble, synapse=None)

    # Probe mRNA and RNAp concentrations over time.
    mrna_probe = nengo.Probe(mrna_ensemble, synapse=None)
    RNAp_probe = nengo.Probe(rnap_node, synapse=None)

# Create the simulator and run one simulated second per RNAp sample.
with nengo.Simulator(model, dt=dt) as sim:
    sim.run(len(RNAp))

# Plot the results.  (The pasted version used typographic quotes, which are
# a SyntaxError in Python, and re-imported pyplot, which is already imported
# at the top of the file.)
plt.plot(sim.trange(), sim.data[mrna_probe])
plt.xlabel('Time (s)')
plt.ylabel('mRNA Concentration')
plt.title('mRNA Concentration Over Time with Spiking Behavior')
plt.show()

plt.plot(sim.trange(), sim.data[RNAp_probe])
plt.xlabel('Time (s)')
plt.ylabel('RNAp Concentration')
plt.title('RNAp Concentration Over Time with Spiking Behavior')
plt.show()

# Peak values, for a quick sanity check of the amplitudes.
print(np.max(sim.data[mrna_probe]))
print(np.max(sim.data[RNAp_probe]))
# print(sim.data[mrna_probe])
# print(sim.data[RNAp_probe])

Does anyone have any thoughts on why this is happening?