# Re-run the code after the reset to train the models and generate the trained animation again.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation

# Generate data for training (sine wave points)
# Training data: sampled points along one full period-pair of a sine wave.
x_train = np.linspace(-2 * np.pi, 2 * np.pi, 1000).reshape(-1, 1)
y_train = np.sin(x_train).ravel()

# Hidden-layer widths to sweep: 20 values log-spaced from 1 to 1000 neurons.
n_neurons_list = np.logspace(0, 3, 20).astype(int)

# Fit one single-hidden-layer MLP per width; keep every fitted model so the
# animation can replay the whole sweep without retraining.
models = []
for width in n_neurons_list:
    regressor = MLPRegressor(hidden_layer_sizes=(width,), max_iter=10000,
                             solver='lbfgs', random_state=0)
    regressor.fit(x_train, y_train)
    models.append(regressor)

# Now we will create an animation based on these models

# Animation setup
# Figure and static artists shared by the animation callbacks.
fig, ax = plt.subplots()
ax.set(xlim=(-2 * np.pi, 2 * np.pi), ylim=(-1.5, 1.5))
# Ground-truth curve is drawn once; the red line is updated per frame.
line_true, = ax.plot(x_train, y_train, label='True function (sin(x))', color='blue')
line_approx, = ax.plot([], [], label='Neural Network approximation', color='red')
ax.legend()

# Initialize the plot
def init():
    """Reset the title and clear the approximation line before playback."""
    ax.set_title('Neurons in hidden layer: 0')
    line_approx.set_data([], [])
    return (line_approx,)

# Update function for the animation
def update(frame):
    """Draw one animation frame: the fit from the frame-th trained model.

    Parameters
    ----------
    frame : int
        Frame index in [0, len(models)); selects which fitted model to show.

    Returns
    -------
    tuple
        The updated approximation line artist (for blitting).
    """
    # Bug fix: the original indexed models[frame - 1], so frame 0 displayed
    # the LAST model while the title reported n_neurons_list[0] — every frame
    # showed a curve mismatched with its title. Index both by `frame`.
    y_approx = models[frame].predict(x_train)
    line_approx.set_data(x_train, y_approx)
    ax.set_title(f'Neurons in hidden layer: {n_neurons_list[frame]}')
    return line_approx,

# Create animation with increasing number of neurons in the hidden layer
# Animate one frame per trained model. `frames` must be a plain int (or an
# iterable); the original passed np.array(len(...)) — a 0-d array — which
# matplotlib only accepted by accident via __index__.
ani = FuncAnimation(fig, update, frames=len(models),
                    init_func=init, repeat=False)

# Save the sweep as a video (requires ffmpeg on PATH), then show it live.
# (Removed the dead `if True:` guard that wrapped this call.)
ani.save('uat-shallow-network.mp4', writer='ffmpeg', fps=3)

plt.show()