import numpy as np
from pyswarm import pso  # Install using `pip install pyswarm`
import matplotlib.pyplot as plt
# Define the neural network
def neural_network(weights, biases, X):
    layer_1 = np.dot(X, weights[0]) + biases[0]
    activation_1 = np.tanh(layer_1)  # Example activation function
    output = np.dot(activation_1, weights[1]) + biases[1]
    return output
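# --- Added sketch (not in the original paste): a quick shape check of the forward
# --- pass, assuming the same 3-4-1 architecture used further down. The _demo_*
# --- names are illustrative only.
_demo_X = np.zeros((5, 3))                            # 5 samples, 3 features
_demo_weights = [np.zeros((3, 4)), np.zeros((4, 1))]  # hidden and output weights
_demo_biases = [np.zeros(4), np.zeros(1)]             # hidden and output biases
assert neural_network(_demo_weights, _demo_biases, _demo_X).shape == (5, 1)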
# Define the loss function
def loss_function(params, X, y):
    num_weights = [(X.shape[1], 4), (4, 1)]  # Example architecture: input-4-1
    weights = [
        params[:np.prod(num_weights[0])].reshape(num_weights[0]),
        params[np.prod(num_weights[0]):np.prod(num_weights[0]) + np.prod(num_weights[1])].reshape(num_weights[1]),
    ]
    biases = [params[-5:-1], params[-1:]]  # 4 hidden-layer biases, then 1 output bias
    predictions = neural_network(weights, biases, X)
    return np.mean((predictions - y) ** 2)
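# --- Added note (illustration only): layout of the flat parameter vector for the
# --- 3-4-1 network above. The slice names are hypothetical, shown for clarity.
_param_layout = {
    "W_hidden": slice(0, 12),   # reshaped to (3, 4)
    "W_output": slice(12, 16),  # reshaped to (4, 1)
    "b_hidden": slice(16, 20),  # equivalently params[-5:-1]
    "b_output": slice(20, 21),  # equivalently params[-1:]
}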
# Generate some example data
np.random.seed(42)
X = np.random.rand(100, 3)
y = np.random.rand(100, 1)
# PSO optimization
losses = []  # To store every loss evaluated during PSO (one entry per particle evaluation)
def objective_function(params):
    loss = loss_function(params, X, y)
    losses.append(loss)  # Log the loss for this particle evaluation
    return loss
# Define bounds for weights and biases
num_weights = [(3, 4), (4, 1)]  # Example architecture: 3 inputs -> 4 hidden -> 1 output
total_params = sum(np.prod(w) for w in num_weights) + 5  # + 5 biases (4 hidden + 1 output)
lb = -1 * np.ones(total_params)  # Lower bounds
ub = 1 * np.ones(total_params)   # Upper bounds
# Run PSO
best_params, _ = pso(objective_function, lb, ub, swarmsize=50, maxiter=100)
# Brute-force refinement around the PSO result: scan a single shared offset
# added to every parameter (a coarse 1-D line search along the all-ones direction)
def brute_force_search(best_params, step_size=0.01, radius=0.1):
    param_range = np.arange(-radius, radius + step_size, step_size)
    brute_losses = []  # To store losses in brute-force refinement
    best_loss = float('inf')
    best_solution = None
    for delta in param_range:
        candidate_params = best_params + delta  # shift every parameter by the same offset
        current_loss = loss_function(candidate_params, X, y)
        brute_losses.append(current_loss)  # Log loss
        if current_loss < best_loss:
            best_loss = current_loss
            best_solution = candidate_params
    return best_solution, best_loss, brute_losses
# Perform brute-force refinement
refined_params, refined_loss, brute_losses = brute_force_search(best_params)
print("Refined loss:", refined_loss)
# Plot 1 - MSE (used here as a simple accuracy proxy) over function evaluations
plt.figure(figsize=(10, 6))
plt.plot(range(len(losses)), 1 - np.array(losses), label="PSO (1 - MSE)", marker='o')
plt.plot(
    range(len(losses), len(losses) + len(brute_losses)),
    1 - np.array(brute_losses),
    label="Brute-Force (1 - MSE)",
    marker='x',
)
plt.axhline(1 - refined_loss, color="r", linestyle="--", label="Final (1 - MSE)")
plt.xlabel("Function Evaluation")
plt.ylabel("1 - MSE (accuracy proxy)")
plt.title("Accuracy Proxy (1 - MSE) Over Function Evaluations")
plt.legend()
plt.grid(True)
plt.show()