implement & test linreg

2026-02-01 12:54:47 +01:00
parent e41289fcb2
commit 6beb7a6311
2 changed files with 431 additions and 0 deletions

utils/linreg.odin (new file, 242 lines)

@@ -0,0 +1,242 @@
package utils
import "core:container/bit_array"
import "core:math"
import "core:math/rand"
// Solves Ax = b using Gaussian elimination with partial pivoting
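// Partial pivoting swaps the largest-magnitude remaining entry of each column
// into the pivot position, avoiding division by near-zero values (exercised
// by test_solve_needs_pivoting in linreg_test.odin).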
solve_linear_system :: proc(A: [][]f64, b: []f64) -> []f64 {
n := len(A)
// Create augmented matrix [A|b]
aug := make([][]f64, n)
for i in 0 ..< n {
aug[i] = make([]f64, n + 1)
copy(aug[i][:n], A[i])
aug[i][n] = b[i]
}
defer {
for row in aug {delete(row)}
delete(aug)
}
// Forward elimination with partial pivoting
for col in 0 ..< n {
// Find pivot (largest absolute value in column)
max_row := col
for row in col + 1 ..< n {
if math.abs(aug[row][col]) > math.abs(aug[max_row][col]) {
max_row = row
}
}
// Swap rows
if max_row != col {
aug[col], aug[max_row] = aug[max_row], aug[col]
}
// Check for singular matrix
if math.abs(aug[col][col]) < 1e-10 {
// Pivot is effectively zero: matrix is singular to working precision, return zero vector
x := make([]f64, n)
return x
}
// Eliminate column entries below pivot
for row in col + 1 ..< n {
factor := aug[row][col] / aug[col][col]
for j in col ..< n + 1 {
aug[row][j] -= factor * aug[col][j]
}
}
}
// Back substitution
x := make([]f64, n)
for i in 0 ..< n {
row := n - 1 - i // Process from bottom to top
x[row] = aug[row][n]
for j in row + 1 ..< n {
x[row] -= aug[row][j] * x[j]
}
x[row] /= aug[row][row]
}
return x
}
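// Usage sketch (mirrors test_solve_simple_2x2 in linreg_test.odin):
//   A := [][]f64{{2.0, 1.0}, {1.0, 3.0}}
//   b := []f64{5.0, 6.0}
//   x := solve_linear_system(A, b) // x == {1.8, 1.4}; caller owns x, delete(x)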
// Linear regression using normal equation: β = (X^T X)^-1 X^T y
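// Derivation sketch: minimizing ||Xβ - y||² sets the gradient 2XᵀXβ - 2Xᵀy to
// zero, giving the normal equations (XᵀX)β = Xᵀy; these are solved directly
// below rather than forming the inverse explicitly.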
train_linear_regression :: proc(X: [][]f64, y: []f64) -> []f64 {
n := len(X)
m := len(X[0])
// Compute X^T X
XtX := make([][]f64, m)
for i in 0 ..< m {
XtX[i] = make([]f64, m)
for j in 0 ..< m {
sum := 0.0
for k in 0 ..< n {
sum += X[k][i] * X[k][j]
}
XtX[i][j] = sum
}
}
defer {
for row in XtX {delete(row)}
delete(XtX)
}
// Compute X^T y
Xty := make([]f64, m)
defer delete(Xty)
for i in 0 ..< m {
sum := 0.0
for k in 0 ..< n {
sum += X[k][i] * y[k]
}
Xty[i] = sum
}
// Solve (X^T X) β = X^T y using Gaussian elimination
beta := solve_linear_system(XtX, Xty)
return beta
}
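// Usage sketch (mirrors test_train_with_intercept): prepend a column of 1s to fit an intercept:
//   X := [][]f64{{1.0, 1.0}, {1.0, 2.0}, {1.0, 3.0}} // [intercept, x]
//   y := []f64{5.0, 7.0, 9.0}                        // follows y = 2x + 3
//   beta := train_linear_regression(X, y)            // beta ≈ {3.0, 2.0}

// Computes predictions ŷ = Xβ (one dot product per row of X)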
predict :: proc(X: [][]f64, beta: []f64) -> []f64 {
predictions := make([]f64, len(X))
for i in 0 ..< len(X) {
sum := 0.0
for j in 0 ..< len(beta) {
sum += X[i][j] * beta[j]
}
predictions[i] = sum
}
return predictions
}
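// Root-mean-square error: sqrt(Σ(ŷᵢ - yᵢ)² / n)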
rmse :: proc(predictions: []f64, actual: []f64) -> f64 {
sum := 0.0
for i in 0 ..< len(predictions) {
diff := predictions[i] - actual[i]
sum += diff * diff
}
return math.sqrt(sum / f64(len(predictions)))
}
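// Randomly partitions (X, y) into train/test subsets; test_size is the
// fraction of rows assigned to the test set.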
train_test_split :: proc(
X: [][]f64,
y: []f64,
test_size: f64 = 0.2,
random_seed: u64 = 0,
) -> (
X_train, X_test: [][]f64,
y_train, y_test: []f64,
) {
n := len(X)
test_count := int(f64(n) * test_size)
train_count := n - test_count
// Create shuffled indices
indices := make([]int, n)
defer delete(indices)
for i in 0 ..< n {
indices[i] = i
}
// Shuffle indices deterministically from random_seed so splits are reproducible
rng := rand.create(random_seed)
rand.shuffle(indices[:], rand.default_random_generator(&rng))
// Allocate splits
X_train = make([][]f64, train_count)
X_test = make([][]f64, test_count)
y_train = make([]f64, train_count)
y_test = make([]f64, test_count)
// Copy training data
for i in 0 ..< train_count {
idx := indices[i]
X_train[i] = X[idx]
y_train[i] = y[idx]
}
// Copy test data
for i in 0 ..< test_count {
idx := indices[train_count + i]
X_test[i] = X[idx]
y_test[i] = y[idx]
}
return
}
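// Note: the splits alias rows of X (slice headers are copied, not row data),
// so callers should delete only the outer X_train/X_test slices, never the
// shared rows.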
// Extract columns based on bit_array chromosome
get_columns :: proc(X: [][]f64, chrom: ^bit_array.Bit_Array) -> [][]f64 {
n_rows := len(X)
n_cols := bit_array.len(chrom)
// Count selected features
selected_count := 0
for i in 0 ..< n_cols {
if bit_array.get(chrom, i) {
selected_count += 1
}
}
if selected_count == 0 {
return nil
}
// Create subset with only selected columns
subset := make([][]f64, n_rows)
for i in 0 ..< n_rows {
subset[i] = make([]f64, selected_count)
col_idx := 0
for j in 0 ..< n_cols {
if bit_array.get(chrom, j) {
subset[i][col_idx] = X[i][j]
col_idx += 1
}
}
}
return subset
}
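// Example: for a 3-bit chromosome with bits {1, 0, 1}, get_columns keeps
// columns 0 and 2 of X and drops column 1.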
// Fitness function for feature selection
get_fitness :: proc(
X: [][]f64,
y: []f64,
chrom: ^bit_array.Bit_Array,
random_seed: u64 = 0,
) -> f64 {
X_selected := get_columns(X, chrom)
if X_selected == nil {
return math.F64_MAX
}
defer {
for row in X_selected {delete(row)}
delete(X_selected)
}
// Split data
X_train, X_test, y_train, y_test := train_test_split(X_selected, y, 0.2, random_seed)
defer {
delete(X_train)
delete(X_test)
delete(y_train)
delete(y_test)
}
// Train model
beta := train_linear_regression(X_train, y_train)
defer delete(beta)
// Predict on test set
predictions := predict(X_test, beta)
defer delete(predictions)
// Return RMSE
return rmse(predictions, y_test)
}
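// Pipeline sketch (X_full, y_full, chrom are hypothetical placeholders; chrom
// carries one bit per column of X_full):
//   score := get_fitness(X_full, y_full, &chrom, 42)
//   // lower RMSE => fitter feature subset in a feature-selection search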

utils/linreg_test.odin (new file, 189 lines)

@@ -0,0 +1,189 @@
package utils
import "core:fmt"
import "core:math"
import "core:testing"
@(test)
test_solve_simple_2x2 :: proc(t: ^testing.T) {
// 2x2 system: 2x + y = 5, x + 3y = 6
A := [][]f64{{2.0, 1.0}, {1.0, 3.0}}
b := []f64{5.0, 6.0}
x := solve_linear_system(A, b)
defer delete(x)
testing.expect(t, math.abs(x[0] - 1.8) < 1e-10, "x[0] should be 1.8")
testing.expect(t, math.abs(x[1] - 1.4) < 1e-10, "x[1] should be 1.4")
}
@(test)
test_solve_identity :: proc(t: ^testing.T) {
A := [][]f64{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}
b := []f64{3.0, 7.0, -2.0}
x := solve_linear_system(A, b)
defer delete(x)
for i in 0 ..< 3 {
testing.expect(t, math.abs(x[i] - b[i]) < 1e-10)
}
}
@(test)
test_solve_needs_pivoting :: proc(t: ^testing.T) {
// System that requires pivoting for numerical stability
A := [][]f64{{0.0001, 1.0}, {1.0, 1.0}}
b := []f64{1.0, 2.0}
x := solve_linear_system(A, b)
defer delete(x)
// Verify Ax = b
result := make([]f64, 2)
defer delete(result)
for i in 0 ..< 2 {
sum := 0.0
for j in 0 ..< 2 {
sum += A[i][j] * x[j]
}
result[i] = sum
}
testing.expect(t, math.abs(result[0] - b[0]) < 1e-6)
testing.expect(t, math.abs(result[1] - b[1]) < 1e-6)
}
@(test)
test_solve_singular_matrix :: proc(t: ^testing.T) {
// Singular matrix (rows are linearly dependent)
A := [][]f64{{1.0, 2.0}, {2.0, 4.0}}
b := []f64{3.0, 6.0}
x := solve_linear_system(A, b)
defer delete(x)
// Should return zero vector for singular matrix
testing.expect_value(t, len(x), 2)
testing.expect(t, math.abs(x[0]) < 1e-10)
testing.expect(t, math.abs(x[1]) < 1e-10)
}
@(test)
test_solve_larger_system :: proc(t: ^testing.T) {
A := [][]f64 {
{4.0, 1.0, 2.0, 1.0},
{1.0, 5.0, 1.0, 2.0},
{2.0, 1.0, 6.0, 1.0},
{1.0, 2.0, 1.0, 7.0},
}
b := []f64{10.0, 12.0, 14.0, 16.0}
x := solve_linear_system(A, b)
defer delete(x)
// Verify Ax ≈ b
for i in 0 ..< 4 {
sum := 0.0
for j in 0 ..< 4 {
sum += A[i][j] * x[j]
}
testing.expect(t, math.abs(sum - b[i]) < 1e-8)
}
}
@(test)
test_train_linear_regression :: proc(t: ^testing.T) {
X := [][]f64{{1.0}, {2.0}, {3.0}, {4.0}, {5.0}}
y := []f64{5.0, 7.0, 9.0, 11.0, 13.0}
beta := train_linear_regression(X, y)
defer delete(beta)
fmt.printfln("beta = %v", beta)
// The data follows y = 2x + 3, but X has no intercept column, so the
// best fit through the origin minimizes Σ(y - βx)²:
// β = Σ(xy) / Σ(x²) = (1*5 + 2*7 + 3*9 + 4*11 + 5*13) / (1 + 4 + 9 + 16 + 25)
// = (5 + 14 + 27 + 44 + 65) / 55 = 155 / 55 ≈ 2.818
testing.expect(t, math.abs(beta[0] - 2.818) < 0.01, "slope should be ~2.818")
}
@(test)
test_train_with_intercept :: proc(t: ^testing.T) {
// Dataset with intercept column: y = 2x + 3
X := [][]f64 {
{1.0, 1.0}, // [intercept, x]
{1.0, 2.0},
{1.0, 3.0},
{1.0, 4.0},
{1.0, 5.0},
}
y := []f64{5.0, 7.0, 9.0, 11.0, 13.0}
beta := train_linear_regression(X, y)
defer delete(beta)
testing.expect(t, math.abs(beta[0] - 3.0) < 1e-6, "intercept should be 3.0")
testing.expect(t, math.abs(beta[1] - 2.0) < 1e-6, "slope should be 2.0")
}
@(test)
test_predict :: proc(t: ^testing.T) {
X := [][]f64{{1.0, 1.0}, {1.0, 2.0}, {1.0, 3.0}}
beta := []f64{3.0, 2.0} // y = 2x + 3
predictions := predict(X, beta)
defer delete(predictions)
expected := []f64{5.0, 7.0, 9.0}
for i in 0 ..< len(predictions) {
testing.expect(t, math.abs(predictions[i] - expected[i]) < 1e-10)
}
}
@(test)
test_rmse :: proc(t: ^testing.T) {
predictions := []f64{5.0, 7.0, 9.0}
actual := []f64{5.1, 6.9, 9.2}
error := rmse(predictions, actual)
// RMSE = sqrt((0.1² + 0.1² + 0.2²) / 3) = sqrt(0.06/3) ≈ 0.141
testing.expect(t, math.abs(error - 0.141) < 0.01, "RMSE should be ~0.141")
}
@(test)
test_rmse_perfect_fit :: proc(t: ^testing.T) {
predictions := []f64{1.0, 2.0, 3.0}
actual := []f64{1.0, 2.0, 3.0}
error := rmse(predictions, actual)
testing.expect(t, error < 1e-10, "RMSE should be 0 for perfect fit")
}
@(test)
test_full_pipeline :: proc(t: ^testing.T) {
// Train on y = 3x + 2
X_train := [][]f64{{1.0, 1.0}, {1.0, 2.0}, {1.0, 3.0}, {1.0, 4.0}}
y_train := []f64{5.0, 8.0, 11.0, 14.0}
// Test data
X_test := [][]f64{{1.0, 5.0}, {1.0, 6.0}}
y_test := []f64{17.0, 20.0}
// Train
beta := train_linear_regression(X_train, y_train)
defer delete(beta)
// Predict
predictions := predict(X_test, beta)
defer delete(predictions)
// Evaluate
error := rmse(predictions, y_test)
testing.expect(t, error < 1e-6, "Should have near-zero error on linear data")
}