package tests

import (
	"math"
	"testing"

	"makarna/pkg/backend/cpu"
	"makarna/pkg/backend/cpu/matmul"
	"makarna/pkg/backend/cpu/nn"
	"makarna/pkg/tensor"
)

// Helper for approximate float comparison
func assertNearlyEqual(t *testing.T, expected, actual []float32, epsilon float32, msg string) {
	if len(expected) != len(actual) {
		t.Fatalf("%s: length mismatch: expected %d, got %d", msg, len(expected), len(actual))
	}
	for i := range expected {
		diff := float32(math.Abs(float64(expected[i] - actual[i])))
		if diff > epsilon {
			t.Errorf("%s: element %d mismatch: expected %f, got %f (diff %f > %f)", msg, i, expected[i], actual[i], diff, epsilon)
			return // Fail fast
		}
	}
}

func TestLinear_F32(t *testing.T) {
	// A: [2, 3]
	// W: [2, 3] (2 output features, 3 input features)
	// C: [2, 2]
	aData := []float32{
		1, 2, 3,
		4, 5, 6,
	}
	wData := []float32{
		0.1, 0.2, 0.3, // Neuron 0
		-0.1, -0.2, -0.3, // Neuron 1
	}

	a := cpu.NewTensor(tensor.Shape{2, 3}, aData)
	w := cpu.NewTensor(tensor.Shape{2, 3}, wData)
	c := cpu.NewTensor(tensor.Shape{2, 2}, nil)

	if err := matmul.Linear(a, w, c); err != nil {
		t.Fatalf("Linear failed: %v", err)
	}

	// Expected:
	// Row 0:
	//   N0: 1*0.1 + 2*0.2 + 3*0.3 = 0.1+0.4+0.9 = 1.4
	//   N1: 1*-0.1 + 2*-0.2 + 3*-0.3 = -0.1-0.4-0.9 = -1.4
	// Row 1:
	//   N0: 4*0.1 + 5*0.2 + 6*0.3 = 0.4+1.0+1.8 = 3.2
	//   N1: 4*-0.1 + 5*-0.2 + 6*-0.3 = -0.4-1.0-1.8 = -3.2
	expected := []float32{1.4, -1.4, 3.2, -3.2}
	assertNearlyEqual(t, expected, c.DataFloat32(), 1e-5, "Linear F32")
}

func TestEmbedding(t *testing.T) {
	// Vocab: 4, Dim: 3
	weights := []float32{
		0, 0, 0, // ID 0
		1, 1, 1, // ID 1
		2, 2, 2, // ID 2
		3, 3, 3, // ID 3
	}
	w := cpu.NewTensor(tensor.Shape{4, 3}, weights)

	// Sequence: [1, 3]
	ids := []int{1, 3}
	out := cpu.NewTensor(tensor.Shape{2, 3}, nil)

	if err := nn.Embedding(ids, w, out); err != nil {
		t.Fatalf("Embedding failed: %v", err)
	}

	expected := []float32{
		1, 1, 1,
		3, 3, 3,
	}
	assertNearlyEqual(t, expected, out.DataFloat32(), 1e-5, "Embedding")
}

func TestRMSNorm(t *testing.T) {
	// x: [1, 4] = [1, 2, 3, 4]
	// mean square = (1+4+9+16)/4 = 30/4 = 7.5
	// rms = sqrt(7.5 + eps) ~= 2.7386
	// weight: [1, 1, 1, 1]
	xData := []float32{1, 2, 3, 4}
	x := cpu.NewTensor(tensor.Shape{1, 4}, xData)
	w := cpu.NewTensor(tensor.Shape{4}, []float32{1, 1, 1, 1})

	err := nn.RMSNorm(x, w, 1e-5)
	if err != nil {
		t.Fatalf("RMSNorm failed: %v", err)
	}

	// Manual calc (checked against Python): x / sqrt(mean(x**2) + eps)
	// 1/2.7386 = 0.3651
	// 2/2.7386 = 0.7303
	// ...
	expected := []float32{
		0.365148, 0.730297, 1.095445, 1.460593,
	}
	assertNearlyEqual(t, expected, x.DataFloat32(), 1e-4, "RMSNorm")
}

func TestSoftmax(t *testing.T) {
	// Logits: [0, 1, 2]
	// exp: [1, 2.718, 7.389]
	// sum: 11.107
	// prob: [0.090, 0.244, 0.665]
	data := []float32{0, 1, 2}
	x := cpu.NewTensor(tensor.Shape{3}, data)

	nn.Softmax(x)

	expected := []float32{0.09003057, 0.24472847, 0.66524096}
	assertNearlyEqual(t, expected, x.DataFloat32(), 1e-5, "Softmax")
}
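
// Supplementary sketch (not part of the original suite): a property-style check
// that softmax output is a valid probability distribution, i.e. every element
// lies in [0, 1] and the elements sum to ~1. It uses only APIs already
// exercised above (cpu.NewTensor, tensor.Shape, nn.Softmax, DataFloat32) and
// assumes nn.Softmax normalizes a 1-D tensor in place, as TestSoftmax does.
func TestSoftmax_SumsToOne(t *testing.T) {
	data := []float32{-1.5, 0.25, 3.0, 2.0}
	x := cpu.NewTensor(tensor.Shape{4}, data)

	nn.Softmax(x)

	var sum float32
	for i, p := range x.DataFloat32() {
		if p < 0 || p > 1 {
			t.Errorf("element %d out of range [0, 1]: %f", i, p)
		}
		sum += p
	}
	if diff := float32(math.Abs(float64(sum - 1))); diff > 1e-5 {
		t.Errorf("probabilities should sum to 1, got %f (diff %f)", sum, diff)
	}
}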