Fix tests
Orif Milod committed Mar 26, 2024
1 parent eb7e239 commit 2365645
Showing 3 changed files with 11 additions and 12 deletions.
1 change: 0 additions & 1 deletion gigatorch/nn.py
@@ -57,7 +57,6 @@ def __call__(self, x):

def calc_loss(self, ys, y_pred):
# Convertin y_pred to probabilities
- prob = [self.prob_fn(i, y_pred) for i in y_pred]
loss = sum(self.loss_fn(ys, y_pred), Tensor(0))
loss.backprop()
return loss.data
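Note: the deleted line appears to build a `prob` list that the remaining lines of calc_loss never read, so this hunk is dead-code removal. Restated below from the context lines above, with explanatory comments that are not part of the commit:

    def calc_loss(self, ys, y_pred):
        # Reduce the per-sample losses into a single Tensor; Tensor(0) seeds sum().
        loss = sum(self.loss_fn(ys, y_pred), Tensor(0))
        # Run reverse-mode autodiff from the loss, then return its raw value.
        loss.backprop()
        return loss.data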
4 changes: 2 additions & 2 deletions gigatorch/tensor.py
@@ -7,8 +7,8 @@
class Tensor:
"""stores a single scalar value and its gradient"""

- def __init__(self, data, _parents=[], _op=""):
- self.data = data.data if isinstance(data, Tensor) else np.array(data)
+ def __init__(self, input, _parents=[], _op=""):
+ self.data = input.data if isinstance(input, Tensor) else np.array(input)
self.grad = 0.0
self._backprop = lambda: None
self._parents = _parents
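Note: this hunk only renames the constructor parameter from `data` to `input`; the behaviour is unchanged. A minimal usage sketch of that constructor, assuming NumPy and the package layout shown in this diff (illustrative, not part of the commit):

    import numpy as np
    from gigatorch.tensor import Tensor

    t = Tensor(3.0)          # a plain value is wrapped via np.array(3.0)
    u = Tensor(t)            # an existing Tensor contributes its .data directly
    assert u.data is t.data  # the underlying array object is shared, not copied
    assert isinstance(t.data, np.ndarray)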
18 changes: 9 additions & 9 deletions tests/nn_test.py
@@ -81,7 +81,7 @@ def test_mlp_forward_pass():
model[2].weight = nn.Parameter(torch.Tensor(neuron_weigths[1]))
model[2].bias = nn.Parameter(torch.Tensor(neuron_biases[1]))

- expected = model(Tensor(x))
+ expected = model(torch.Tensor(x))

tol = 1e-6
for i in range(len(x)):
@@ -121,7 +121,7 @@ def test_mlp_backward_pass():
neuron_weigths = [
[[-1, 4, -3], [1, 0.5, -1]], # 1st layer # -> tanh(-0.25) # -> tanh(0.75)
[ # 2nd layer
- [0, 1], # (tanh(-0.25) * 0) + (tanh(-0.75) * 1) -> tanh(tanh(-0.75))
+ [0, 1], # (tanh(-0.25) * 0) + (tanh(-0.75) * 1)  ->  tanh(tanh(-0.75))
[-1, 1], # (tanh(-0.25) * -1) + (tanh(-0.75) * 1) -> tanh(0.39023028998)
[-1, 0], #
],
@@ -140,8 +140,8 @@ def test_mlp_backward_pass():
x = [2, -1.5, -2.5]
x_target = [1]

- my_output = mlp([Tensor(a) for a in x])[0]
- my_loss = mlp.calc_loss([Tensor(a) for a in x_target], [my_output])
+ result_output = mlp([Tensor(a) for a in x])[0]
+ result_loss = mlp.calc_loss([Tensor(a) for a in x_target], [result_output])

# PyTorch
model = nn.Sequential(
@@ -163,15 +163,15 @@ def test_mlp_backward_pass():
model[4].weight = nn.Parameter(torch.Tensor(neuron_weigths[2]))
model[4].bias = nn.Parameter(torch.Tensor(neuron_biases[2]))

- py_output = model(Tensor(x))
+ expected_output = model(torch.Tensor(x))
mse_loss = nn.MSELoss()

- py_loss = mse_loss(py_output, Tensor(x_target))
- py_loss.backward()
+ expected_loss = mse_loss(expected_output, torch.Tensor(x_target))
+ expected_loss.backward()

tol = 1e-6
- assert abs(py_loss.item() - my_loss) < tol
- assert abs(py_output.item() - my_output.data) < tol
+ assert abs(expected_loss.item() - result_loss) < tol
+ assert abs(expected_output.item() - result_output.data) < tol

for layer_index in range(len(neurons_per_layer)):
py_layer = model[layer_index * 2].weight.grad
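Note: across these hunks the fix feeds the PyTorch reference model a torch.Tensor instead of the project's own Tensor, and renames the my_*/py_* variables to result_*/expected_*. A minimal sketch of the comparison pattern the tests now follow (the model below is a stand-in, not the network built in the test):

    import torch
    import torch.nn as nn

    # The PyTorch reference model expects torch tensors, not gigatorch Tensors.
    model = nn.Sequential(nn.Linear(3, 1), nn.Tanh())
    expected_output = model(torch.Tensor([2.0, -1.5, -2.5]))

    # The gigatorch side would supply the result to compare; the reference value
    # is reused here purely to illustrate the tolerance check.
    result = expected_output.item()
    tol = 1e-6
    assert abs(expected_output.item() - result) < tol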
