Reduce tolerance on model inference 'owl' test, pillow output varies a lot, was failing locally
rwightman committed Nov 26, 2024
1 parent c5690c0 commit 191755f
Showing 1 changed file with 8 additions and 8 deletions.
16 changes: 8 additions & 8 deletions tests/test_models.py
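For reference (illustrative, not part of the commit): torch.allclose(a, b, rtol, atol) passes when |a - b| <= atol + rtol * |b| holds elementwise, which is the check the tol tuple in the diff below feeds into. A minimal sketch, using made-up numbers, of why a small drift fails the strict (1e-3, 1e-4) pair but passes the relaxed (1e-1, 1e-1) pair:

import torch

# Minimal sketch (not from the repository): a reference tensor vs a slightly
# shifted copy, standing in for outputs from two different preprocessing runs.
reference = torch.tensor([1.0, 2.0, 3.0])
shifted = reference + 0.02  # made-up 0.02 drift, for illustration only

# Strict tolerance: 0.02 exceeds atol + rtol * |b| = 1e-4 + 1e-3 * |b|, so this fails.
print(torch.allclose(shifted, reference, rtol=1e-3, atol=1e-4))  # False

# Relaxed tolerance used for the re-processed image: 0.02 is well inside 1e-1 + 1e-1 * |b|.
print(torch.allclose(shifted, reference, rtol=1e-1, atol=1e-1))  # True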
@@ -146,21 +146,21 @@ def test_model_inference(model_name, batch_size):
     rand_output = model(rand_tensors['input'])
     rand_features = model.forward_features(rand_tensors['input'])
     rand_pre_logits = model.forward_head(rand_features, pre_logits=True)
-    assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4)
-    assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4)
-    assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4)
+    assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4), 'rand output does not match'
+    assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4), 'rand features do not match'
+    assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4), 'rand pre_logits do not match'

-    def _test_owl(owl_input):
+    def _test_owl(owl_input, tol=(1e-3, 1e-4)):
         owl_output = model(owl_input)
         owl_features = model.forward_features(owl_input)
         owl_pre_logits = model.forward_head(owl_features.clone(), pre_logits=True)
         assert owl_output.softmax(1).argmax(1) == 24  # owl
-        assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-4)
-        assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-4)
-        assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-4)
+        assert torch.allclose(owl_output, owl_tensors['output'], rtol=tol[0], atol=tol[1]), 'owl output does not match'
+        assert torch.allclose(owl_features, owl_tensors['features'], rtol=tol[0], atol=tol[1]), 'owl features do not match'
+        assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=tol[0], atol=tol[1]), 'owl pre_logits do not match'

     _test_owl(owl_tensors['input'])  # test with original pp owl tensor
-    _test_owl(pp(test_owl).unsqueeze(0))  # re-process from original jpg
+    _test_owl(pp(test_owl).unsqueeze(0), tol=(1e-1, 1e-1))  # re-process from original jpg, Pillow output can change a lot btw ver


@pytest.mark.base
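And a rough way to see the kind of preprocessing drift the commit message refers to (illustrative only, not from the repository): resizing the same image with two different Pillow resampling filters, standing in here for resize behaviour changing across Pillow versions, already produces per-pixel differences far larger than the old atol of 1e-4.

from PIL import Image
import numpy as np

# Illustrative sketch: 'owl.jpg' is a placeholder path, and the two filters
# stand in for resize output shifting between Pillow versions.
img = Image.open('owl.jpg').convert('RGB')
a = np.asarray(img.resize((224, 224), Image.BILINEAR), dtype=np.float32) / 255.0
b = np.asarray(img.resize((224, 224), Image.BICUBIC), dtype=np.float32) / 255.0
print(np.abs(a - b).max())  # typically well above 1e-4 for natural images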
