From b16d446827b110b15aa09f3edb4fb51ce90c9a95 Mon Sep 17 00:00:00 2001
From: Colman Glagovich
Date: Fri, 6 Dec 2024 11:54:12 -0800
Subject: [PATCH] #0: Fix CI: Don't cache weights for quick, AllPostCommit
 Llama test

---
 models/demos/llama3/tt/llama_embedding.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/models/demos/llama3/tt/llama_embedding.py b/models/demos/llama3/tt/llama_embedding.py
index a89ac40db9a..672f0b980f1 100644
--- a/models/demos/llama3/tt/llama_embedding.py
+++ b/models/demos/llama3/tt/llama_embedding.py
@@ -23,7 +23,7 @@ def __init__(
 
         base_name = args.get_state_dict_prefix("", None) + "tok_embeddings.weight"
         torch_weight = self.state_dict[base_name].unsqueeze(0).unsqueeze(0)
-        cache_name = weight_cache_path / base_name
+        cache_name = None if args.dummy_weights else weight_cache_path / base_name
         self.weights = ttnn.as_tensor(
             torch_weight,
             dtype=dtype,
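
Note on the change (reviewer sketch, not part of the patch): cache_name is
passed to ttnn.as_tensor as its cache file name, and as I understand it a
value of None (the parameter's default) disables the on-disk weight cache.
So when the quick AllPostCommit run sets args.dummy_weights, this guard keeps
randomly initialized dummy weights from being written to, or read back from,
the shared cache. A minimal, self-contained sketch of the guard below uses
resolve_embedding_cache_name as a hypothetical helper name; the diff itself
inlines the expression.

from pathlib import Path
from typing import Optional

def resolve_embedding_cache_name(
    dummy_weights: bool, weight_cache_path: Path, base_name: str
) -> Optional[Path]:
    # Quick CI runs use dummy (random) weights. Returning None means the
    # loader gets cache_file_name=None and skips the disk cache entirely,
    # so dummy tensors are never persisted where a real run could load them.
    return None if dummy_weights else weight_cache_path / base_name

# A dummy-weights run bypasses the cache; a real run resolves a cache path.
assert resolve_embedding_cache_name(True, Path("/tmp/wc"), "tok_embeddings.weight") is None
assert resolve_embedding_cache_name(False, Path("/tmp/wc"), "tok_embeddings.weight") == Path(
    "/tmp/wc/tok_embeddings.weight"
)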