Commit

#0: Fix CI: Don't cache weights for quick, AllPostCommit Llama test
cglagovichTT committed Dec 6, 2024
1 parent b756309 commit b16d446
Showing 1 changed file with 1 addition and 1 deletion.
2 changes: 1 addition & 1 deletion models/demos/llama3/tt/llama_embedding.py
@@ -23,7 +23,7 @@ def __init__(
 
         base_name = args.get_state_dict_prefix("", None) + "tok_embeddings.weight"
         torch_weight = self.state_dict[base_name].unsqueeze(0).unsqueeze(0)
-        cache_name = weight_cache_path / base_name
+        cache_name = None if args.dummy_weights else weight_cache_path / base_name
         self.weights = ttnn.as_tensor(
             torch_weight,
             dtype=dtype,
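For context, a minimal sketch of the pattern this change applies: when the quick AllPostCommit CI run uses dummy (randomly initialized) weights, the cache path is set to None so the converted tensor is not written to the on-disk weight cache and cannot be picked up later by a run using real checkpoint weights. The helper name load_embedding_weight, the device and layout arguments, and the cache_file_name keyword below are assumptions based on typical ttnn.as_tensor usage; they are not shown in this hunk.

# Hedged sketch, not the repository's exact code. Assumes ttnn.as_tensor
# accepts a cache_file_name argument and that args exposes a dummy_weights
# flag and get_state_dict_prefix(), as suggested by the diff context.
import ttnn


def load_embedding_weight(args, state_dict, weight_cache_path, device, dtype):
    base_name = args.get_state_dict_prefix("", None) + "tok_embeddings.weight"
    torch_weight = state_dict[base_name].unsqueeze(0).unsqueeze(0)
    # Skip the on-disk cache when running with dummy weights so a quick CI
    # test cannot populate the cache with random tensors.
    cache_name = None if args.dummy_weights else weight_cache_path / base_name
    return ttnn.as_tensor(
        torch_weight,
        dtype=dtype,
        device=device,
        layout=ttnn.ROW_MAJOR_LAYOUT,
        cache_file_name=cache_name,
    )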
