From 14764aa2e2e2b282c4a4dffbfab4c01d3e46e8a7 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 22 Jul 2024 12:21:45 -0400
Subject: [PATCH] Rename LLAMATokenizer to SPieceTokenizer.

---
 comfy/text_encoders/aura_t5.py                                | 4 ++--
 .../text_encoders/{llama_tokenizer.py => spiece_tokenizer.py} | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
 rename comfy/text_encoders/{llama_tokenizer.py => spiece_tokenizer.py} (90%)

diff --git a/comfy/text_encoders/aura_t5.py b/comfy/text_encoders/aura_t5.py
index 6b9e4fe537c..409867af3d0 100644
--- a/comfy/text_encoders/aura_t5.py
+++ b/comfy/text_encoders/aura_t5.py
@@ -1,5 +1,5 @@
 from comfy import sd1_clip
-from .llama_tokenizer import LLAMATokenizer
+from .spiece_tokenizer import SPieceTokenizer
 import comfy.text_encoders.t5
 import os
 
@@ -11,7 +11,7 @@ def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None):
 class PT5XlTokenizer(sd1_clip.SDTokenizer):
     def __init__(self, embedding_directory=None):
         tokenizer_path = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_pile_tokenizer"), "tokenizer.model")
-        super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='pile_t5xl', tokenizer_class=LLAMATokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, pad_token=1)
+        super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='pile_t5xl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, pad_token=1)
 
 class AuraT5Tokenizer(sd1_clip.SD1Tokenizer):
     def __init__(self, embedding_directory=None):
diff --git a/comfy/text_encoders/llama_tokenizer.py b/comfy/text_encoders/spiece_tokenizer.py
similarity index 90%
rename from comfy/text_encoders/llama_tokenizer.py
rename to comfy/text_encoders/spiece_tokenizer.py
index a6db1da629c..d611d5bb76e 100644
--- a/comfy/text_encoders/llama_tokenizer.py
+++ b/comfy/text_encoders/spiece_tokenizer.py
@@ -1,9 +1,9 @@
 import os
 
-class LLAMATokenizer:
+class SPieceTokenizer:
     @staticmethod
     def from_pretrained(path):
-        return LLAMATokenizer(path)
+        return SPieceTokenizer(path)
 
     def __init__(self, tokenizer_path):
         import sentencepiece
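
Note: the renamed SPieceTokenizer is a thin wrapper around the
sentencepiece library, exposing a from_pretrained() entry point so it
can be passed as tokenizer_class to sd1_clip.SDTokenizer. The hunk
above ends at the "import sentencepiece" line inside __init__; the
sketch below shows how such a wrapper is typically completed. Only the
lines shown in the diff are verbatim; the processor loading and the
__call__ method are assumptions for illustration, not the file's
actual contents.

    class SPieceTokenizer:
        @staticmethod
        def from_pretrained(path):
            return SPieceTokenizer(path)

        def __init__(self, tokenizer_path):
            # Deferred import keeps sentencepiece an optional dependency,
            # matching the context line shown in the diff.
            import sentencepiece
            # Assumption: load the .model file with the standard
            # SentencePieceProcessor constructor.
            self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=tokenizer_path)

        def __call__(self, string):
            # Hypothetical: mimic the Hugging Face tokenizer call
            # convention by returning a dict with "input_ids".
            return {"input_ids": self.tokenizer.encode(string)}

    # Usage sketch (path taken from the aura_t5.py hunk above):
    # tok = SPieceTokenizer.from_pretrained("t5_pile_tokenizer/tokenizer.model")
    # ids = tok("a photo of a cat")["input_ids"]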