Quellcode durchsuchen

convert : only check for tokenizer folder if we need it (#14704)

Sigbjørn Skjæret vor 6 Monaten
Ursprung
Commit
4b91d6f71f
1 geänderte Datei mit 6 neuen und 5 gelöschten Zeilen
  1. 6 5
      convert_hf_to_gguf_update.py

+ 6 - 5
convert_hf_to_gguf_update.py

@@ -240,11 +240,6 @@ for model in [*pre_computed_hashes, *all_models]:
     if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM:
         continue
 
-    # Skip if the tokenizer folder does not exist or there are other download issues previously
-    if not os.path.exists(f"models/tokenizers/{name}"):
-        logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
-        continue
-
     # create the tokenizer
     if chkhsh is not None:
         # if the model has a pre-computed hash, use it
@@ -254,6 +249,12 @@ for model in [*pre_computed_hashes, *all_models]:
         chkhsh = existing_models[name]
     else:
         # otherwise, compute the hash of the tokenizer
+
+        # Skip if the tokenizer folder does not exist or there are other download issues previously
+        if not os.path.exists(f"models/tokenizers/{name}"):
+            logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+            continue
+
         try:
             logger.info(f"Loading tokenizer from {f'models/tokenizers/{name}'}...")
             if name == "t5":