#!/usr/bin/env python3
# HF falcon --> gguf conversion
# HF gptneox --> gguf conversion
# HF llama --> gguf conversion
# 7B pth llama --> gguf conversion
# Only models with a single data file are supported, like 7B
# HF files required in the model dir: config.json tokenizer_config.json tokenizer.json tokenizer.model
import gguf
import sys
import struct
import math
import argparse
from pathlib import Path
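
# A minimal sketch (not part of the original scripts) of validating the model
# directory against the required-files list above before converting; the
# helper name and the error handling are assumptions for illustration.
def check_model_dir(model_dir: Path) -> None:
    required = ["config.json", "tokenizer_config.json", "tokenizer.json", "tokenizer.model"]
    missing = [name for name in required if not (model_dir / name).exists()]
    if missing:
        # abort early with a clear message instead of failing mid-conversion
        sys.exit(f"Missing required HF files in {model_dir}: {', '.join(missing)}")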
import json
import os
import re
import argparse
import ctypes
from ctypes import cdll, c_char_p, c_void_p, POINTER, c_float, c_int
import numpy as np
import sys
sys.path.insert(0, os.path.dirname(__file__))
import matplotlib.pyplot as plt
import csv
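
# The ctypes imports above are the usual way to call into a native llama
# shared library from Python. The sketch below is illustrative only: the
# library path and the bound function are assumptions, not the original
# script's actual bindings.
def load_llama_lib(path="./libllama.so"):
    lib = cdll.LoadLibrary(path)                      # load the native library
    lib.llama_print_system_info.argtypes = []         # hypothetical binding
    lib.llama_print_system_info.restype = c_char_p    # returns a C string
    return lib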
"""
This script converts Hugging Face llama models to GGML and quantizes them.
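
# A rough sketch of the convert-then-quantize flow described in the docstring;
# the script and binary paths ("convert.py", "./quantize"), the flags, and the
# default quantization type are assumptions for illustration, not the exact
# commands this script runs.
import subprocess

def convert_and_quantize(model_dir, outtype="q4_0"):
    fp16_path = os.path.join(model_dir, "ggml-model-f16.bin")
    quant_path = os.path.join(model_dir, f"ggml-model-{outtype}.bin")
    # 1) convert the HF checkpoint to an fp16 GGML file
    subprocess.run(["python3", "convert.py", model_dir, "--outfile", fp16_path], check=True)
    # 2) quantize the fp16 file down to the requested type
    subprocess.run(["./quantize", fp16_path, quant_path, outtype], check=True)
    return quant_path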
#!/bin/bash
cd "$(dirname "$0")"
from flask import Flask, jsonify, request, Response
import urllib.parse
import shutil
import struct
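
# Minimal illustration of how the Flask imports above are typically wired up;
# the route, the response payload, and the port are assumptions, not the
# original server's actual API.
app = Flask(__name__)

@app.route("/health", methods=["GET"])
def health():
    # report that the server is up; real endpoints would invoke the model here
    return jsonify({"status": "ok"})

if __name__ == "__main__":
    app.run(host="127.0.0.1", port=8080)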