#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# type: ignore[reportUnusedImport]

import subprocess
import os
import re
import json
import sys
import requests
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import (
    Any,
    Callable,
    ContextManager,
    Iterable,
    Iterator,
    List,
    Literal,
    Tuple,
    Set,
)
from re import RegexFlag
import wget


DEFAULT_HTTP_TIMEOUT = 12

if "LLAMA_SANITIZE" in os.environ or "GITHUB_ACTION" in os.environ:
    DEFAULT_HTTP_TIMEOUT = 30


class ServerResponse:
    headers: dict
    status_code: int
    body: dict | Any


class ServerProcess:
    # default options
    debug: bool = False
    server_port: int = 8080
    server_host: str = "127.0.0.1"
    model_hf_repo: str = "ggml-org/models"
    model_hf_file: str | None = "tinyllamas/stories260K.gguf"
    model_alias: str | None = "tinyllama-2"
    temperature: float = 0.8
    seed: int = 42

    # custom options
    model_url: str | None = None
    model_file: str | None = None
    model_draft: str | None = None
    n_threads: int | None = None
    n_gpu_layer: int | None = None
    n_batch: int | None = None
    n_ubatch: int | None = None
    n_ctx: int | None = None
    n_ga: int | None = None
    n_ga_w: int | None = None
    n_predict: int | None = None
    n_prompts: int | None = 0
    slot_save_path: str | None = None
    id_slot: int | None = None
    cache_prompt: bool | None = None
    n_slots: int | None = None
    ctk: str | None = None
    ctv: str | None = None
    fa: bool | None = None
    server_continuous_batching: bool | None = False
    server_embeddings: bool | None = False
    server_reranking: bool | None = False
    server_metrics: bool | None = False
    server_slots: bool | None = False
    pooling: str | None = None
    draft: int | None = None
    api_key: str | None = None
    lora_files: List[str] | None = None
    disable_ctx_shift: bool | None = False
    draft_min: int | None = None
    draft_max: int | None = None
    no_webui: bool | None = None
    jinja: bool | None = None
    reasoning_format: Literal['deepseek', 'none'] | None = None
    chat_template: str | None = None
    chat_template_file: str | None = None
    server_path: str | None = None

    # session variables
    process: subprocess.Popen | None = None

    def __init__(self):
        if "N_GPU_LAYERS" in os.environ:
            self.n_gpu_layer = int(os.environ["N_GPU_LAYERS"])
        if "DEBUG" in os.environ:
            self.debug = True
        if "PORT" in os.environ:
            self.server_port = int(os.environ["PORT"])

    def start(self, timeout_seconds: int | None = DEFAULT_HTTP_TIMEOUT) -> None:
        if self.server_path is not None:
            server_path = self.server_path
        elif "LLAMA_SERVER_BIN_PATH" in os.environ:
            server_path = os.environ["LLAMA_SERVER_BIN_PATH"]
        elif os.name == "nt":
            server_path = "../../../build/bin/Release/llama-server.exe"
        else:
            server_path = "../../../build/bin/llama-server"
        server_args = [
            "--host",
            self.server_host,
            "--port",
            self.server_port,
            "--temp",
            self.temperature,
            "--seed",
            self.seed,
        ]
        if self.model_file:
            server_args.extend(["--model", self.model_file])
        if self.model_url:
            server_args.extend(["--model-url", self.model_url])
        if self.model_draft:
            server_args.extend(["--model-draft", self.model_draft])
        if self.model_hf_repo:
            server_args.extend(["--hf-repo", self.model_hf_repo])
        if self.model_hf_file:
            server_args.extend(["--hf-file", self.model_hf_file])
        if self.n_batch:
            server_args.extend(["--batch-size", self.n_batch])
        if self.n_ubatch:
            server_args.extend(["--ubatch-size", self.n_ubatch])
        if self.n_threads:
            server_args.extend(["--threads", self.n_threads])
        if self.n_gpu_layer:
            server_args.extend(["--n-gpu-layers", self.n_gpu_layer])
        if self.draft is not None:
            server_args.extend(["--draft", self.draft])
        if self.server_continuous_batching:
            server_args.append("--cont-batching")
        if self.server_embeddings:
            server_args.append("--embedding")
        if self.server_reranking:
            server_args.append("--reranking")
        if self.server_metrics:
            server_args.append("--metrics")
        if self.server_slots:
            server_args.append("--slots")
        if self.pooling:
            server_args.extend(["--pooling", self.pooling])
        if self.model_alias:
            server_args.extend(["--alias", self.model_alias])
        if self.n_ctx:
            server_args.extend(["--ctx-size", self.n_ctx])
        if self.n_slots:
            server_args.extend(["--parallel", self.n_slots])
        if self.ctk:
            server_args.extend(["-ctk", self.ctk])
        if self.ctv:
            server_args.extend(["-ctv", self.ctv])
        if self.fa is not None:
            server_args.append("-fa")
        if self.n_predict:
            server_args.extend(["--n-predict", self.n_predict])
        if self.slot_save_path:
            server_args.extend(["--slot-save-path", self.slot_save_path])
        if self.n_ga:
            server_args.extend(["--grp-attn-n", self.n_ga])
        if self.n_ga_w:
            server_args.extend(["--grp-attn-w", self.n_ga_w])
        if self.debug:
            server_args.append("--verbose")
        if self.lora_files:
            for lora_file in self.lora_files:
                server_args.extend(["--lora", lora_file])
        if self.disable_ctx_shift:
            server_args.extend(["--no-context-shift"])
        if self.api_key:
            server_args.extend(["--api-key", self.api_key])
        if self.draft_max:
            server_args.extend(["--draft-max", self.draft_max])
        if self.draft_min:
            server_args.extend(["--draft-min", self.draft_min])
        if self.no_webui:
            server_args.append("--no-webui")
        if self.jinja:
            server_args.append("--jinja")
        if self.reasoning_format is not None:
            server_args.extend(["--reasoning-format", self.reasoning_format])
        if self.chat_template:
            server_args.extend(["--chat-template", self.chat_template])
        if self.chat_template_file:
            server_args.extend(["--chat-template-file", self.chat_template_file])

        args = [str(arg) for arg in [server_path, *server_args]]
        print(f"tests: starting server with: {' '.join(args)}")

        flags = 0
        if "nt" == os.name:
            flags |= subprocess.DETACHED_PROCESS
            flags |= subprocess.CREATE_NEW_PROCESS_GROUP
            flags |= subprocess.CREATE_NO_WINDOW

        self.process = subprocess.Popen(
            args,
            creationflags=flags,
            stdout=sys.stdout,
            stderr=sys.stdout,
            env={**os.environ, "LLAMA_CACHE": "tmp"} if "LLAMA_CACHE" not in os.environ else None,
        )
        server_instances.add(self)
        print(f"server pid={self.process.pid}, pytest pid={os.getpid()}")

        # wait for the server to start
        start_time = time.time()
        while time.time() - start_time < timeout_seconds:
            try:
                response = self.make_request("GET", "/health", headers={
                    "Authorization": f"Bearer {self.api_key}" if self.api_key else None
                })
                if response.status_code == 200:
                    self.ready = True
                    return  # server is ready
            except Exception:
                pass
            # check if the process died while we were waiting
            if self.process.poll() is not None:
                raise RuntimeError(f"Server process died with return code {self.process.returncode}")
            print("Waiting for server to start...")
            time.sleep(0.5)
        raise TimeoutError(f"Server did not start within {timeout_seconds} seconds")

    def stop(self) -> None:
        if self in server_instances:
            server_instances.remove(self)
        if self.process:
            print(f"Stopping server with pid={self.process.pid}")
            self.process.kill()
            self.process = None

    def make_request(
        self,
        method: str,
        path: str,
        data: dict | Any | None = None,
        headers: dict | None = None,
        timeout: float | None = None,
    ) -> ServerResponse:
        url = f"http://{self.server_host}:{self.server_port}{path}"
        parse_body = False
        if method == "GET":
            response = requests.get(url, headers=headers, timeout=timeout)
            parse_body = True
        elif method == "POST":
            response = requests.post(url, headers=headers, json=data, timeout=timeout)
            parse_body = True
        elif method == "OPTIONS":
            response = requests.options(url, headers=headers, timeout=timeout)
        else:
            raise ValueError(f"Unimplemented method: {method}")
        result = ServerResponse()
        result.headers = dict(response.headers)
        result.status_code = response.status_code
        result.body = response.json() if parse_body else None
        print("Response from server", json.dumps(result.body, indent=2))
        return result

    def make_stream_request(
        self,
        method: str,
        path: str,
        data: dict | None = None,
        headers: dict | None = None,
    ) -> Iterator[dict]:
        url = f"http://{self.server_host}:{self.server_port}{path}"
        if method == "POST":
            response = requests.post(url, headers=headers, json=data, stream=True)
        else:
            raise ValueError(f"Unimplemented method: {method}")
        for line_bytes in response.iter_lines():
            line = line_bytes.decode("utf-8")
            if '[DONE]' in line:
                break
            elif line.startswith('data: '):
                chunk = json.loads(line[len('data: '):])  # do not shadow the `data` argument
                print("Partial response from server", json.dumps(chunk, indent=2))
                yield chunk


server_instances: Set[ServerProcess] = set()
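
# A minimal usage sketch (not executed on import; uses the default model
# settings above, and the llama-server binary path resolved in start()):
#
#     server = ServerProcess()
#     server.start()
#     res = server.make_request("GET", "/health")
#     assert res.status_code == 200
#     server.stop()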


class ServerPreset:
    @staticmethod
    def tinyllama2() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "tinyllamas/stories260K.gguf"
        server.model_alias = "tinyllama-2"
        server.n_ctx = 256
        server.n_batch = 32
        server.n_slots = 2
        server.n_predict = 64
        server.seed = 42
        return server

    @staticmethod
    def bert_bge_small() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf"
        server.model_alias = "bert-bge-small"
        server.n_ctx = 512
        server.n_batch = 128
        server.n_ubatch = 128
        server.n_slots = 2
        server.seed = 42
        server.server_embeddings = True
        return server

    @staticmethod
    def tinyllama_infill() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "tinyllamas/stories260K-infill.gguf"
        server.model_alias = "tinyllama-infill"
        server.n_ctx = 2048
        server.n_batch = 1024
        server.n_slots = 1
        server.n_predict = 64
        server.temperature = 0.0
        server.seed = 42
        return server

    @staticmethod
    def stories15m_moe() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/stories15M_MOE"
        server.model_hf_file = "stories15M_MOE-F16.gguf"
        server.model_alias = "stories15m-moe"
        server.n_ctx = 2048
        server.n_batch = 1024
        server.n_slots = 1
        server.n_predict = 64
        server.temperature = 0.0
        server.seed = 42
        return server

    @staticmethod
    def jina_reranker_tiny() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "jina-reranker-v1-tiny-en/ggml-model-f16.gguf"
        server.model_alias = "jina-reranker"
        server.n_ctx = 512
        server.n_batch = 512
        server.n_slots = 1
        server.seed = 42
        server.server_reranking = True
        return server
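
# A sketch of how a preset is typically wired into a test module (the fixture
# name and the module-level `server` variable are assumptions, not part of
# this file):
#
#     import pytest
#
#     @pytest.fixture(autouse=True)
#     def create_server():
#         global server
#         server = ServerPreset.tinyllama2()
#         yield
#         server.stop()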


def parallel_function_calls(function_list: List[Tuple[Callable[..., Any], Tuple[Any, ...]]]) -> List[Any]:
    """
    Run multiple functions in parallel and return results in the same order as calls. Equivalent to Promise.all in JS.

    Example usage:

    results = parallel_function_calls([
        (func1, (arg1, arg2)),
        (func2, (arg3, arg4)),
    ])
    """
    results = [None] * len(function_list)
    exceptions = []

    def worker(index, func, args):
        try:
            result = func(*args)
            results[index] = result
        except Exception as e:
            exceptions.append((index, str(e)))

    with ThreadPoolExecutor() as executor:
        futures = []
        for i, (func, args) in enumerate(function_list):
            future = executor.submit(worker, i, func, args)
            futures.append(future)

        # wait for all futures to complete
        for future in as_completed(futures):
            pass

    # check if there were any exceptions
    if exceptions:
        print("Exceptions occurred:")
        for index, error in exceptions:
            print(f"Function at index {index}: {error}")

    return results
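
# For instance, two health checks can be issued concurrently against a running
# server (a sketch; assumes `server` is a started ServerProcess):
#
#     res1, res2 = parallel_function_calls([
#         (server.make_request, ("GET", "/health")),
#         (server.make_request, ("GET", "/health")),
#     ])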


def match_regex(regex: str, text: str) -> bool:
    return (
        re.compile(
            regex, flags=RegexFlag.IGNORECASE | RegexFlag.MULTILINE | RegexFlag.DOTALL
        ).search(text)
        is not None
    )
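
# e.g. match_regex("^hello", "HELLO world") -> True, since matching is
# case-insensitive and '.' also matches newlines (MULTILINE | DOTALL)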


def download_file(url: str, output_file_path: str | None = None) -> str:
    """
    Download a file from a URL to a local path. If the file already exists, it will not be downloaded again.

    output_file_path is the local path to save the downloaded file. If not provided, the file will be saved under ./tmp, named after the last path component of the URL.

    Returns the local path of the downloaded file.
    """
    file_name = url.split('/').pop()
    output_file = f'./tmp/{file_name}' if output_file_path is None else output_file_path
    if not os.path.exists(output_file):
        print(f"Downloading {url} to {output_file}")
        wget.download(url, out=output_file)
        print(f"Done downloading to {output_file}")
    else:
        print(f"File already exists at {output_file}")
    return output_file
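
# e.g. download_file("https://example.com/f.gguf") saves to ./tmp/f.gguf on
# the first call and returns the cached path on later calls (illustrative URL;
# the ./tmp directory is expected to exist)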


def is_slow_test_allowed():
    return os.environ.get("SLOW_TESTS") == "1" or os.environ.get("SLOW_TESTS") == "ON"
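
# Typically used as a pytest guard (a sketch; not part of this file):
#
#     @pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test")
#     def test_something_heavy():
#         ...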