#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# type: ignore[reportUnusedImport]

import subprocess
import os
import re
import json
import sys
import requests
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import (
    Any,
    Callable,
    ContextManager,
    Iterable,
    Iterator,
    List,
    Literal,
    Tuple,
    Set,
)
from re import RegexFlag
import wget

DEFAULT_HTTP_TIMEOUT = 12 if "LLAMA_SANITIZE" not in os.environ else 30


class ServerResponse:
    headers: dict
    status_code: int
    body: dict | Any


class ServerProcess:
    # default options
    debug: bool = False
    server_port: int = 8080
    server_host: str = "127.0.0.1"
    model_hf_repo: str = "ggml-org/models"
    model_hf_file: str | None = "tinyllamas/stories260K.gguf"
    model_alias: str = "tinyllama-2"
    temperature: float = 0.8
    seed: int = 42

    # custom options
    model_url: str | None = None
    model_file: str | None = None
    model_draft: str | None = None
    n_threads: int | None = None
    n_gpu_layer: int | None = None
    n_batch: int | None = None
    n_ubatch: int | None = None
    n_ctx: int | None = None
    n_ga: int | None = None
    n_ga_w: int | None = None
    n_predict: int | None = None
    n_prompts: int | None = 0
    slot_save_path: str | None = None
    id_slot: int | None = None
    cache_prompt: bool | None = None
    n_slots: int | None = None
    server_continuous_batching: bool | None = False
    server_embeddings: bool | None = False
    server_reranking: bool | None = False
    server_metrics: bool | None = False
    server_slots: bool | None = False
    pooling: str | None = None
    draft: int | None = None
    api_key: str | None = None
    lora_files: List[str] | None = None
    disable_ctx_shift: bool | None = False
    draft_min: int | None = None
    draft_max: int | None = None
    no_webui: bool | None = None
    jinja: bool | None = None
    reasoning_format: Literal['deepseek', 'none'] | None = None
    chat_template: str | None = None
    chat_template_file: str | None = None

    # session variables
    process: subprocess.Popen | None = None
    ready: bool = False

    def __init__(self):
        if "N_GPU_LAYERS" in os.environ:
            self.n_gpu_layer = int(os.environ["N_GPU_LAYERS"])
        if "DEBUG" in os.environ:
            self.debug = True
        if "PORT" in os.environ:
            self.server_port = int(os.environ["PORT"])

    def start(self, timeout_seconds: int = DEFAULT_HTTP_TIMEOUT) -> None:
        if "LLAMA_SERVER_BIN_PATH" in os.environ:
            server_path = os.environ["LLAMA_SERVER_BIN_PATH"]
        elif os.name == "nt":
            server_path = "../../../build/bin/Release/llama-server.exe"
        else:
            server_path = "../../../build/bin/llama-server"
        server_args = [
            "--host",
            self.server_host,
            "--port",
            self.server_port,
            "--temp",
            self.temperature,
            "--seed",
            self.seed,
        ]
        if self.model_file:
            server_args.extend(["--model", self.model_file])
        if self.model_url:
            server_args.extend(["--model-url", self.model_url])
        if self.model_draft:
            server_args.extend(["--model-draft", self.model_draft])
        if self.model_hf_repo:
            server_args.extend(["--hf-repo", self.model_hf_repo])
        if self.model_hf_file:
            server_args.extend(["--hf-file", self.model_hf_file])
        if self.n_batch:
            server_args.extend(["--batch-size", self.n_batch])
        if self.n_ubatch:
            server_args.extend(["--ubatch-size", self.n_ubatch])
        if self.n_threads:
            server_args.extend(["--threads", self.n_threads])
        if self.n_gpu_layer:
            server_args.extend(["--n-gpu-layers", self.n_gpu_layer])
        if self.draft is not None:
            server_args.extend(["--draft", self.draft])
        if self.server_continuous_batching:
            server_args.append("--cont-batching")
        if self.server_embeddings:
            server_args.append("--embedding")
        if self.server_reranking:
            server_args.append("--reranking")
        if self.server_metrics:
            server_args.append("--metrics")
        if self.server_slots:
            server_args.append("--slots")
        if self.pooling:
            server_args.extend(["--pooling", self.pooling])
        if self.model_alias:
            server_args.extend(["--alias", self.model_alias])
        if self.n_ctx:
            server_args.extend(["--ctx-size", self.n_ctx])
        if self.n_slots:
            server_args.extend(["--parallel", self.n_slots])
        if self.n_predict:
            server_args.extend(["--n-predict", self.n_predict])
        if self.slot_save_path:
            server_args.extend(["--slot-save-path", self.slot_save_path])
        if self.n_ga:
            server_args.extend(["--grp-attn-n", self.n_ga])
        if self.n_ga_w:
            server_args.extend(["--grp-attn-w", self.n_ga_w])
        if self.debug:
            server_args.append("--verbose")
        if self.lora_files:
            for lora_file in self.lora_files:
                server_args.extend(["--lora", lora_file])
        if self.disable_ctx_shift:
            server_args.append("--no-context-shift")
        if self.api_key:
            server_args.extend(["--api-key", self.api_key])
        if self.draft_max:
            server_args.extend(["--draft-max", self.draft_max])
        if self.draft_min:
            server_args.extend(["--draft-min", self.draft_min])
        if self.no_webui:
            server_args.append("--no-webui")
        if self.jinja:
            server_args.append("--jinja")
        if self.reasoning_format is not None:
            server_args.extend(["--reasoning-format", self.reasoning_format])
        if self.chat_template:
            server_args.extend(["--chat-template", self.chat_template])
        if self.chat_template_file:
            server_args.extend(["--chat-template-file", self.chat_template_file])

        args = [str(arg) for arg in [server_path, *server_args]]
        print(f"bench: starting server with: {' '.join(args)}")

        flags = 0
        if os.name == "nt":
            flags |= subprocess.DETACHED_PROCESS
            flags |= subprocess.CREATE_NEW_PROCESS_GROUP
            flags |= subprocess.CREATE_NO_WINDOW

        self.process = subprocess.Popen(
            args,  # reuse the stringified argv built above
            creationflags=flags,
            stdout=sys.stdout,
            stderr=sys.stdout,
            env={**os.environ, "LLAMA_CACHE": "tmp"} if "LLAMA_CACHE" not in os.environ else None,
        )
        server_instances.add(self)
        print(f"server pid={self.process.pid}, pytest pid={os.getpid()}")

        # wait for server to start
        start_time = time.time()
        while time.time() - start_time < timeout_seconds:
            try:
                response = self.make_request("GET", "/health", headers={
                    "Authorization": f"Bearer {self.api_key}" if self.api_key else None
                })
                if response.status_code == 200:
                    self.ready = True
                    return  # server is ready
            except Exception:
                pass
            print("Waiting for server to start...")
            time.sleep(0.5)
        raise TimeoutError(f"Server did not start within {timeout_seconds} seconds")

    def stop(self) -> None:
        if self in server_instances:
            server_instances.remove(self)
        if self.process:
            print(f"Stopping server with pid={self.process.pid}")
            self.process.kill()
            self.process = None

    def make_request(
        self,
        method: str,
        path: str,
        data: dict | Any | None = None,
        headers: dict | None = None,
        timeout: float | None = None,
    ) -> ServerResponse:
        url = f"http://{self.server_host}:{self.server_port}{path}"
        parse_body = False
        if method == "GET":
            response = requests.get(url, headers=headers, timeout=timeout)
            parse_body = True
        elif method == "POST":
            response = requests.post(url, headers=headers, json=data, timeout=timeout)
            parse_body = True
        elif method == "OPTIONS":
            response = requests.options(url, headers=headers, timeout=timeout)
        else:
            raise ValueError(f"Unimplemented method: {method}")
        result = ServerResponse()
        result.headers = dict(response.headers)
        result.status_code = response.status_code
        result.body = response.json() if parse_body else None
        print("Response from server", json.dumps(result.body, indent=2))
        return result

    def make_stream_request(
        self,
        method: str,
        path: str,
        data: dict | None = None,
        headers: dict | None = None,
    ) -> Iterator[dict]:
        url = f"http://{self.server_host}:{self.server_port}{path}"
        if method == "POST":
            response = requests.post(url, headers=headers, json=data, stream=True)
        else:
            raise ValueError(f"Unimplemented method: {method}")
        for line_bytes in response.iter_lines():
            line = line_bytes.decode("utf-8")
            if '[DONE]' in line:
                break
            elif line.startswith('data: '):
                # strip the SSE "data: " prefix and parse the JSON chunk
                chunk = json.loads(line[6:])
                print("Partial response from server", json.dumps(chunk, indent=2))
                yield chunk


server_instances: Set[ServerProcess] = set()
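

# Streaming usage sketch (illustrative only; the endpoint and payload below
# are assumptions for the example, not something this module defines):
#
#     for chunk in server.make_stream_request("POST", "/v1/chat/completions", data={
#         "messages": [{"role": "user", "content": "hello"}],
#         "stream": True,
#     }):
#         print(chunk["choices"][0]["delta"].get("content", ""), end="")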


class ServerPreset:
    @staticmethod
    def tinyllama2() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "tinyllamas/stories260K.gguf"
        server.model_alias = "tinyllama-2"
        server.n_ctx = 256
        server.n_batch = 32
        server.n_slots = 2
        server.n_predict = 64
        server.seed = 42
        return server

    @staticmethod
    def bert_bge_small() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf"
        server.model_alias = "bert-bge-small"
        server.n_ctx = 512
        server.n_batch = 128
        server.n_ubatch = 128
        server.n_slots = 2
        server.seed = 42
        server.server_embeddings = True
        return server

    @staticmethod
    def tinyllama_infill() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "tinyllamas/stories260K-infill.gguf"
        server.model_alias = "tinyllama-infill"
        server.n_ctx = 2048
        server.n_batch = 1024
        server.n_slots = 1
        server.n_predict = 64
        server.temperature = 0.0
        server.seed = 42
        return server

    @staticmethod
    def stories15m_moe() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/stories15M_MOE"
        server.model_hf_file = "stories15M_MOE-F16.gguf"
        server.model_alias = "stories15m-moe"
        server.n_ctx = 2048
        server.n_batch = 1024
        server.n_slots = 1
        server.n_predict = 64
        server.temperature = 0.0
        server.seed = 42
        return server

    @staticmethod
    def jina_reranker_tiny() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "jina-reranker-v1-tiny-en/ggml-model-f16.gguf"
        server.model_alias = "jina-reranker"
        server.n_ctx = 512
        server.n_batch = 512
        server.n_slots = 1
        server.seed = 42
        server.server_reranking = True
        return server
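

# Illustrative sketch (not used by the suite; the helper name is ours): how a
# preset, start() and make_request() compose. Assumes a llama-server binary is
# reachable via LLAMA_SERVER_BIN_PATH or the default relative build path.
def _example_preset_roundtrip() -> None:
    server = ServerPreset.tinyllama2()
    server.start()
    try:
        res = server.make_request("GET", "/health")
        assert res.status_code == 200
    finally:
        server.stop()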


def parallel_function_calls(function_list: List[Tuple[Callable[..., Any], Tuple[Any, ...]]]) -> List[Any]:
    """
    Run multiple functions in parallel and return results in the same order as calls.
    Equivalent to Promise.all in JS.

    Example usage:

    results = parallel_function_calls([
        (func1, (arg1, arg2)),
        (func2, (arg3, arg4)),
    ])
    """
    results = [None] * len(function_list)
    exceptions = []

    def worker(index, func, args):
        try:
            result = func(*args)
            results[index] = result
        except Exception as e:
            exceptions.append((index, str(e)))

    with ThreadPoolExecutor() as executor:
        futures = []
        for i, (func, args) in enumerate(function_list):
            future = executor.submit(worker, i, func, args)
            futures.append(future)

        # Wait for all futures to complete
        for future in as_completed(futures):
            pass

    # Check if there were any exceptions
    if exceptions:
        print("Exceptions occurred:")
        for index, error in exceptions:
            print(f"Function at index {index}: {error}")

    return results
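

# Minimal concrete sketch of the call shape documented above (helper name is
# ours): results come back in submission order, regardless of which worker
# finishes first.
def _example_parallel_calls() -> List[Any]:
    return parallel_function_calls([
        (len, ("hello",)),       # -> 5
        (sorted, ([3, 1, 2],)),  # -> [1, 2, 3]
    ])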


def match_regex(regex: str, text: str) -> bool:
    return (
        re.compile(
            regex, flags=RegexFlag.IGNORECASE | RegexFlag.MULTILINE | RegexFlag.DOTALL
        ).search(text)
        is not None
    )
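

# Illustrative check (helper name is ours): DOTALL lets "." cross newlines and
# IGNORECASE drops case-sensitivity, so one pattern can match anywhere in a
# multi-line completion.
def _example_match_regex() -> None:
    assert match_regex(r"once.*end", "Once upon a time\nThe END")
    assert not match_regex(r"^nope$", "something else")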


def download_file(url: str, output_file_path: str | None = None) -> str:
    """
    Download a file from a URL to a local path. If the file already exists, it will not be downloaded again.

    output_file_path is the local path to save the downloaded file. If not provided, the file will be saved under ./tmp with the name taken from the URL.

    Returns the local path of the downloaded file.
    """
    file_name = url.split('/').pop()
    output_file = f'./tmp/{file_name}' if output_file_path is None else output_file_path
    if not os.path.exists(output_file):
        print(f"Downloading {url} to {output_file}")
        wget.download(url, out=output_file)
        print(f"Done downloading to {output_file}")
    else:
        print(f"File already exists at {output_file}")
    return output_file
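

# Hypothetical wrapper (our addition, not part of the suite): download_file
# writes under ./tmp by default but does not create that directory itself, so
# make sure it exists before downloading.
def _example_download(url: str) -> str:
    os.makedirs("./tmp", exist_ok=True)
    return download_file(url)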


def is_slow_test_allowed():
    return os.environ.get("SLOW_TESTS") == "1" or os.environ.get("SLOW_TESTS") == "ON"
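

# Consumption sketch (assumes pytest, which drives these utilities; the test
# name is a placeholder):
#
#     import pytest
#
#     @pytest.mark.skipif(not is_slow_test_allowed(), reason="slow tests disabled")
#     def test_large_model():
#         ...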