#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# type: ignore[reportUnusedImport]

import subprocess
import os
import re
import json
import sys
import requests
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import (
    Any,
    Callable,
    ContextManager,
    Iterable,
    Iterator,
    List,
    Literal,
    Tuple,
    Set,
)
from re import RegexFlag
import wget


DEFAULT_HTTP_TIMEOUT = 60


class ServerResponse:
    headers: dict
    status_code: int
    body: dict | Any


class ServerError(Exception):
    def __init__(self, code, body):
        self.code = code
        self.body = body


class ServerProcess:
    # default options
    debug: bool = False
    server_port: int = 8080
    server_host: str = "127.0.0.1"
    model_hf_repo: str = "ggml-org/models"
    model_hf_file: str | None = "tinyllamas/stories260K.gguf"
    temperature: float = 0.8
    seed: int = 42
    offline: bool = False

    # custom options
    model_alias: str | None = None
    model_url: str | None = None
    model_file: str | None = None
    model_draft: str | None = None
    n_threads: int | None = None
    n_gpu_layer: int | None = None
    n_batch: int | None = None
    n_ubatch: int | None = None
    n_ctx: int | None = None
    n_ga: int | None = None
    n_ga_w: int | None = None
    n_predict: int | None = None
    n_prompts: int | None = 0
    slot_save_path: str | None = None
    id_slot: int | None = None
    cache_prompt: bool | None = None
    n_slots: int | None = None
    ctk: str | None = None
    ctv: str | None = None
    fa: str | None = None
    server_continuous_batching: bool | None = False
    server_embeddings: bool | None = False
    server_reranking: bool | None = False
    server_metrics: bool | None = False
    kv_unified: bool | None = False
    server_slots: bool | None = False
    pooling: str | None = None
    draft: int | None = None
    api_key: str | None = None
    lora_files: List[str] | None = None
    enable_ctx_shift: int | None = False
    draft_min: int | None = None
    draft_max: int | None = None
    no_webui: bool | None = None
    jinja: bool | None = None
    reasoning_format: Literal['deepseek', 'none', 'nothink'] | None = None
    reasoning_budget: int | None = None
    chat_template: str | None = None
    chat_template_file: str | None = None
    server_path: str | None = None
    mmproj_url: str | None = None

    # session variables
    process: subprocess.Popen | None = None

    def __init__(self):
        if "N_GPU_LAYERS" in os.environ:
            self.n_gpu_layer = int(os.environ["N_GPU_LAYERS"])
        if "DEBUG" in os.environ:
            self.debug = True
        if "PORT" in os.environ:
            self.server_port = int(os.environ["PORT"])
        self.external_server = "DEBUG_EXTERNAL" in os.environ
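
    # Configuration is also picked up from the environment: __init__ reads
    # N_GPU_LAYERS, DEBUG, PORT and DEBUG_EXTERNAL; start() additionally honors
    # LLAMA_SERVER_BIN_PATH and sets LLAMA_CACHE to "tmp" when it is unset.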

    def start(self, timeout_seconds: int | None = DEFAULT_HTTP_TIMEOUT) -> None:
        if self.external_server:
            print(f"[external_server]: Assuming external server running on {self.server_host}:{self.server_port}")
            return
        if self.server_path is not None:
            server_path = self.server_path
        elif "LLAMA_SERVER_BIN_PATH" in os.environ:
            server_path = os.environ["LLAMA_SERVER_BIN_PATH"]
        elif os.name == "nt":
            server_path = "../../../build/bin/Release/llama-server.exe"
        else:
            server_path = "../../../build/bin/llama-server"
        server_args = [
            "--host",
            self.server_host,
            "--port",
            self.server_port,
            "--temp",
            self.temperature,
            "--seed",
            self.seed,
        ]
        if self.offline:
            server_args.append("--offline")
        if self.model_file:
            server_args.extend(["--model", self.model_file])
        if self.model_url:
            server_args.extend(["--model-url", self.model_url])
        if self.model_draft:
            server_args.extend(["--model-draft", self.model_draft])
        if self.model_hf_repo:
            server_args.extend(["--hf-repo", self.model_hf_repo])
        if self.model_hf_file:
            server_args.extend(["--hf-file", self.model_hf_file])
        if self.n_batch:
            server_args.extend(["--batch-size", self.n_batch])
        if self.n_ubatch:
            server_args.extend(["--ubatch-size", self.n_ubatch])
        if self.n_threads:
            server_args.extend(["--threads", self.n_threads])
        if self.n_gpu_layer:
            server_args.extend(["--n-gpu-layers", self.n_gpu_layer])
        if self.draft is not None:
            server_args.extend(["--draft", self.draft])
        if self.server_continuous_batching:
            server_args.append("--cont-batching")
        if self.server_embeddings:
            server_args.append("--embedding")
        if self.server_reranking:
            server_args.append("--reranking")
        if self.server_metrics:
            server_args.append("--metrics")
        if self.kv_unified:
            server_args.append("--kv-unified")
        if self.server_slots:
            server_args.append("--slots")
        else:
            server_args.append("--no-slots")
        if self.pooling:
            server_args.extend(["--pooling", self.pooling])
        if self.model_alias:
            server_args.extend(["--alias", self.model_alias])
        if self.n_ctx:
            server_args.extend(["--ctx-size", self.n_ctx])
        if self.n_slots:
            server_args.extend(["--parallel", self.n_slots])
        if self.ctk:
            server_args.extend(["-ctk", self.ctk])
        if self.ctv:
            server_args.extend(["-ctv", self.ctv])
        if self.fa is not None:
            server_args.extend(["-fa", self.fa])
        if self.n_predict:
            server_args.extend(["--n-predict", self.n_predict])
        if self.slot_save_path:
            server_args.extend(["--slot-save-path", self.slot_save_path])
        if self.n_ga:
            server_args.extend(["--grp-attn-n", self.n_ga])
        if self.n_ga_w:
            server_args.extend(["--grp-attn-w", self.n_ga_w])
        if self.debug:
            server_args.append("--verbose")
        if self.lora_files:
            for lora_file in self.lora_files:
                server_args.extend(["--lora", lora_file])
        if self.enable_ctx_shift:
            server_args.append("--context-shift")
        if self.api_key:
            server_args.extend(["--api-key", self.api_key])
        if self.draft_max:
            server_args.extend(["--draft-max", self.draft_max])
        if self.draft_min:
            server_args.extend(["--draft-min", self.draft_min])
        if self.no_webui:
            server_args.append("--no-webui")
        if self.jinja:
            server_args.append("--jinja")
        else:
            server_args.append("--no-jinja")
        if self.reasoning_format is not None:
            server_args.extend(("--reasoning-format", self.reasoning_format))
        if self.reasoning_budget is not None:
            server_args.extend(("--reasoning-budget", self.reasoning_budget))
        if self.chat_template:
            server_args.extend(["--chat-template", self.chat_template])
        if self.chat_template_file:
            server_args.extend(["--chat-template-file", self.chat_template_file])
        if self.mmproj_url:
            server_args.extend(["--mmproj-url", self.mmproj_url])
        args = [str(arg) for arg in [server_path, *server_args]]
        print(f"tests: starting server with: {' '.join(args)}")
        flags = 0
        if os.name == "nt":
            flags |= subprocess.DETACHED_PROCESS
            flags |= subprocess.CREATE_NEW_PROCESS_GROUP
            flags |= subprocess.CREATE_NO_WINDOW
        self.process = subprocess.Popen(
            args,
            creationflags=flags,
            stdout=sys.stdout,
            stderr=sys.stdout,
            env={**os.environ, "LLAMA_CACHE": "tmp"} if "LLAMA_CACHE" not in os.environ else None,
        )
        server_instances.add(self)
        print(f"server pid={self.process.pid}, pytest pid={os.getpid()}")
        # wait for server to start
        start_time = time.time()
        while time.time() - start_time < timeout_seconds:
            try:
                response = self.make_request("GET", "/health", headers={
                    "Authorization": f"Bearer {self.api_key}" if self.api_key else None
                })
                if response.status_code == 200:
                    self.ready = True
                    return  # server is ready
            except Exception:
                pass
            # check whether the process died while we were waiting
            if self.process.poll() is not None:
                raise RuntimeError(f"Server process died with return code {self.process.returncode}")
            print("Waiting for server to start...")
            time.sleep(0.5)
        raise TimeoutError(f"Server did not start within {timeout_seconds} seconds")

    def stop(self) -> None:
        if self.external_server:
            print("[external_server]: Not stopping external server")
            return
        if self in server_instances:
            server_instances.remove(self)
        if self.process:
            print(f"Stopping server with pid={self.process.pid}")
            self.process.kill()
            self.process = None

    def make_request(
        self,
        method: str,
        path: str,
        data: dict | Any | None = None,
        headers: dict | None = None,
        timeout: float | None = None,
    ) -> ServerResponse:
        url = f"http://{self.server_host}:{self.server_port}{path}"
        parse_body = False
        if method == "GET":
            response = requests.get(url, headers=headers, timeout=timeout)
            parse_body = True
        elif method == "POST":
            response = requests.post(url, headers=headers, json=data, timeout=timeout)
            parse_body = True
        elif method == "OPTIONS":
            response = requests.options(url, headers=headers, timeout=timeout)
        else:
            raise ValueError(f"Unimplemented method: {method}")
        result = ServerResponse()
        result.headers = dict(response.headers)
        result.status_code = response.status_code
        result.body = response.json() if parse_body else None
        print("Response from server", json.dumps(result.body, indent=2))
        return result

    def make_stream_request(
        self,
        method: str,
        path: str,
        data: dict | None = None,
        headers: dict | None = None,
    ) -> Iterator[dict]:
        url = f"http://{self.server_host}:{self.server_port}{path}"
        if method == "POST":
            response = requests.post(url, headers=headers, json=data, stream=True)
        else:
            raise ValueError(f"Unimplemented method: {method}")
        if response.status_code != 200:
            raise ServerError(response.status_code, response.json())
        for line_bytes in response.iter_lines():
            line = line_bytes.decode("utf-8")
            if '[DONE]' in line:
                break
            elif line.startswith('data: '):
                chunk = json.loads(line[6:])  # strip the "data: " SSE prefix
                print("Partial response from server", json.dumps(chunk, indent=2))
                yield chunk

    def make_any_request(
        self,
        method: str,
        path: str,
        data: dict | None = None,
        headers: dict | None = None,
        timeout: float | None = None,
    ) -> dict:
        stream = data.get('stream', False) if data else False
        if stream:
            content: list[str] = []
            reasoning_content: list[str] = []
            tool_calls: list[dict] = []
            finish_reason: str | None = None
            content_parts = 0
            reasoning_content_parts = 0
            tool_call_parts = 0
            arguments_parts = 0
            for chunk in self.make_stream_request(method, path, data, headers):
                if chunk['choices']:
                    assert len(chunk['choices']) == 1, f'Expected 1 choice, got {len(chunk["choices"])}'
                    choice = chunk['choices'][0]
                    if choice['delta'].get('content') is not None:
                        assert len(choice['delta']['content']) > 0, 'Expected non-empty content delta!'
                        content.append(choice['delta']['content'])
                        content_parts += 1
                    if choice['delta'].get('reasoning_content') is not None:
                        assert len(choice['delta']['reasoning_content']) > 0, 'Expected non-empty reasoning_content delta!'
                        reasoning_content.append(choice['delta']['reasoning_content'])
                        reasoning_content_parts += 1
                    if choice['delta'].get('finish_reason') is not None:
                        finish_reason = choice['delta']['finish_reason']
                    for tc in choice['delta'].get('tool_calls', []):
                        if 'function' not in tc:
                            raise ValueError(f"Expected function type, got {tc.get('type')}")
                        if tc['index'] >= len(tool_calls):
                            assert 'id' in tc
                            assert tc.get('type') == 'function'
                            assert 'function' in tc and 'name' in tc['function'] and len(tc['function']['name']) > 0, \
                                f"Expected function call with name, got {tc.get('function')}"
                            tool_calls.append(dict(
                                id="",
                                type="function",
                                function=dict(
                                    name="",
                                    arguments="",
                                )
                            ))
                        tool_call = tool_calls[tc['index']]
                        if tc.get('id') is not None:
                            tool_call['id'] = tc['id']
                        fct = tc['function']
                        assert 'id' not in fct, f"Function call should not have id: {fct}"
                        if fct.get('name') is not None:
                            tool_call['function']['name'] = tool_call['function'].get('name', '') + fct['name']
                        if fct.get('arguments') is not None:
                            tool_call['function']['arguments'] += fct['arguments']
                            arguments_parts += 1
                        tool_call_parts += 1
                else:
                    # When `include_usage` is True (the default), we expect the last chunk of the stream,
                    # immediately preceding the `data: [DONE]` message, to contain a `choices` field with an
                    # empty array and a `usage` field with the usage statistics (n.b., llama-server also
                    # returns `timings` in that last chunk).
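                    # An illustrative final chunk could look like (values are hypothetical):
                    #   data: {"choices": [], "usage": {"prompt_tokens": 11, "completion_tokens": 32,
                    #          "total_tokens": 43}, "timings": {...}}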
                    assert 'usage' in chunk, f"Expected usage in chunk: {chunk}"
                    assert 'timings' in chunk, f"Expected timings in chunk: {chunk}"
            print(f'Streamed response had {content_parts} content parts, {reasoning_content_parts} reasoning_content parts, {tool_call_parts} tool call parts incl. {arguments_parts} arguments parts')
            result = dict(
                choices=[
                    dict(
                        index=0,
                        finish_reason=finish_reason,
                        message=dict(
                            role='assistant',
                            content=''.join(content) if content else None,
                            reasoning_content=''.join(reasoning_content) if reasoning_content else None,
                            tool_calls=tool_calls if tool_calls else None,
                        ),
                    )
                ],
            )
            print("Final response from server", json.dumps(result, indent=2))
            return result
        else:
            response = self.make_request(method, path, data, headers, timeout=timeout)
            assert response.status_code == 200, f"Server returned error: {response.status_code}"
            return response.body


server_instances: Set[ServerProcess] = set()
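

# Example (illustrative sketch, not executed by this module): the typical
# lifecycle of a ServerProcess in a test:
#
#     server = ServerProcess()
#     server.start()
#     try:
#         res = server.make_request("GET", "/health")
#         assert res.status_code == 200
#     finally:
#         server.stop()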


class ServerPreset:
    @staticmethod
    def load_all() -> None:
        """ Load all server presets to ensure model files are cached. """
        servers: List[ServerProcess] = [
            method()
            for name, method in ServerPreset.__dict__.items()
            if callable(method) and name != "load_all"
        ]
        for server in servers:
            server.offline = False
            server.start()
            server.stop()

    @staticmethod
    def tinyllama2() -> ServerProcess:
        server = ServerProcess()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "tinyllamas/stories260K.gguf"
        server.model_alias = "tinyllama-2"
        server.n_ctx = 512
        server.n_batch = 32
        server.n_slots = 2
        server.n_predict = 64
        server.seed = 42
        return server

    @staticmethod
    def bert_bge_small() -> ServerProcess:
        server = ServerProcess()
        server.offline = True  # will be downloaded by load_all()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf"
        server.model_alias = "bert-bge-small"
        server.n_ctx = 512
        server.n_batch = 128
        server.n_ubatch = 128
        server.n_slots = 2
        server.seed = 42
        server.server_embeddings = True
        return server

    @staticmethod
    def bert_bge_small_with_fa() -> ServerProcess:
        server = ServerProcess()
        server.offline = True  # will be downloaded by load_all()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf"
        server.model_alias = "bert-bge-small"
        server.n_ctx = 1024
        server.n_batch = 300
        server.n_ubatch = 300
        server.n_slots = 2
        server.fa = "on"
        server.seed = 42
        server.server_embeddings = True
        return server

    @staticmethod
    def tinyllama_infill() -> ServerProcess:
        server = ServerProcess()
        server.offline = True  # will be downloaded by load_all()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "tinyllamas/stories260K-infill.gguf"
        server.model_alias = "tinyllama-infill"
        server.n_ctx = 2048
        server.n_batch = 1024
        server.n_slots = 1
        server.n_predict = 64
        server.temperature = 0.0
        server.seed = 42
        return server

    @staticmethod
    def stories15m_moe() -> ServerProcess:
        server = ServerProcess()
        server.offline = True  # will be downloaded by load_all()
        server.model_hf_repo = "ggml-org/stories15M_MOE"
        server.model_hf_file = "stories15M_MOE-F16.gguf"
        server.model_alias = "stories15m-moe"
        server.n_ctx = 2048
        server.n_batch = 1024
        server.n_slots = 1
        server.n_predict = 64
        server.temperature = 0.0
        server.seed = 42
        return server

    @staticmethod
    def jina_reranker_tiny() -> ServerProcess:
        server = ServerProcess()
        server.offline = True  # will be downloaded by load_all()
        server.model_hf_repo = "ggml-org/models"
        server.model_hf_file = "jina-reranker-v1-tiny-en/ggml-model-f16.gguf"
        server.model_alias = "jina-reranker"
        server.n_ctx = 512
        server.n_batch = 512
        server.n_slots = 1
        server.seed = 42
        server.server_reranking = True
        return server

    @staticmethod
    def tinygemma3() -> ServerProcess:
        server = ServerProcess()
        server.offline = True  # will be downloaded by load_all()
        # mmproj is already provided by HF registry API
        server.model_hf_repo = "ggml-org/tinygemma3-GGUF"
        server.model_hf_file = "tinygemma3-Q8_0.gguf"
        server.mmproj_url = "https://huggingface.co/ggml-org/tinygemma3-GGUF/resolve/main/mmproj-tinygemma3.gguf"
        server.model_alias = "tinygemma3"
        server.n_ctx = 1024
        server.n_batch = 32
        server.n_slots = 2
        server.n_predict = 4
        server.seed = 42
        return server
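
    # Example (illustrative): presets are plain factory methods, so a test can
    # tweak the returned ServerProcess before starting it:
    #
    #     server = ServerPreset.tinyllama2()
    #     server.n_slots = 4
    #     server.start()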


def parallel_function_calls(function_list: List[Tuple[Callable[..., Any], Tuple[Any, ...]]]) -> List[Any]:
    """
    Run multiple functions in parallel and return results in the same order as the calls.
    Equivalent to Promise.all in JS.

    Example usage:

    results = parallel_function_calls([
        (func1, (arg1, arg2)),
        (func2, (arg3, arg4)),
    ])
    """
    results = [None] * len(function_list)
    exceptions = []

    def worker(index, func, args):
        try:
            result = func(*args)
            results[index] = result
        except Exception as e:
            exceptions.append((index, str(e)))

    with ThreadPoolExecutor() as executor:
        futures = []
        for i, (func, args) in enumerate(function_list):
            future = executor.submit(worker, i, func, args)
            futures.append(future)

        # wait for all futures to complete
        for future in as_completed(futures):
            pass

    # report any exceptions that occurred in the workers
    if exceptions:
        print("Exceptions occurred:")
        for index, error in exceptions:
            print(f"Function at index {index}: {error}")

    return results


def match_regex(regex: str, text: str) -> bool:
    return (
        re.compile(
            regex, flags=RegexFlag.IGNORECASE | RegexFlag.MULTILINE | RegexFlag.DOTALL
        ).search(text)
        is not None
    )
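

# Example (illustrative): the DOTALL flag lets '.' span newlines, so
# match_regex("fox.*dog", "a fox\njumps over the dog") returns True.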


def download_file(url: str, output_file_path: str | None = None) -> str:
    """
    Download a file from a URL to a local path. If the file already exists, it will not be downloaded again.

    output_file_path is the local path to save the downloaded file. If not provided, the file is saved
    under ./tmp using the file name taken from the URL.

    Returns the local path of the downloaded file.
    """
    file_name = url.split('/').pop()
    output_file = f'./tmp/{file_name}' if output_file_path is None else output_file_path
    if not os.path.exists(output_file):
        print(f"Downloading {url} to {output_file}")
        wget.download(url, out=output_file)
        print(f"Done downloading to {output_file}")
    else:
        print(f"File already exists at {output_file}")
    return output_file
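

# Example (illustrative; the URL is hypothetical):
#
#     path = download_file("https://example.com/model.gguf")
#     # -> "./tmp/model.gguf" (the download is skipped if the file already exists)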


def is_slow_test_allowed() -> bool:
    return os.environ.get("SLOW_TESTS") == "1" or os.environ.get("SLOW_TESTS") == "ON"