import pytest
import requests
import time
from openai import OpenAI
from utils import *

server = ServerPreset.tinyllama2()
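

# Recreate the server preset once per test module (autouse fixture).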
@pytest.fixture(scope="module", autouse=True)
def create_server():
    global server
    server = ServerPreset.tinyllama2()
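

# Basic /completion request: verify status code, timing counters, truncation
# flag, generated content, and the optional per-token output.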
@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated,return_tokens", [
    ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False, False),
    ("Write a joke about AI from a very long prompt which will not be truncated", 256, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False, True),
])
def test_completion(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool, return_tokens: bool):
    global server
    server.start()
    res = server.make_request("POST", "/completion", data={
        "n_predict": n_predict,
        "prompt": prompt,
        "return_tokens": return_tokens,
    })
    assert res.status_code == 200
    assert res.body["timings"]["prompt_n"] == n_prompt
    assert res.body["timings"]["predicted_n"] == n_predicted
    assert res.body["truncated"] == truncated
    assert type(res.body["has_new_line"]) == bool
    assert match_regex(re_content, res.body["content"])
    if return_tokens:
        assert len(res.body["tokens"]) > 0
        assert all(type(tok) == int for tok in res.body["tokens"])
    else:
        assert res.body["tokens"] == []
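

# Streaming /completion: accumulate the chunks and check that the final "stop"
# chunk carries timings and generation settings, and that the accumulated
# content matches the expected regex.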
@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated", [
    ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False),
    ("Write a joke about AI from a very long prompt which will not be truncated", 256, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False),
])
def test_completion_stream(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool):
    global server
    server.start()
    res = server.make_stream_request("POST", "/completion", data={
        "n_predict": n_predict,
        "prompt": prompt,
        "stream": True,
    })
    content = ""
    for data in res:
        assert "stop" in data and type(data["stop"]) == bool
        if data["stop"]:
            assert data["timings"]["prompt_n"] == n_prompt
            assert data["timings"]["predicted_n"] == n_predicted
            assert data["truncated"] == truncated
            assert data["stop_type"] == "limit"
            assert type(data["has_new_line"]) == bool
            assert "generation_settings" in data
            assert server.n_predict is not None
            assert data["generation_settings"]["n_predict"] == min(n_predict, server.n_predict)
            assert data["generation_settings"]["seed"] == server.seed
            assert match_regex(re_content, content)
        else:
            assert len(data["tokens"]) > 0
            assert all(type(tok) == int for tok in data["tokens"])
            content += data["content"]
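

# Streaming and non-streaming requests with identical parameters should
# produce identical content.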
def test_completion_stream_vs_non_stream():
    global server
    server.start()
    res_stream = server.make_stream_request("POST", "/completion", data={
        "n_predict": 8,
        "prompt": "I believe the meaning of life is",
        "stream": True,
    })
    res_non_stream = server.make_request("POST", "/completion", data={
        "n_predict": 8,
        "prompt": "I believe the meaning of life is",
    })
    content_stream = ""
    for data in res_stream:
        content_stream += data["content"]
    assert content_stream == res_non_stream.body["content"]
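

# Exercise the OpenAI-compatible /v1/completions endpoint through the
# official OpenAI Python client.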
def test_completion_with_openai_library():
    global server
    server.start()
    client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
    res = client.completions.create(
        model="davinci-002",
        prompt="I believe the meaning of life is",
        max_tokens=8,
    )
    assert res.system_fingerprint is not None and res.system_fingerprint.startswith("b")
    assert res.choices[0].finish_reason == "length"
    assert res.choices[0].text is not None
    assert match_regex("(going|bed)+", res.choices[0].text)
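

# Same as above, but streamed through the OpenAI client.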
def test_completion_stream_with_openai_library():
    global server
    server.start()
    client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
    res = client.completions.create(
        model="davinci-002",
        prompt="I believe the meaning of life is",
        max_tokens=8,
        stream=True,
    )
    output_text = ''
    for data in res:
        choice = data.choices[0]
        if choice.finish_reason is None:
            assert choice.text is not None
            output_text += choice.text
    assert match_regex("(going|bed)+", output_text)
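

# With a fixed seed and temperature 0, repeated requests must return the
# same content, regardless of the number of slots.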
@pytest.mark.parametrize("n_slots", [1, 2])
def test_consistent_result_same_seed(n_slots: int):
    global server
    server.n_slots = n_slots
    server.start()
    last_res = None
    for _ in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": 42,
            "temperature": 0.0,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] == last_res.body["content"]
        last_res = res
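

# Different seeds at temperature 1.0 should yield different content.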
@pytest.mark.parametrize("n_slots", [1, 2])
def test_different_result_different_seed(n_slots: int):
    global server
    server.n_slots = n_slots
    server.start()
    last_res = None
    for seed in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": seed,
            "temperature": 1.0,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] != last_res.body["content"]
        last_res = res
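

# Batch size must not affect the result for a fixed seed at temperature 0.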
# TODO: figure out why it doesn't work with temperature = 1
# @pytest.mark.parametrize("temperature", [0.0, 1.0])
@pytest.mark.parametrize("n_batch", [16, 32])
@pytest.mark.parametrize("temperature", [0.0])
def test_consistent_result_different_batch_size(n_batch: int, temperature: float):
    global server
    server.n_batch = n_batch
    server.start()
    last_res = None
    for _ in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": 42,
            "temperature": temperature,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] == last_res.body["content"]
        last_res = res
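

# Prompt caching must not change the generated content.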
@pytest.mark.skip(reason="This test fails on linux, needs to be fixed")
def test_cache_vs_nocache_prompt():
    global server
    server.start()
    res_cache = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "seed": 42,
        "temperature": 1.0,
        "cache_prompt": True,
    })
    res_no_cache = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "seed": 42,
        "temperature": 1.0,
        "cache_prompt": False,
    })
    assert res_cache.body["content"] == res_no_cache.body["content"]
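

# A long, uncached input prompt should still be accepted.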
def test_nocache_long_input_prompt():
    global server
    server.start()
    res = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is" * 32,
        "seed": 42,
        "temperature": 1.0,
        "cache_prompt": False,
    })
    assert res.status_code == 200
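

# The "prompt" field also accepts token IDs, arrays of prompts, and mixed
# strings and token IDs; tokenized and string prompts must give the same result.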
def test_completion_with_tokens_input():
    global server
    server.temperature = 0.0
    server.start()
    prompt_str = "I believe the meaning of life is"
    res = server.make_request("POST", "/tokenize", data={
        "content": prompt_str,
        "add_special": True,
    })
    assert res.status_code == 200
    tokens = res.body["tokens"]

    # single completion
    res = server.make_request("POST", "/completion", data={
        "prompt": tokens,
    })
    assert res.status_code == 200
    assert type(res.body["content"]) == str

    # batch completion
    res = server.make_request("POST", "/completion", data={
        "prompt": [tokens, tokens],
    })
    assert res.status_code == 200
    assert type(res.body) == list
    assert len(res.body) == 2
    assert res.body[0]["content"] == res.body[1]["content"]

    # mixed string and tokens
    res = server.make_request("POST", "/completion", data={
        "prompt": [tokens, prompt_str],
    })
    assert res.status_code == 200
    assert type(res.body) == list
    assert len(res.body) == 2
    assert res.body[0]["content"] == res.body[1]["content"]

    # mixed string and tokens in one sequence
    res = server.make_request("POST", "/completion", data={
        "prompt": [1, 2, 3, 4, 5, 6, prompt_str, 7, 8, 9, 10, prompt_str],
    })
    assert res.status_code == 200
    assert type(res.body["content"]) == str
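

# Fire several requests in parallel and poll /slots while they run: all slots
# should be busy when there are at least as many requests as slots.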
@pytest.mark.parametrize("n_slots,n_requests", [
    (1, 3),
    (2, 2),
    (2, 4),
    (4, 2),  # some slots must be idle
    (4, 6),
])
def test_completion_parallel_slots(n_slots: int, n_requests: int):
    global server
    server.n_slots = n_slots
    server.temperature = 0.0
    server.start()
    PROMPTS = [
        ("Write a very long book.", "(very|special|big)+"),
        ("Write another a poem.", "(small|house)+"),
        ("What is LLM?", "(Dad|said)+"),
        ("The sky is blue and I love it.", "(climb|leaf)+"),
        ("Write another very long music lyrics.", "(friends|step|sky)+"),
        ("Write a very long joke.", "(cat|Whiskers)+"),
    ]

    def check_slots_status():
        should_all_slots_busy = n_requests >= n_slots
        time.sleep(0.1)
        res = server.make_request("GET", "/slots")
        n_busy = sum([1 for slot in res.body if slot["is_processing"]])
        if should_all_slots_busy:
            assert n_busy == n_slots
        else:
            assert n_busy <= n_slots

    tasks = []
    for i in range(n_requests):
        prompt, re_content = PROMPTS[i % len(PROMPTS)]
        tasks.append((server.make_request, ("POST", "/completion", {
            "prompt": prompt,
            "seed": 42,
            "temperature": 1.0,
        })))
    tasks.append((check_slots_status, ()))
    results = parallel_function_calls(tasks)

    # check results
    for i in range(n_requests):
        prompt, re_content = PROMPTS[i % len(PROMPTS)]
        res = results[i]
        assert res.status_code == 200
        assert type(res.body["content"]) == str
        assert len(res.body["content"]) > 10
        # FIXME: the result is not deterministic when using a slot other than slot 0
        # assert match_regex(re_content, res.body["content"])
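

# "response_fields" filters the response body down to the requested keys
# (nested keys use "/" as a separator); an empty list returns the full body.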
@pytest.mark.parametrize(
    "prompt,n_predict,response_fields",
    [
        ("I believe the meaning of life is", 8, []),
        ("I believe the meaning of life is", 32, ["content", "generation_settings/n_predict", "prompt"]),
    ],
)
def test_completion_response_fields(
    prompt: str, n_predict: int, response_fields: list[str]
):
    global server
    server.start()
    res = server.make_request(
        "POST",
        "/completion",
        data={
            "n_predict": n_predict,
            "prompt": prompt,
            "response_fields": response_fields,
        },
    )
    assert res.status_code == 200
    assert "content" in res.body
    assert len(res.body["content"])
    if len(response_fields):
        assert res.body["generation_settings/n_predict"] == n_predict
        assert res.body["prompt"] == "<s> " + prompt
        assert isinstance(res.body["content"], str)
        assert len(res.body) == len(response_fields)
    else:
        assert len(res.body)
        assert "generation_settings" in res.body
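

# "n_probs" requests log-probabilities: each generated token should carry its
# own logprob plus the requested number of top_logprobs candidates.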
def test_n_probs():
    global server
    server.start()
    res = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "n_probs": 10,
        "temperature": 0.0,
        "n_predict": 5,
    })
    assert res.status_code == 200
    assert "completion_probabilities" in res.body
    assert len(res.body["completion_probabilities"]) == 5
    for tok in res.body["completion_probabilities"]:
        assert "id" in tok and tok["id"] > 0
        assert "token" in tok and type(tok["token"]) == str
        assert "logprob" in tok and tok["logprob"] <= 0.0
        assert "bytes" in tok and type(tok["bytes"]) == list
        assert len(tok["top_logprobs"]) == 10
        for prob in tok["top_logprobs"]:
            assert "id" in prob and prob["id"] > 0
            assert "token" in prob and type(prob["token"]) == str
            assert "logprob" in prob and prob["logprob"] <= 0.0
            assert "bytes" in prob and type(prob["bytes"]) == list
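

# Same checks for the streaming case: every non-final chunk carries the
# probabilities of exactly one token.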
def test_n_probs_stream():
    global server
    server.start()
    res = server.make_stream_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "n_probs": 10,
        "temperature": 0.0,
        "n_predict": 5,
        "stream": True,
    })
    for data in res:
        if data["stop"] == False:
            assert "completion_probabilities" in data
            assert len(data["completion_probabilities"]) == 1
            for tok in data["completion_probabilities"]:
                assert "id" in tok and tok["id"] > 0
                assert "token" in tok and type(tok["token"]) == str
                assert "logprob" in tok and tok["logprob"] <= 0.0
                assert "bytes" in tok and type(tok["bytes"]) == list
                assert len(tok["top_logprobs"]) == 10
                for prob in tok["top_logprobs"]:
                    assert "id" in prob and prob["id"] > 0
                    assert "token" in prob and type(prob["token"]) == str
                    assert "logprob" in prob and prob["logprob"] <= 0.0
                    assert "bytes" in prob and type(prob["bytes"]) == list
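

# With "post_sampling_probs" the server reports probabilities after sampling
# ("prob" in (0, 1]) instead of raw logprobs.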
def test_n_probs_post_sampling():
    global server
    server.start()
    res = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "n_probs": 10,
        "temperature": 0.0,
        "n_predict": 5,
        "post_sampling_probs": True,
    })
    assert res.status_code == 200
    assert "completion_probabilities" in res.body
    assert len(res.body["completion_probabilities"]) == 5
    for tok in res.body["completion_probabilities"]:
        assert "id" in tok and tok["id"] > 0
        assert "token" in tok and type(tok["token"]) == str
        assert "prob" in tok and 0.0 < tok["prob"] <= 1.0
        assert "bytes" in tok and type(tok["bytes"]) == list
        assert len(tok["top_probs"]) == 10
        for prob in tok["top_probs"]:
            assert "id" in prob and prob["id"] > 0
            assert "token" in prob and type(prob["token"]) == str
            assert "prob" in prob and 0.0 <= prob["prob"] <= 1.0
            assert "bytes" in prob and type(prob["bytes"]) == list
        # because the test model usually outputs tokens with either 100% or 0% probability, we need to check all the top_probs
        assert any(prob["prob"] == 1.0 for prob in tok["top_probs"])
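

# Cancelling a request via a client-side timeout should free the slot again.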
def test_cancel_request():
    global server
    server.n_ctx = 4096
    server.n_predict = -1
    server.n_slots = 1
    server.server_slots = True
    server.start()
    # send a request that will take a long time, but cancel it before it finishes
    try:
        server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
        }, timeout=0.1)
    except requests.exceptions.ReadTimeout:
        pass  # expected
    # make sure the slot is free
    time.sleep(1)  # wait for HTTP_POLLING_SECONDS
    res = server.make_request("GET", "/slots")
    assert res.body[0]["is_processing"] == False