test_completion.py

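# Integration tests for the server's /completion endpoint: plain and streaming
# completions, seed and batch-size determinism, prompt caching, token-based
# prompts, parallel slot handling, and per-token probabilities (n_probs).
#
# Assumed to be run with pytest via the project's test harness, e.g.
# `pytest -v test_completion.py` (the exact invocation may differ).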
import pytest
import time
from utils import *

server = ServerPreset.tinyllama2()


@pytest.fixture(scope="module", autouse=True)
def create_server():
    global server
    server = ServerPreset.tinyllama2()

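# All tests below share the module-level `server` preset: the autouse fixture
# above recreates it once per test module, and individual tests tweak settings
# such as n_slots, n_batch, or temperature before calling server.start().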
@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated", [
    ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False),
    ("Write a joke about AI from a very long prompt which will not be truncated", 256, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False),
])
def test_completion(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool):
    global server
    server.start()
    res = server.make_request("POST", "/completion", data={
        "n_predict": n_predict,
        "prompt": prompt,
    })
    assert res.status_code == 200
    assert res.body["timings"]["prompt_n"] == n_prompt
    assert res.body["timings"]["predicted_n"] == n_predicted
    assert res.body["truncated"] == truncated
    assert type(res.body["has_new_line"]) == bool
    assert match_regex(re_content, res.body["content"])

@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated", [
    ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False),
    ("Write a joke about AI from a very long prompt which will not be truncated", 256, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False),
])
def test_completion_stream(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool):
    global server
    server.start()
    res = server.make_stream_request("POST", "/completion", data={
        "n_predict": n_predict,
        "prompt": prompt,
        "stream": True,
    })
    content = ""
    for data in res:
        assert "stop" in data and type(data["stop"]) == bool
        if data["stop"]:
            # the final streamed chunk carries timings and generation settings for the whole request
            assert data["timings"]["prompt_n"] == n_prompt
            assert data["timings"]["predicted_n"] == n_predicted
            assert data["truncated"] == truncated
            assert data["stop_type"] == "limit"
            assert type(data["has_new_line"]) == bool
            assert "generation_settings" in data
            assert server.n_predict is not None
            assert data["generation_settings"]["n_predict"] == min(n_predict, server.n_predict)
            assert data["generation_settings"]["seed"] == server.seed
            assert match_regex(re_content, content)
        else:
            content += data["content"]

def test_completion_stream_vs_non_stream():
    global server
    server.start()
    res_stream = server.make_stream_request("POST", "/completion", data={
        "n_predict": 8,
        "prompt": "I believe the meaning of life is",
        "stream": True,
    })
    res_non_stream = server.make_request("POST", "/completion", data={
        "n_predict": 8,
        "prompt": "I believe the meaning of life is",
    })
    content_stream = ""
    for data in res_stream:
        content_stream += data["content"]
    assert content_stream == res_non_stream.body["content"]

@pytest.mark.parametrize("n_slots", [1, 2])
def test_consistent_result_same_seed(n_slots: int):
    global server
    server.n_slots = n_slots
    server.start()
    last_res = None
    for _ in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": 42,
            "temperature": 1.0,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] == last_res.body["content"]
        last_res = res

@pytest.mark.parametrize("n_slots", [1, 2])
def test_different_result_different_seed(n_slots: int):
    global server
    server.n_slots = n_slots
    server.start()
    last_res = None
    for seed in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": seed,
            "temperature": 1.0,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] != last_res.body["content"]
        last_res = res

@pytest.mark.parametrize("n_batch", [16, 32])
@pytest.mark.parametrize("temperature", [0.0, 1.0])
def test_consistent_result_different_batch_size(n_batch: int, temperature: float):
    global server
    server.n_batch = n_batch
    server.start()
    last_res = None
    for _ in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": 42,
            "temperature": temperature,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] == last_res.body["content"]
        last_res = res

@pytest.mark.skip(reason="This test fails on Linux and needs to be fixed")
def test_cache_vs_nocache_prompt():
    global server
    server.start()
    res_cache = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "seed": 42,
        "temperature": 1.0,
        "cache_prompt": True,
    })
    res_no_cache = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "seed": 42,
        "temperature": 1.0,
        "cache_prompt": False,
    })
    assert res_cache.body["content"] == res_no_cache.body["content"]

def test_completion_with_tokens_input():
    global server
    server.temperature = 0.0
    server.start()
    prompt_str = "I believe the meaning of life is"
    res = server.make_request("POST", "/tokenize", data={
        "content": prompt_str,
        "add_special": True,
    })
    assert res.status_code == 200
    tokens = res.body["tokens"]

    # single completion
    res = server.make_request("POST", "/completion", data={
        "prompt": tokens,
    })
    assert res.status_code == 200
    assert type(res.body["content"]) == str

    # batch completion
    res = server.make_request("POST", "/completion", data={
        "prompt": [tokens, tokens],
    })
    assert res.status_code == 200
    assert type(res.body) == list
    assert len(res.body) == 2
    assert res.body[0]["content"] == res.body[1]["content"]

    # mixed string and tokens
    res = server.make_request("POST", "/completion", data={
        "prompt": [tokens, prompt_str],
    })
    assert res.status_code == 200
    assert type(res.body) == list
    assert len(res.body) == 2
    assert res.body[0]["content"] == res.body[1]["content"]

    # mixed string and tokens in one sequence
    res = server.make_request("POST", "/completion", data={
        "prompt": [1, 2, 3, 4, 5, 6, prompt_str, 7, 8, 9, 10, prompt_str],
    })
    assert res.status_code == 200
    assert type(res.body["content"]) == str

@pytest.mark.parametrize("n_slots,n_requests", [
    (1, 3),
    (2, 2),
    (2, 4),
    (4, 2),  # some slots must be idle
    (4, 6),
])
def test_completion_parallel_slots(n_slots: int, n_requests: int):
    global server
    server.n_slots = n_slots
    server.temperature = 0.0
    server.start()

    PROMPTS = [
        ("Write a very long book.", "(very|special|big)+"),
        ("Write another a poem.", "(small|house)+"),
        ("What is LLM?", "(Dad|said)+"),
        ("The sky is blue and I love it.", "(climb|leaf)+"),
        ("Write another very long music lyrics.", "(friends|step|sky)+"),
        ("Write a very long joke.", "(cat|Whiskers)+"),
    ]

    def check_slots_status():
        # runs alongside the completion requests: poll /slots and check how many slots are busy
        should_all_slots_busy = n_requests >= n_slots
        time.sleep(0.1)
        res = server.make_request("GET", "/slots")
        n_busy = sum([1 for slot in res.body if slot["is_processing"]])
        if should_all_slots_busy:
            assert n_busy == n_slots
        else:
            assert n_busy <= n_slots

    tasks = []
    for i in range(n_requests):
        prompt, re_content = PROMPTS[i % len(PROMPTS)]
        tasks.append((server.make_request, ("POST", "/completion", {
            "prompt": prompt,
            "seed": 42,
            "temperature": 1.0,
        })))
    tasks.append((check_slots_status, ()))
    results = parallel_function_calls(tasks)

    # check results
    for i in range(n_requests):
        prompt, re_content = PROMPTS[i % len(PROMPTS)]
        res = results[i]
        assert res.status_code == 200
        assert type(res.body["content"]) == str
        assert len(res.body["content"]) > 10
        # FIXME: the result is not deterministic when using a slot other than slot 0
        # assert match_regex(re_content, res.body["content"])

def test_n_probs():
    global server
    server.start()
    res = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "n_probs": 10,
        "temperature": 0.0,
        "n_predict": 5,
    })
    assert res.status_code == 200
    assert "completion_probabilities" in res.body
    assert len(res.body["completion_probabilities"]) == 5
    for tok in res.body["completion_probabilities"]:
        assert "probs" in tok
        assert len(tok["probs"]) == 10
        for prob in tok["probs"]:
            assert "prob" in prob
            assert "tok_str" in prob
            assert 0.0 <= prob["prob"] <= 1.0