# test_completion.py

import pytest
import time
from utils import *

server = ServerPreset.tinyllama2()


@pytest.fixture(scope="module", autouse=True)
def create_server():
    global server
    server = ServerPreset.tinyllama2()


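# Non-streaming completion: verify the prompt/predicted token counts reported in
# "timings", the "truncated" flag, and that the generated content matches the regex.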
@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated", [
    ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False),
    ("Write a joke about AI from a very long prompt which will not be truncated", 256, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False),
])
def test_completion(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool):
    global server
    server.start()
    res = server.make_request("POST", "/completion", data={
        "n_predict": n_predict,
        "prompt": prompt,
    })
    assert res.status_code == 200
    assert res.body["timings"]["prompt_n"] == n_prompt
    assert res.body["timings"]["predicted_n"] == n_predicted
    assert res.body["truncated"] == truncated
    assert match_regex(re_content, res.body["content"])


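# Streaming completion: accumulate the streamed chunks, then check timings,
# truncation, and the accumulated content when the final ("stop") event arrives.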
@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated", [
    ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False),
    ("Write a joke about AI from a very long prompt which will not be truncated", 256, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False),
])
def test_completion_stream(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool):
    global server
    server.start()
    res = server.make_stream_request("POST", "/completion", data={
        "n_predict": n_predict,
        "prompt": prompt,
        "stream": True,
    })
    content = ""
    for data in res:
        if data["stop"]:
            assert data["timings"]["prompt_n"] == n_prompt
            assert data["timings"]["predicted_n"] == n_predicted
            assert data["truncated"] == truncated
            assert match_regex(re_content, content)
        else:
            content += data["content"]


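# Repeated requests with the same seed should return identical content,
# independent of the number of slots.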
@pytest.mark.parametrize("n_slots", [1, 2])
def test_consistent_result_same_seed(n_slots: int):
    global server
    server.n_slots = n_slots
    server.start()
    last_res = None
    for _ in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": 42,
            "temperature": 1.0,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] == last_res.body["content"]
        last_res = res


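# With temperature 1.0, different seeds should lead to different sampling
# outcomes and therefore different content.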
@pytest.mark.parametrize("n_slots", [1, 2])
def test_different_result_different_seed(n_slots: int):
    global server
    server.n_slots = n_slots
    server.start()
    last_res = None
    for seed in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": seed,
            "temperature": 1.0,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] != last_res.body["content"]
        last_res = res


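# The same seed should give the same result regardless of the batch size used
# to process the prompt.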
@pytest.mark.parametrize("n_batch", [16, 32])
@pytest.mark.parametrize("temperature", [0.0, 1.0])
def test_consistent_result_different_batch_size(n_batch: int, temperature: float):
    global server
    server.n_batch = n_batch
    server.start()
    last_res = None
    for _ in range(4):
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "seed": 42,
            "temperature": temperature,
            "cache_prompt": False,  # TODO: remove this once test_cache_vs_nocache_prompt is fixed
        })
        if last_res is not None:
            assert res.body["content"] == last_res.body["content"]
        last_res = res


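# Completion with prompt caching enabled should match completion without it;
# currently skipped (see the skip reason below).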
@pytest.mark.skip(reason="This test fails on linux, needs to be fixed")
def test_cache_vs_nocache_prompt():
    global server
    server.start()
    res_cache = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "seed": 42,
        "temperature": 1.0,
        "cache_prompt": True,
    })
    res_no_cache = server.make_request("POST", "/completion", data={
        "prompt": "I believe the meaning of life is",
        "seed": 42,
        "temperature": 1.0,
        "cache_prompt": False,
    })
    assert res_cache.body["content"] == res_no_cache.body["content"]


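# The "prompt" field accepts a plain string, a token array, a batch of either,
# or a mix of strings and tokens within a single sequence.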
def test_completion_with_tokens_input():
    global server
    server.temperature = 0.0
    server.start()
    prompt_str = "I believe the meaning of life is"
    res = server.make_request("POST", "/tokenize", data={
        "content": prompt_str,
        "add_special": True,
    })
    assert res.status_code == 200
    tokens = res.body["tokens"]

    # single completion
    res = server.make_request("POST", "/completion", data={
        "prompt": tokens,
    })
    assert res.status_code == 200
    assert type(res.body["content"]) == str

    # batch completion
    res = server.make_request("POST", "/completion", data={
        "prompt": [tokens, tokens],
    })
    assert res.status_code == 200
    assert type(res.body) == list
    assert len(res.body) == 2
    assert res.body[0]["content"] == res.body[1]["content"]

    # mixed string and tokens
    res = server.make_request("POST", "/completion", data={
        "prompt": [tokens, prompt_str],
    })
    assert res.status_code == 200
    assert type(res.body) == list
    assert len(res.body) == 2
    assert res.body[0]["content"] == res.body[1]["content"]

    # mixed string and tokens in one sequence
    res = server.make_request("POST", "/completion", data={
        "prompt": [1, 2, 3, 4, 5, 6, prompt_str, 7, 8, 9, 10, prompt_str],
    })
    assert res.status_code == 200
    assert type(res.body["content"]) == str


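# Run several completion requests concurrently and, in parallel, poll the
# /slots endpoint to verify the expected number of slots is busy.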
@pytest.mark.parametrize("n_slots,n_requests", [
    (1, 3),
    (2, 2),
    (2, 4),
    (4, 2),  # some slots must be idle
    (4, 6),
])
def test_completion_parallel_slots(n_slots: int, n_requests: int):
    global server
    server.n_slots = n_slots
    server.temperature = 0.0
    server.start()

    PROMPTS = [
        ("Write a very long book.", "(very|special|big)+"),
        ("Write another a poem.", "(small|house)+"),
        ("What is LLM?", "(Dad|said)+"),
        ("The sky is blue and I love it.", "(climb|leaf)+"),
        ("Write another very long music lyrics.", "(friends|step|sky)+"),
        ("Write a very long joke.", "(cat|Whiskers)+"),
    ]

    def check_slots_status():
        should_all_slots_busy = n_requests >= n_slots
        time.sleep(0.1)
        res = server.make_request("GET", "/slots")
        n_busy = sum([1 for slot in res.body if slot["is_processing"]])
        if should_all_slots_busy:
            assert n_busy == n_slots
        else:
            assert n_busy <= n_slots

    tasks = []
    for i in range(n_requests):
        prompt, re_content = PROMPTS[i % len(PROMPTS)]
        tasks.append((server.make_request, ("POST", "/completion", {
            "prompt": prompt,
            "seed": 42,
            "temperature": 1.0,
        })))
    tasks.append((check_slots_status, ()))
    results = parallel_function_calls(tasks)

    # check results
    for i in range(n_requests):
        prompt, re_content = PROMPTS[i % len(PROMPTS)]
        res = results[i]
        assert res.status_code == 200
        assert type(res.body["content"]) == str
        assert len(res.body["content"]) > 10
        # FIXME: the result is not deterministic when using a slot other than slot 0
        # assert match_regex(re_content, res.body["content"])