# test_embedding.py
  1. import pytest
  2. from openai import OpenAI
  3. from utils import *
# Module-level server handle; re-initialized by the autouse fixture below.
server = ServerPreset.bert_bge_small()

# Tolerance for floating-point comparisons on embedding values.
EPSILON = 1e-3
@pytest.fixture(scope="module", autouse=True)
def create_server():
    """Rebind the module-global `server` to a fresh bert-bge-small preset.

    Runs automatically once per test module, so per-test attribute tweaks
    (e.g. `server.pooling`) start from a clean preset.
    """
    global server
    server = ServerPreset.bert_bge_small()
  10. def test_embedding_single():
  11. global server
  12. server.pooling = 'last'
  13. server.start()
  14. res = server.make_request("POST", "/v1/embeddings", data={
  15. "input": "I believe the meaning of life is",
  16. })
  17. assert res.status_code == 200
  18. assert len(res.body['data']) == 1
  19. assert 'embedding' in res.body['data'][0]
  20. assert len(res.body['data'][0]['embedding']) > 1
  21. # make sure embedding vector is normalized
  22. assert abs(sum([x ** 2 for x in res.body['data'][0]['embedding']]) - 1) < EPSILON
  23. def test_embedding_multiple():
  24. global server
  25. server.pooling = 'last'
  26. server.start()
  27. res = server.make_request("POST", "/v1/embeddings", data={
  28. "input": [
  29. "I believe the meaning of life is",
  30. "Write a joke about AI from a very long prompt which will not be truncated",
  31. "This is a test",
  32. "This is another test",
  33. ],
  34. })
  35. assert res.status_code == 200
  36. assert len(res.body['data']) == 4
  37. for d in res.body['data']:
  38. assert 'embedding' in d
  39. assert len(d['embedding']) > 1
  40. @pytest.mark.parametrize(
  41. "input,is_multi_prompt",
  42. [
  43. # single prompt
  44. ("string", False),
  45. ([12, 34, 56], False),
  46. ([12, 34, "string", 56, 78], False),
  47. # multiple prompts
  48. (["string1", "string2"], True),
  49. (["string1", [12, 34, 56]], True),
  50. ([[12, 34, 56], [12, 34, 56]], True),
  51. ([[12, 34, 56], [12, "string", 34, 56]], True),
  52. ]
  53. )
  54. def test_embedding_mixed_input(input, is_multi_prompt: bool):
  55. global server
  56. server.start()
  57. res = server.make_request("POST", "/v1/embeddings", data={"input": input})
  58. assert res.status_code == 200
  59. data = res.body['data']
  60. if is_multi_prompt:
  61. assert len(data) == len(input)
  62. for d in data:
  63. assert 'embedding' in d
  64. assert len(d['embedding']) > 1
  65. else:
  66. assert 'embedding' in data[0]
  67. assert len(data[0]['embedding']) > 1
  68. def test_embedding_pooling_none():
  69. global server
  70. server.pooling = 'none'
  71. server.start()
  72. res = server.make_request("POST", "/embeddings", data={
  73. "input": "hello hello hello",
  74. })
  75. assert res.status_code == 200
  76. assert 'embedding' in res.body[0]
  77. assert len(res.body[0]['embedding']) == 5 # 3 text tokens + 2 special
  78. # make sure embedding vector is not normalized
  79. for x in res.body[0]['embedding']:
  80. assert abs(sum([x ** 2 for x in x]) - 1) > EPSILON
  81. def test_embedding_pooling_none_oai():
  82. global server
  83. server.pooling = 'none'
  84. server.start()
  85. res = server.make_request("POST", "/v1/embeddings", data={
  86. "input": "hello hello hello",
  87. })
  88. # /v1/embeddings does not support pooling type 'none'
  89. assert res.status_code == 400
  90. def test_embedding_openai_library_single():
  91. global server
  92. server.pooling = 'last'
  93. server.start()
  94. client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
  95. res = client.embeddings.create(model="text-embedding-3-small", input="I believe the meaning of life is")
  96. assert len(res.data) == 1
  97. assert len(res.data[0].embedding) > 1
  98. def test_embedding_openai_library_multiple():
  99. global server
  100. server.pooling = 'last'
  101. server.start()
  102. client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
  103. res = client.embeddings.create(model="text-embedding-3-small", input=[
  104. "I believe the meaning of life is",
  105. "Write a joke about AI from a very long prompt which will not be truncated",
  106. "This is a test",
  107. "This is another test",
  108. ])
  109. assert len(res.data) == 4
  110. for d in res.data:
  111. assert len(d.embedding) > 1
  112. def test_embedding_error_prompt_too_long():
  113. global server
  114. server.pooling = 'last'
  115. server.start()
  116. res = server.make_request("POST", "/v1/embeddings", data={
  117. "input": "This is a test " * 512,
  118. })
  119. assert res.status_code != 200
  120. assert "too large" in res.body["error"]["message"]
  121. def test_same_prompt_give_same_result():
  122. server.pooling = 'last'
  123. server.start()
  124. res = server.make_request("POST", "/v1/embeddings", data={
  125. "input": [
  126. "I believe the meaning of life is",
  127. "I believe the meaning of life is",
  128. "I believe the meaning of life is",
  129. "I believe the meaning of life is",
  130. "I believe the meaning of life is",
  131. ],
  132. })
  133. assert res.status_code == 200
  134. assert len(res.body['data']) == 5
  135. for i in range(1, len(res.body['data'])):
  136. v0 = res.body['data'][0]['embedding']
  137. vi = res.body['data'][i]['embedding']
  138. for x, y in zip(v0, vi):
  139. assert abs(x - y) < EPSILON
  140. @pytest.mark.parametrize(
  141. "content,n_tokens",
  142. [
  143. ("I believe the meaning of life is", 9),
  144. ("This is a test", 6),
  145. ]
  146. )
  147. def test_embedding_usage_single(content, n_tokens):
  148. global server
  149. server.start()
  150. res = server.make_request("POST", "/v1/embeddings", data={"input": content})
  151. assert res.status_code == 200
  152. assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
  153. assert res.body['usage']['prompt_tokens'] == n_tokens
  154. def test_embedding_usage_multiple():
  155. global server
  156. server.start()
  157. res = server.make_request("POST", "/v1/embeddings", data={
  158. "input": [
  159. "I believe the meaning of life is",
  160. "I believe the meaning of life is",
  161. ],
  162. })
  163. assert res.status_code == 200
  164. assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
  165. assert res.body['usage']['prompt_tokens'] == 2 * 9