Skip to content

Commit

Permalink
Fix LangSmith load test
Browse files Browse the repository at this point in the history
  • Loading branch information
ishaan-jaff committed Sep 12, 2024
1 parent a1f8fcf commit b01a42e
Showing 1 changed file with 55 additions and 74 deletions.
129 changes: 55 additions & 74 deletions tests/load_tests/test_langsmith_load_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,46 +13,56 @@


def test_langsmith_logging_async():
    """Measure the latency overhead the LangSmith success callback adds to
    batched ``acompletion`` calls.

    Runs 3 rounds. Each round times one workload with every callback hook
    cleared (baseline) and the same workload with
    ``litellm.success_callback = ["langsmith"]``, then asserts the average
    overhead across rounds stays below 10%.

    Raises:
        AssertionError: if the average overhead is 10% or more.
    """
    try:
        # Point the LangSmith logger at a local mock endpoint so the test
        # makes no real network calls to LangSmith.
        os.environ["LANGSMITH_API_KEY"] = "lsv2_anything"
        os.environ["LANGSMITH_PROJECT"] = "pr-b"
        os.environ["LANGSMITH_BASE_URL"] = "http://0.0.0.0:8090"

        percentage_diffs = []

        for run in range(3):
            print(f"\nRun {run + 1}:")

            # Baseline: clear every callback hook so the timing reflects
            # plain acompletion calls only.
            litellm.success_callback = []
            litellm.callbacks = []
            litellm._async_success_callback = []
            litellm._async_failure_callback = []
            litellm.failure_callback = []
            start_time_empty_callback = asyncio.run(make_async_calls())
            print("Done with no callback test")

            # Same workload with the langsmith success callback enabled.
            print("Starting langsmith test")
            litellm.success_callback = ["langsmith"]
            start_time_langsmith = asyncio.run(make_async_calls())
            print("Done with langsmith test")

            print(f"Time with success_callback='langsmith': {start_time_langsmith}")
            print(f"Time with empty success_callback: {start_time_empty_callback}")

            percentage_diff = (
                abs(start_time_langsmith - start_time_empty_callback)
                / start_time_empty_callback
                * 100
            )
            percentage_diffs.append(percentage_diff)
            print(f"Performance difference: {percentage_diff:.2f}%")

        print("percentage_diffs", percentage_diffs)
        # Average across the 3 runs to smooth out one-off scheduling noise.
        avg_percentage_diff = sum(percentage_diffs) / len(percentage_diffs)
        print(f"\nAverage performance difference: {avg_percentage_diff:.2f}%")

        # Logging is meant to be fire-and-forget; sustained >10% overhead
        # indicates the callback is blocking the completion path.
        assert (
            avg_percentage_diff < 10
        ), f"Average performance difference of {avg_percentage_diff:.2f}% exceeds 10% threshold"

    except litellm.Timeout:
        # A provider timeout is environment flakiness, not a logging
        # regression — ignore it rather than fail the load test.
        pass
    except Exception as e:
        pytest.fail(f"An exception occurred - {e}")
Expand All @@ -61,52 +71,23 @@ def test_langsmith_logging_async():


async def make_async_calls(metadata=None, **completion_kwargs):
    """Fire 3 batches of 100 concurrent completion tasks and return the
    total wall-clock time spent waiting on the tasks themselves.

    Args:
        metadata: unused; kept for backward compatibility with callers.
        **completion_kwargs: unused; kept for backward compatibility.

    Returns:
        float: summed seconds across the 3 batches. Excludes the 1-second
        cool-down sleep between batches so the caller's percentage
        comparison is not diluted by fixed sleep time.
    """
    batch_size = 100
    num_batches = 3
    total_time = 0.0

    # get_running_loop() is the non-deprecated way to reach the loop from
    # inside a coroutine; loop.time() gives a monotonic clock.
    loop = asyncio.get_running_loop()

    for batch in range(num_batches):
        tasks = [create_async_task() for _ in range(batch_size)]

        start_time = loop.time()
        responses = await asyncio.gather(*tasks)
        # Stop the clock before printing and sleeping so neither pads the
        # measurement.
        total_time += loop.time() - start_time

        for idx, response in enumerate(responses):
            print(f"Response from Task {batch * batch_size + idx + 1}: {response}")

        # Cool-down between batches to let pending logging flush.
        await asyncio.sleep(1)

    return total_time

Expand Down

0 comments on commit b01a42e

Please sign in to comment.