test: replace gpt-3.5-turbo-0613 (deprecated model) #5794

Merged 1 commit on Sep 19, 2024
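
The change itself is mechanical: every test that pinned the now-deprecated gpt-3.5-turbo-0613 snapshot switches to the rolling gpt-3.5-turbo alias. As a minimal sketch of the pattern being updated (a hypothetical call for illustration only, not code taken from this PR):

import os

import litellm

# Hypothetical illustration of the substitution applied across the test suite:
# the deprecated pinned snapshot "gpt-3.5-turbo-0613" becomes the rolling
# "gpt-3.5-turbo" alias; the rest of the call is unchanged.
response = litellm.completion(
    model="gpt-3.5-turbo",  # previously "gpt-3.5-turbo-0613"
    api_key=os.getenv("OPENAI_API_KEY"),
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
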
2 changes: 1 addition & 1 deletion litellm/tests/model_cost.json
@@ -1,3 +1,3 @@
{
"gpt-3.5-turbo-0613": 7.7e-05
"gpt-3.5-turbo": 7.7e-05
}
2 changes: 1 addition & 1 deletion litellm/tests/test_exceptions.py
@@ -71,7 +71,7 @@ async def test_content_policy_exception_openai():
# this is only a test - we needed some way to invoke the exception :(
litellm.set_verbose = True
response = await litellm.acompletion(
model="gpt-3.5-turbo-0613",
model="gpt-3.5-turbo",
stream=True,
messages=[
{"role": "user", "content": "Gimme the lyrics to Don't Stop Me Now"}
8 changes: 4 additions & 4 deletions litellm/tests/test_router.py
@@ -1057,9 +1057,9 @@ def test_router_region_pre_call_check(allowed_model_region):
def test_function_calling():
model_list = [
{
"model_name": "gpt-3.5-turbo-0613",
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
@@ -1088,7 +1088,7 @@ def test_function_calling():

router = Router(model_list=model_list)
response = router.completion(
model="gpt-3.5-turbo-0613", messages=messages, functions=functions
model="gpt-3.5-turbo", messages=messages, functions=functions
)
router.reset()
print(response)
@@ -1104,7 +1104,7 @@ def test_function_calling_on_router():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
},
11 changes: 6 additions & 5 deletions litellm/tests/test_router_caching.py
@@ -30,7 +30,7 @@ async def test_router_async_caching_with_ssl_url():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
@@ -57,7 +57,7 @@ def test_router_sync_caching_with_ssl_url():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
@@ -84,7 +84,7 @@ async def test_acompletion_caching_on_router():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
@@ -201,7 +201,7 @@ async def test_acompletion_caching_with_ttl_on_router():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
@@ -266,8 +266,9 @@ async def test_acompletion_caching_on_router_caching_groups():
{
"model_name": "openai-gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
"mock_response": "Hello world",
},
"tpm": 100000,
"rpm": 10000,
2 changes: 1 addition & 1 deletion litellm/tests/test_router_client_init.py
@@ -32,7 +32,7 @@ async def test_router_init():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"model_info": {"id": "1234"},
32 changes: 19 additions & 13 deletions litellm/tests/test_router_get_deployments.py
@@ -1,19 +1,25 @@
# Tests for router.get_available_deployment
# specifically test if it can pick the correct LLM when rpm/tpm set
# These are fast Tests, and make no API calls
- import sys, os, time
- import traceback, asyncio
+ import asyncio
+ import os
+ import sys
+ import time
+ import traceback

import pytest

sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
- import litellm
- from litellm import Router
- from concurrent.futures import ThreadPoolExecutor
from collections import defaultdict
+ from concurrent.futures import ThreadPoolExecutor

from dotenv import load_dotenv

+ import litellm
+ from litellm import Router

load_dotenv()


@@ -27,7 +33,7 @@ def test_weighted_selection_router():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
"rpm": 6,
},
@@ -83,7 +89,7 @@ def test_weighted_selection_router_tpm():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
"tpm": 5,
},
@@ -139,7 +145,7 @@ def test_weighted_selection_router_tpm_as_router_param():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 5,
@@ -195,7 +201,7 @@ def test_weighted_selection_router_rpm_as_router_param():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"rpm": 5,
@@ -252,7 +258,7 @@ def test_weighted_selection_router_no_rpm_set():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
"rpm": 6,
},
@@ -311,7 +317,7 @@ def test_model_group_aliases():
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
"tpm": 1,
},
@@ -537,7 +543,7 @@ async def test_weighted_selection_router_async(rpm_list, tpm_list):
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"api_key": os.getenv("OPENAI_API_KEY"),
"rpm": rpm_list[0],
"tpm": tpm_list[0],
@@ -580,7 +586,7 @@ async def test_weighted_selection_router_async(rpm_list, tpm_list):
else:
# Assert both are used
assert selection_counts["azure/chatgpt-v-2"] > 0
- assert selection_counts["gpt-3.5-turbo-0613"] > 0
+ assert selection_counts["gpt-3.5-turbo"] > 0
router.reset()
except Exception as e:
traceback.print_exc()
10 changes: 5 additions & 5 deletions litellm/tests/test_streaming.py
@@ -2509,7 +2509,7 @@ async def completion_call():
"id": "chatcmpl-7zVNA4sXUftpIg6W8WlntCyeBj2JY",
"object": "chat.completion",
"created": 1694892960,
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"choices": [
{
"index": 0,
@@ -2573,7 +2573,7 @@ def validate_final_structure(item, structure=function_calling_output_structure):
"id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
"object": "chat.completion.chunk",
"created": 1694893248,
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"choices": [
{
"index": 0,
@@ -2646,7 +2646,7 @@ def validate_first_function_call_chunk_structure(item):
"id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
"object": "chat.completion.chunk",
"created": 1694893248,
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"choices": [
{
"index": 0,
@@ -2690,7 +2690,7 @@ def validate_second_function_call_chunk_structure(data):
"id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
"object": "chat.completion.chunk",
"created": 1694893248,
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"choices": [{"index": 0, "delta": {}, "finish_reason": "function_call"}],
}

@@ -3476,7 +3476,7 @@ def test_unit_test_custom_stream_wrapper_openai():
)
],
"created": 1721353246,
"model": "gpt-3.5-turbo-0613",
"model": "gpt-3.5-turbo",
"object": "chat.completion.chunk",
"system_fingerprint": None,
"usage": None,
6 changes: 3 additions & 3 deletions litellm/tests/test_utils.py
@@ -598,7 +598,7 @@ def test_get_llm_provider_ft_models():
All ft prefixed models should map to OpenAI
gpt-3.5-turbo-0125 (recommended),
gpt-3.5-turbo-1106,
- gpt-3.5-turbo-0613,
+ gpt-3.5-turbo,
gpt-4-0613 (experimental)
gpt-4o-2024-05-13.
babbage-002, davinci-002,
@@ -610,13 +610,13 @@
model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo-1106")
assert custom_llm_provider == "openai"

- model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo-0613")
+ model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo")
assert custom_llm_provider == "openai"

model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-4-0613")
assert custom_llm_provider == "openai"

- model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo-0613")
+ model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo")
assert custom_llm_provider == "openai"

model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-4o-2024-05-13")
2 changes: 1 addition & 1 deletion litellm/tests/user_cost.json
@@ -3,7 +3,7 @@
"total_budget": 10,
"current_cost": 7.3e-05,
"model_cost": {
"gpt-3.5-turbo-0613": 7.3e-05
"gpt-3.5-turbo": 7.3e-05
}
},
"12345": {