chore(api/tests): apply ruff reformat #7590 (#7591)

Co-authored-by: -LAN- <laipz8200@outlook.com>
Bowen Liang authored 2024-08-23 23:52:25 +08:00; committed by GitHub
parent 2da63654e5
commit b035c02f78
155 changed files with 4279 additions and 5925 deletions
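
The changes below are a mechanical reformat: single quotes normalized to double quotes, trailing commas added, and short call arguments collapsed onto one line. The exact command used is not recorded in the commit; a plausible invocation, assuming ruff is configured in the repo, is:

    # Hypothetical sketch; the target path and any extra flags are
    # assumptions, not taken from the commit itself.
    ruff format api/tests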

View File

@@ -1,4 +1,4 @@
 """
-LocalAI Embedding Interface is temporarily unavailable due to
-we could not find a way to test it for now.
-"""
+LocalAI Embedding Interface is temporarily unavailable due to
+we could not find a way to test it for now.
+"""

View File

@@ -21,99 +21,78 @@ def test_validate_credentials_for_chat_model():
     with pytest.raises(CredentialsValidateFailedError):
         model.validate_credentials(
-            model='chinese-llama-2-7b',
+            model="chinese-llama-2-7b",
             credentials={
-                'server_url': 'hahahaha',
-                'completion_type': 'completion',
-            }
+                "server_url": "hahahaha",
+                "completion_type": "completion",
+            },
         )

     model.validate_credentials(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'completion',
-        }
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "completion",
+        },
     )


 def test_invoke_completion_model():
     model = LocalAILanguageModel()

     response = model.invoke(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'completion',
-        },
-        prompt_messages=[
-            UserPromptMessage(
-                content='ping'
-            )
-        ],
-        model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'max_tokens': 10
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "completion",
         },
+        prompt_messages=[UserPromptMessage(content="ping")],
+        model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10},
         stop=[],
         user="abc-123",
-        stream=False
+        stream=False,
     )

     assert isinstance(response, LLMResult)
     assert len(response.message.content) > 0
     assert response.usage.total_tokens > 0


 def test_invoke_chat_model():
     model = LocalAILanguageModel()

     response = model.invoke(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'chat_completion',
-        },
-        prompt_messages=[
-            UserPromptMessage(
-                content='ping'
-            )
-        ],
-        model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'max_tokens': 10
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "chat_completion",
         },
+        prompt_messages=[UserPromptMessage(content="ping")],
+        model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10},
         stop=[],
         user="abc-123",
-        stream=False
+        stream=False,
    )

     assert isinstance(response, LLMResult)
     assert len(response.message.content) > 0
     assert response.usage.total_tokens > 0


 def test_invoke_stream_completion_model():
     model = LocalAILanguageModel()

     response = model.invoke(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'completion',
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "completion",
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
-        model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'max_tokens': 10
-        },
-        stop=['you'],
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
+        model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10},
+        stop=["you"],
         stream=True,
-        user="abc-123"
+        user="abc-123",
     )

     assert isinstance(response, Generator)
@@ -123,28 +102,21 @@ def test_invoke_stream_completion_model():
         assert isinstance(chunk.delta.message, AssistantPromptMessage)
         assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True


 def test_invoke_stream_chat_model():
     model = LocalAILanguageModel()

     response = model.invoke(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'chat_completion',
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "chat_completion",
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
-        model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'max_tokens': 10
-        },
-        stop=['you'],
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
+        model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10},
+        stop=["you"],
         stream=True,
-        user="abc-123"
+        user="abc-123",
     )

     assert isinstance(response, Generator)
@@ -154,64 +126,48 @@ def test_invoke_stream_chat_model():
         assert isinstance(chunk.delta.message, AssistantPromptMessage)
         assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True


 def test_get_num_tokens():
     model = LocalAILanguageModel()

     num_tokens = model.get_num_tokens(
-        model='????',
+        model="????",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'chat_completion',
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "chat_completion",
         },
         prompt_messages=[
             SystemPromptMessage(
-                content='You are a helpful AI assistant.',
+                content="You are a helpful AI assistant.",
             ),
-            UserPromptMessage(
-                content='Hello World!'
-            )
+            UserPromptMessage(content="Hello World!"),
         ],
         tools=[
             PromptMessageTool(
-                name='get_current_weather',
-                description='Get the current weather in a given location',
+                name="get_current_weather",
+                description="Get the current weather in a given location",
                 parameters={
                     "type": "object",
                     "properties": {
-                        "location": {
-                            "type": "string",
-                            "description": "The city and state e.g. San Francisco, CA"
-                        },
-                        "unit": {
-                            "type": "string",
-                            "enum": [
-                                "c",
-                                "f"
-                            ]
-                        }
+                        "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
+                        "unit": {"type": "string", "enum": ["c", "f"]},
                     },
-                    "required": [
-                        "location"
-                    ]
-                }
+                    "required": ["location"],
+                },
             )
-        ]
+        ],
     )

     assert isinstance(num_tokens, int)
     assert num_tokens == 77

     num_tokens = model.get_num_tokens(
-        model='????',
+        model="????",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'chat_completion',
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "chat_completion",
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
     )

     assert isinstance(num_tokens, int)

View File

@@ -12,30 +12,29 @@ def test_validate_credentials_for_chat_model():
     with pytest.raises(CredentialsValidateFailedError):
         model.validate_credentials(
-            model='bge-reranker-v2-m3',
+            model="bge-reranker-v2-m3",
             credentials={
-                'server_url': 'hahahaha',
-                'completion_type': 'completion',
-            }
+                "server_url": "hahahaha",
+                "completion_type": "completion",
+            },
         )

     model.validate_credentials(
-        model='bge-reranker-base',
+        model="bge-reranker-base",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'completion',
-        }
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "completion",
+        },
    )


 def test_invoke_rerank_model():
     model = LocalaiRerankModel()

     response = model.invoke(
-        model='bge-reranker-base',
-        credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL')
-        },
-        query='Organic skincare products for sensitive skin',
+        model="bge-reranker-base",
+        credentials={"server_url": os.environ.get("LOCALAI_SERVER_URL")},
+        query="Organic skincare products for sensitive skin",
         docs=[
             "Eco-friendly kitchenware for modern homes",
             "Biodegradable cleaning supplies for eco-conscious consumers",
@@ -45,43 +44,38 @@ def test_invoke_rerank_model():
             "Sustainable gardening tools and compost solutions",
             "Sensitive skin-friendly facial cleansers and toners",
             "Organic food wraps and storage solutions",
-            "Yoga mats made from recycled materials"
+            "Yoga mats made from recycled materials",
         ],
         top_n=3,
         score_threshold=0.75,
-        user="abc-123"
+        user="abc-123",
     )

     assert isinstance(response, RerankResult)
     assert len(response.docs) == 3


 def test__invoke():
     model = LocalaiRerankModel()

     # Test case 1: Empty docs
     result = model._invoke(
-        model='bge-reranker-base',
-        credentials={
-            'server_url': 'https://example.com',
-            'api_key': '1234567890'
-        },
-        query='Organic skincare products for sensitive skin',
+        model="bge-reranker-base",
+        credentials={"server_url": "https://example.com", "api_key": "1234567890"},
+        query="Organic skincare products for sensitive skin",
         docs=[],
         top_n=3,
         score_threshold=0.75,
-        user="abc-123"
+        user="abc-123",
     )

     assert isinstance(result, RerankResult)
     assert len(result.docs) == 0

     # Test case 2: Valid invocation
     result = model._invoke(
-        model='bge-reranker-base',
-        credentials={
-            'server_url': 'https://example.com',
-            'api_key': '1234567890'
-        },
-        query='Organic skincare products for sensitive skin',
+        model="bge-reranker-base",
+        credentials={"server_url": "https://example.com", "api_key": "1234567890"},
+        query="Organic skincare products for sensitive skin",
         docs=[
             "Eco-friendly kitchenware for modern homes",
             "Biodegradable cleaning supplies for eco-conscious consumers",
@@ -91,12 +85,12 @@ def test__invoke():
             "Sustainable gardening tools and compost solutions",
             "Sensitive skin-friendly facial cleansers and toners",
             "Organic food wraps and storage solutions",
-            "Yoga mats made from recycled materials"
+            "Yoga mats made from recycled materials",
         ],
         top_n=3,
         score_threshold=0.75,
-        user="abc-123"
+        user="abc-123",
     )

     assert isinstance(result, RerankResult)
     assert len(result.docs) == 3
-    assert all(isinstance(doc, RerankDocument) for doc in result.docs)
\ No newline at end of file
+    assert all(isinstance(doc, RerankDocument) for doc in result.docs)

View File

@@ -10,19 +10,9 @@ def test_validate_credentials():
     model = LocalAISpeech2text()

     with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(
-            model='whisper-1',
-            credentials={
-                'server_url': 'invalid_url'
-            }
-        )
+        model.validate_credentials(model="whisper-1", credentials={"server_url": "invalid_url"})

-    model.validate_credentials(
-        model='whisper-1',
-        credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL')
-        }
-    )
+    model.validate_credentials(model="whisper-1", credentials={"server_url": os.environ.get("LOCALAI_SERVER_URL")})


 def test_invoke_model():
@@ -32,23 +22,21 @@ def test_invoke_model():
     current_dir = os.path.dirname(os.path.abspath(__file__))

     # Get assets directory
-    assets_dir = os.path.join(os.path.dirname(current_dir), 'assets')
+    assets_dir = os.path.join(os.path.dirname(current_dir), "assets")

     # Construct the path to the audio file
-    audio_file_path = os.path.join(assets_dir, 'audio.mp3')
+    audio_file_path = os.path.join(assets_dir, "audio.mp3")

     # Open the file and get the file object
-    with open(audio_file_path, 'rb') as audio_file:
+    with open(audio_file_path, "rb") as audio_file:
         file = audio_file

         result = model.invoke(
-            model='whisper-1',
-            credentials={
-                'server_url': os.environ.get('LOCALAI_SERVER_URL')
-            },
+            model="whisper-1",
+            credentials={"server_url": os.environ.get("LOCALAI_SERVER_URL")},
             file=file,
-            user="abc-123"
+            user="abc-123",
         )

         assert isinstance(result, str)
-        assert result == '1, 2, 3, 4, 5, 6, 7, 8, 9, 10'
+        assert result == "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"