chore(api/tests): apply ruff reformat #7590 (#7591)

Co-authored-by: -LAN- <laipz8200@outlook.com>
Bowen Liang, 2024-08-23 23:52:25 +08:00, committed by GitHub
parent 2da63654e5
commit b035c02f78
155 changed files with 4279 additions and 5925 deletions


@@ -21,99 +21,78 @@ def test_validate_credentials_for_chat_model():
     with pytest.raises(CredentialsValidateFailedError):
         model.validate_credentials(
-            model='chinese-llama-2-7b',
+            model="chinese-llama-2-7b",
             credentials={
-                'server_url': 'hahahaha',
-                'completion_type': 'completion',
-            }
+                "server_url": "hahahaha",
+                "completion_type": "completion",
+            },
         )

     model.validate_credentials(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'completion',
-        }
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "completion",
+        },
     )


 def test_invoke_completion_model():
     model = LocalAILanguageModel()

     response = model.invoke(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'completion',
-        },
-        prompt_messages=[
-            UserPromptMessage(
-                content='ping'
-            )
-        ],
-        model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'max_tokens': 10
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "completion",
         },
+        prompt_messages=[UserPromptMessage(content="ping")],
+        model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10},
         stop=[],
         user="abc-123",
-        stream=False
+        stream=False,
     )

     assert isinstance(response, LLMResult)
     assert len(response.message.content) > 0
     assert response.usage.total_tokens > 0


 def test_invoke_chat_model():
     model = LocalAILanguageModel()

     response = model.invoke(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'chat_completion',
-        },
-        prompt_messages=[
-            UserPromptMessage(
-                content='ping'
-            )
-        ],
-        model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'max_tokens': 10
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "chat_completion",
         },
+        prompt_messages=[UserPromptMessage(content="ping")],
+        model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10},
         stop=[],
         user="abc-123",
-        stream=False
+        stream=False,
     )

     assert isinstance(response, LLMResult)
     assert len(response.message.content) > 0
     assert response.usage.total_tokens > 0


 def test_invoke_stream_completion_model():
     model = LocalAILanguageModel()

     response = model.invoke(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'completion',
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "completion",
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
-        model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'max_tokens': 10
-        },
-        stop=['you'],
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
+        model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10},
+        stop=["you"],
         stream=True,
-        user="abc-123"
+        user="abc-123",
     )

     assert isinstance(response, Generator)
@@ -123,28 +102,21 @@ def test_invoke_stream_completion_model():
         assert isinstance(chunk.delta.message, AssistantPromptMessage)
         assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True

+
 def test_invoke_stream_chat_model():
     model = LocalAILanguageModel()

     response = model.invoke(
-        model='chinese-llama-2-7b',
+        model="chinese-llama-2-7b",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'chat_completion',
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "chat_completion",
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
-        model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'max_tokens': 10
-        },
-        stop=['you'],
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
+        model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10},
+        stop=["you"],
         stream=True,
-        user="abc-123"
+        user="abc-123",
     )

     assert isinstance(response, Generator)
@@ -154,64 +126,48 @@ def test_invoke_stream_chat_model():
         assert isinstance(chunk.delta.message, AssistantPromptMessage)
         assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True

+
 def test_get_num_tokens():
     model = LocalAILanguageModel()

     num_tokens = model.get_num_tokens(
-        model='????',
+        model="????",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'chat_completion',
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "chat_completion",
         },
         prompt_messages=[
             SystemPromptMessage(
-                content='You are a helpful AI assistant.',
+                content="You are a helpful AI assistant.",
             ),
-            UserPromptMessage(
-                content='Hello World!'
-            )
+            UserPromptMessage(content="Hello World!"),
         ],
         tools=[
             PromptMessageTool(
-                name='get_current_weather',
-                description='Get the current weather in a given location',
+                name="get_current_weather",
+                description="Get the current weather in a given location",
                 parameters={
                     "type": "object",
                     "properties": {
-                        "location": {
-                            "type": "string",
-                            "description": "The city and state e.g. San Francisco, CA"
-                        },
-                        "unit": {
-                            "type": "string",
-                            "enum": [
-                                "c",
-                                "f"
-                            ]
-                        }
+                        "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
+                        "unit": {"type": "string", "enum": ["c", "f"]},
                     },
-                    "required": [
-                        "location"
-                    ]
-                }
+                    "required": ["location"],
+                },
             )
-        ]
+        ],
     )

     assert isinstance(num_tokens, int)
     assert num_tokens == 77

     num_tokens = model.get_num_tokens(
-        model='????',
+        model="????",
         credentials={
-            'server_url': os.environ.get('LOCALAI_SERVER_URL'),
-            'completion_type': 'chat_completion',
+            "server_url": os.environ.get("LOCALAI_SERVER_URL"),
+            "completion_type": "chat_completion",
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
     )

     assert isinstance(num_tokens, int)
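
The changes above are uniformly mechanical: single-quoted strings become double-quoted, short bracketed literals are collapsed onto one line, and trailing commas are added to multi-line calls. These are the defaults of ruff's black-compatible formatter; the only non-default setting the output implies is a line length above the 88-character default, since collapsed lines such as the "location" property entry would not fit otherwise. A minimal sketch of a configuration that would produce this output, assuming it lives in api/pyproject.toml (the configuration itself is not shown on this page, and the exact line-length value is an assumption):

    [tool.ruff]
    # a value above the 88-character default is implied by the collapsed
    # one-liners in this diff; 120 is an assumption, not confirmed here
    line-length = 120

    [tool.ruff.format]
    # formatter defaults, stated explicitly: double quotes, space indents,
    # and respect for magic trailing commas
    quote-style = "double"
    indent-style = "space"
    skip-magic-trailing-comma = false

Reproducing the reformat locally would then amount to running "ruff format tests/" from the api/ directory (again an assumption; the exact invocation is not recorded in the commit message).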