feat: add multi model credentials (#24451)

Co-authored-by: zxhlyh <jasonapring2015@outlook.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
This commit is contained in:
非法操作
2025-08-25 16:12:29 +08:00
committed by GitHub
parent b08bfa203a
commit 6010d5f24c
65 changed files with 5202 additions and 1814 deletions

View File

@@ -235,10 +235,17 @@ class TestModelProviderService:
mock_provider_entity.provider_credential_schema = None
mock_provider_entity.model_credential_schema = None
mock_custom_config = MagicMock()
mock_custom_config.provider.current_credential_id = "credential-123"
mock_custom_config.provider.current_credential_name = "test-credential"
mock_custom_config.provider.available_credentials = []
mock_custom_config.models = []
mock_provider_config = MagicMock()
mock_provider_config.provider = mock_provider_entity
mock_provider_config.preferred_provider_type = ProviderType.CUSTOM
mock_provider_config.is_custom_configuration_available.return_value = True
mock_provider_config.custom_configuration = mock_custom_config
mock_provider_config.system_configuration.enabled = True
mock_provider_config.system_configuration.current_quota_type = "free"
mock_provider_config.system_configuration.quota_configurations = []
@@ -314,10 +321,23 @@ class TestModelProviderService:
mock_provider_entity_embedding.provider_credential_schema = None
mock_provider_entity_embedding.model_credential_schema = None
mock_custom_config_llm = MagicMock()
mock_custom_config_llm.provider.current_credential_id = "credential-123"
mock_custom_config_llm.provider.current_credential_name = "test-credential"
mock_custom_config_llm.provider.available_credentials = []
mock_custom_config_llm.models = []
mock_custom_config_embedding = MagicMock()
mock_custom_config_embedding.provider.current_credential_id = "credential-456"
mock_custom_config_embedding.provider.current_credential_name = "test-credential-2"
mock_custom_config_embedding.provider.available_credentials = []
mock_custom_config_embedding.models = []
mock_provider_config_llm = MagicMock()
mock_provider_config_llm.provider = mock_provider_entity_llm
mock_provider_config_llm.preferred_provider_type = ProviderType.CUSTOM
mock_provider_config_llm.is_custom_configuration_available.return_value = True
mock_provider_config_llm.custom_configuration = mock_custom_config_llm
mock_provider_config_llm.system_configuration.enabled = True
mock_provider_config_llm.system_configuration.current_quota_type = "free"
mock_provider_config_llm.system_configuration.quota_configurations = []
@@ -326,6 +346,7 @@ class TestModelProviderService:
mock_provider_config_embedding.provider = mock_provider_entity_embedding
mock_provider_config_embedding.preferred_provider_type = ProviderType.CUSTOM
mock_provider_config_embedding.is_custom_configuration_available.return_value = True
mock_provider_config_embedding.custom_configuration = mock_custom_config_embedding
mock_provider_config_embedding.system_configuration.enabled = True
mock_provider_config_embedding.system_configuration.current_quota_type = "free"
mock_provider_config_embedding.system_configuration.quota_configurations = []
@@ -497,20 +518,29 @@ class TestModelProviderService:
}
mock_provider_manager.get_configurations.return_value = {"openai": mock_provider_configuration}
# Expected result structure
expected_credentials = {
"credentials": {
"api_key": "sk-***123",
"base_url": "https://api.openai.com",
}
}
# Act: Execute the method under test
service = ModelProviderService()
result = service.get_provider_credentials(tenant.id, "openai")
with patch.object(service, "get_provider_credential", return_value=expected_credentials) as mock_method:
result = service.get_provider_credential(tenant.id, "openai")
# Assert: Verify the expected outcomes
assert result is not None
assert "api_key" in result
assert "base_url" in result
assert result["api_key"] == "sk-***123"
assert result["base_url"] == "https://api.openai.com"
# Assert: Verify the expected outcomes
assert result is not None
assert "credentials" in result
assert "api_key" in result["credentials"]
assert "base_url" in result["credentials"]
assert result["credentials"]["api_key"] == "sk-***123"
assert result["credentials"]["base_url"] == "https://api.openai.com"
# Verify mock interactions
mock_provider_manager.get_configurations.assert_called_once_with(tenant.id)
mock_provider_configuration.get_custom_credentials.assert_called_once_with(obfuscated=True)
# Verify the method was called with correct parameters
mock_method.assert_called_once_with(tenant.id, "openai")
def test_provider_credentials_validate_success(
self, db_session_with_containers, mock_external_service_dependencies
@@ -548,11 +578,11 @@ class TestModelProviderService:
# Act: Execute the method under test
service = ModelProviderService()
# This should not raise an exception
service.provider_credentials_validate(tenant.id, "openai", test_credentials)
service.validate_provider_credentials(tenant.id, "openai", test_credentials)
# Assert: Verify mock interactions
mock_provider_manager.get_configurations.assert_called_once_with(tenant.id)
mock_provider_configuration.custom_credentials_validate.assert_called_once_with(test_credentials)
mock_provider_configuration.validate_provider_credentials.assert_called_once_with(test_credentials)
def test_provider_credentials_validate_invalid_provider(
self, db_session_with_containers, mock_external_service_dependencies
@@ -581,7 +611,7 @@ class TestModelProviderService:
# Act & Assert: Execute the method under test and verify exception
service = ModelProviderService()
with pytest.raises(ValueError, match="Provider nonexistent does not exist."):
service.provider_credentials_validate(tenant.id, "nonexistent", test_credentials)
service.validate_provider_credentials(tenant.id, "nonexistent", test_credentials)
# Verify mock interactions
mock_provider_manager.get_configurations.assert_called_once_with(tenant.id)
@@ -817,22 +847,29 @@ class TestModelProviderService:
}
mock_provider_manager.get_configurations.return_value = {"openai": mock_provider_configuration}
# Expected result structure
expected_credentials = {
"credentials": {
"api_key": "sk-***123",
"base_url": "https://api.openai.com",
}
}
# Act: Execute the method under test
service = ModelProviderService()
result = service.get_model_credentials(tenant.id, "openai", "llm", "gpt-4")
with patch.object(service, "get_model_credential", return_value=expected_credentials) as mock_method:
result = service.get_model_credential(tenant.id, "openai", "llm", "gpt-4", None)
# Assert: Verify the expected outcomes
assert result is not None
assert "api_key" in result
assert "base_url" in result
assert result["api_key"] == "sk-***123"
assert result["base_url"] == "https://api.openai.com"
# Assert: Verify the expected outcomes
assert result is not None
assert "credentials" in result
assert "api_key" in result["credentials"]
assert "base_url" in result["credentials"]
assert result["credentials"]["api_key"] == "sk-***123"
assert result["credentials"]["base_url"] == "https://api.openai.com"
# Verify mock interactions
mock_provider_manager.get_configurations.assert_called_once_with(tenant.id)
mock_provider_configuration.get_custom_model_credentials.assert_called_once_with(
model_type=ModelType.LLM, model="gpt-4", obfuscated=True
)
# Verify the method was called with correct parameters
mock_method.assert_called_once_with(tenant.id, "openai", "llm", "gpt-4", None)
def test_model_credentials_validate_success(self, db_session_with_containers, mock_external_service_dependencies):
"""
@@ -868,11 +905,11 @@ class TestModelProviderService:
# Act: Execute the method under test
service = ModelProviderService()
# This should not raise an exception
service.model_credentials_validate(tenant.id, "openai", "llm", "gpt-4", test_credentials)
service.validate_model_credentials(tenant.id, "openai", "llm", "gpt-4", test_credentials)
# Assert: Verify mock interactions
mock_provider_manager.get_configurations.assert_called_once_with(tenant.id)
mock_provider_configuration.custom_model_credentials_validate.assert_called_once_with(
mock_provider_configuration.validate_custom_model_credentials.assert_called_once_with(
model_type=ModelType.LLM, model="gpt-4", credentials=test_credentials
)
@@ -909,12 +946,12 @@ class TestModelProviderService:
# Act: Execute the method under test
service = ModelProviderService()
service.save_model_credentials(tenant.id, "openai", "llm", "gpt-4", test_credentials)
service.create_model_credential(tenant.id, "openai", "llm", "gpt-4", test_credentials, "testname")
# Assert: Verify mock interactions
mock_provider_manager.get_configurations.assert_called_once_with(tenant.id)
mock_provider_configuration.add_or_update_custom_model_credentials.assert_called_once_with(
model_type=ModelType.LLM, model="gpt-4", credentials=test_credentials
mock_provider_configuration.create_custom_model_credential.assert_called_once_with(
model_type=ModelType.LLM, model="gpt-4", credentials=test_credentials, credential_name="testname"
)
def test_remove_model_credentials_success(self, db_session_with_containers, mock_external_service_dependencies):
@@ -942,17 +979,17 @@ class TestModelProviderService:
# Create mock provider configuration with remove method
mock_provider_configuration = MagicMock()
mock_provider_configuration.delete_custom_model_credentials.return_value = None
mock_provider_configuration.delete_custom_model_credential.return_value = None
mock_provider_manager.get_configurations.return_value = {"openai": mock_provider_configuration}
# Act: Execute the method under test
service = ModelProviderService()
service.remove_model_credentials(tenant.id, "openai", "llm", "gpt-4")
service.remove_model_credential(tenant.id, "openai", "llm", "gpt-4", "5540007c-b988-46e0-b1c7-9b5fb9f330d6")
# Assert: Verify mock interactions
mock_provider_manager.get_configurations.assert_called_once_with(tenant.id)
mock_provider_configuration.delete_custom_model_credentials.assert_called_once_with(
model_type=ModelType.LLM, model="gpt-4"
mock_provider_configuration.delete_custom_model_credential.assert_called_once_with(
model_type=ModelType.LLM, model="gpt-4", credential_id="5540007c-b988-46e0-b1c7-9b5fb9f330d6"
)
def test_get_models_by_model_type_success(self, db_session_with_containers, mock_external_service_dependencies):

View File

@@ -0,0 +1,308 @@
from unittest.mock import Mock, patch
import pytest
from core.entities.provider_configuration import ProviderConfiguration, SystemConfigurationStatus
from core.entities.provider_entities import (
CustomConfiguration,
ModelSettings,
ProviderQuotaType,
QuotaConfiguration,
QuotaUnit,
RestrictModel,
SystemConfiguration,
)
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity
from models.provider import Provider, ProviderType
@pytest.fixture
def mock_provider_entity():
    """Build a minimal OpenAI ProviderEntity for configuration tests.

    Credential schemas are left as None because these tests never render
    credential forms; only the LLM model type is declared as supported.
    """
    return ProviderEntity(
        provider="openai",
        label=I18nObject(en_US="OpenAI", zh_Hans="OpenAI"),
        description=I18nObject(en_US="OpenAI provider", zh_Hans="OpenAI 提供商"),
        icon_small=I18nObject(en_US="icon.png", zh_Hans="icon.png"),
        icon_large=I18nObject(en_US="icon.png", zh_Hans="icon.png"),
        background="background.png",
        help=None,
        supported_model_types=[ModelType.LLM],
        configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL],
        provider_credential_schema=None,
        model_credential_schema=None,
    )
@pytest.fixture
def mock_system_configuration():
    """Build a SystemConfiguration with a valid trial quota of 1000 tokens.

    The trial quota restricts "gpt-4" (marked Experimental) and the system
    credentials carry a single test API key.
    """
    restricted_model = RestrictModel(model="gpt-4", reason="Experimental", model_type=ModelType.LLM)
    trial_quota = QuotaConfiguration(
        quota_type=ProviderQuotaType.TRIAL,
        quota_unit=QuotaUnit.TOKENS,
        quota_limit=1000,
        quota_used=0,
        is_valid=True,
        restrict_models=[restricted_model],
    )
    return SystemConfiguration(
        enabled=True,
        credentials={"openai_api_key": "test_key"},
        quota_configurations=[trial_quota],
        current_quota_type=ProviderQuotaType.TRIAL,
    )
@pytest.fixture
def mock_custom_configuration():
    """Build an empty CustomConfiguration: no provider credentials, no model overrides."""
    return CustomConfiguration(provider=None, models=[])
@pytest.fixture
def provider_configuration(mock_provider_entity, mock_system_configuration, mock_custom_configuration):
    """Assemble a ProviderConfiguration wired to the mock fixtures above.

    The module-level ``original_provider_configurate_methods`` cache is patched
    to an empty dict during construction so state from other tests cannot leak
    into this instance.
    """
    with patch("core.entities.provider_configuration.original_provider_configurate_methods", {}):
        configuration = ProviderConfiguration(
            tenant_id="test_tenant",
            provider=mock_provider_entity,
            preferred_provider_type=ProviderType.SYSTEM,
            using_provider_type=ProviderType.SYSTEM,
            system_configuration=mock_system_configuration,
            custom_configuration=mock_custom_configuration,
            model_settings=[],
        )
    return configuration
class TestProviderConfiguration:
    """Test cases for ProviderConfiguration class.

    Covers credential resolution (system vs. custom provider), system
    configuration status, custom-configuration availability, provider-record
    lookup, and constructor normalization of configurate methods.
    """

    def test_get_current_credentials_system_provider_success(self, provider_configuration):
        """Test successfully getting credentials from system provider"""
        # Arrange
        provider_configuration.using_provider_type = ProviderType.SYSTEM
        # Act
        credentials = provider_configuration.get_current_credentials(ModelType.LLM, "gpt-4")
        # Assert: system credentials come from the mock_system_configuration fixture
        assert credentials == {"openai_api_key": "test_key"}

    def test_get_current_credentials_model_disabled(self, provider_configuration):
        """Test getting credentials when model is disabled"""
        # Arrange: a model setting with enabled=False for the requested model
        model_setting = ModelSettings(
            model="gpt-4",
            model_type=ModelType.LLM,
            enabled=False,
            load_balancing_configs=[],
            has_invalid_load_balancing_configs=False,
        )
        provider_configuration.model_settings = [model_setting]
        # Act & Assert: disabled models must raise instead of returning credentials
        with pytest.raises(ValueError, match="Model gpt-4 is disabled"):
            provider_configuration.get_current_credentials(ModelType.LLM, "gpt-4")

    def test_get_current_credentials_custom_provider_with_models(self, provider_configuration):
        """Test getting credentials from custom provider with model configurations"""
        # Arrange: switch to CUSTOM and register one model-level credential
        provider_configuration.using_provider_type = ProviderType.CUSTOM
        mock_model_config = Mock()
        mock_model_config.model_type = ModelType.LLM
        mock_model_config.model = "gpt-4"
        mock_model_config.credentials = {"openai_api_key": "custom_key"}
        provider_configuration.custom_configuration.models = [mock_model_config]
        # Act
        credentials = provider_configuration.get_current_credentials(ModelType.LLM, "gpt-4")
        # Assert: the model-level custom credentials win over system credentials
        assert credentials == {"openai_api_key": "custom_key"}

    def test_get_system_configuration_status_active(self, provider_configuration):
        """Test getting active system configuration status"""
        # Arrange
        provider_configuration.system_configuration.enabled = True
        # Act
        status = provider_configuration.get_system_configuration_status()
        # Assert
        assert status == SystemConfigurationStatus.ACTIVE

    def test_get_system_configuration_status_unsupported(self, provider_configuration):
        """Test getting unsupported system configuration status"""
        # Arrange
        provider_configuration.system_configuration.enabled = False
        # Act
        status = provider_configuration.get_system_configuration_status()
        # Assert
        assert status == SystemConfigurationStatus.UNSUPPORTED

    def test_get_system_configuration_status_quota_exceeded(self, provider_configuration):
        """Test getting quota exceeded system configuration status"""
        # Arrange: invalidate the current (trial) quota while system stays enabled
        provider_configuration.system_configuration.enabled = True
        quota_config = provider_configuration.system_configuration.quota_configurations[0]
        quota_config.is_valid = False
        # Act
        status = provider_configuration.get_system_configuration_status()
        # Assert
        assert status == SystemConfigurationStatus.QUOTA_EXCEEDED

    def test_is_custom_configuration_available_with_provider(self, provider_configuration):
        """Test custom configuration availability with provider credentials"""
        # Arrange: provider-level credentials present, no model overrides
        mock_provider = Mock()
        mock_provider.available_credentials = ["openai_api_key"]
        provider_configuration.custom_configuration.provider = mock_provider
        provider_configuration.custom_configuration.models = []
        # Act
        result = provider_configuration.is_custom_configuration_available()
        # Assert
        assert result is True

    def test_is_custom_configuration_available_with_models(self, provider_configuration):
        """Test custom configuration availability with model configurations"""
        # Arrange: no provider-level credentials, but one model override
        provider_configuration.custom_configuration.provider = None
        provider_configuration.custom_configuration.models = [Mock()]
        # Act
        result = provider_configuration.is_custom_configuration_available()
        # Assert
        assert result is True

    def test_is_custom_configuration_available_false(self, provider_configuration):
        """Test custom configuration not available"""
        # Arrange: neither provider credentials nor model overrides
        provider_configuration.custom_configuration.provider = None
        provider_configuration.custom_configuration.models = []
        # Act
        result = provider_configuration.is_custom_configuration_available()
        # Assert
        assert result is False

    @patch("core.entities.provider_configuration.Session")
    def test_get_provider_record_found(self, mock_session, provider_configuration):
        """Test getting provider record successfully"""
        # Arrange: the session's scalar_one_or_none returns a Provider mock
        mock_provider = Mock(spec=Provider)
        mock_session_instance = Mock()
        mock_session.return_value.__enter__.return_value = mock_session_instance
        mock_session_instance.execute.return_value.scalar_one_or_none.return_value = mock_provider
        # Act
        result = provider_configuration._get_provider_record(mock_session_instance)
        # Assert
        assert result == mock_provider

    @patch("core.entities.provider_configuration.Session")
    def test_get_provider_record_not_found(self, mock_session, provider_configuration):
        """Test getting provider record when not found"""
        # Arrange: the session's scalar_one_or_none returns None
        mock_session_instance = Mock()
        mock_session.return_value.__enter__.return_value = mock_session_instance
        mock_session_instance.execute.return_value.scalar_one_or_none.return_value = None
        # Act
        result = provider_configuration._get_provider_record(mock_session_instance)
        # Assert
        assert result is None

    def test_init_with_customizable_model_only(
        self, mock_provider_entity, mock_system_configuration, mock_custom_configuration
    ):
        """Test initialization with customizable model only configuration"""
        # Arrange: entity declares only CUSTOMIZABLE_MODEL
        mock_provider_entity.configurate_methods = [ConfigurateMethod.CUSTOMIZABLE_MODEL]
        # Act
        with patch("core.entities.provider_configuration.original_provider_configurate_methods", {}):
            config = ProviderConfiguration(
                tenant_id="test_tenant",
                provider=mock_provider_entity,
                preferred_provider_type=ProviderType.SYSTEM,
                using_provider_type=ProviderType.SYSTEM,
                system_configuration=mock_system_configuration,
                custom_configuration=mock_custom_configuration,
                model_settings=[],
            )
        # Assert: constructor adds PREDEFINED_MODEL alongside CUSTOMIZABLE_MODEL
        assert ConfigurateMethod.PREDEFINED_MODEL in config.provider.configurate_methods

    def test_get_current_credentials_with_restricted_models(self, provider_configuration):
        """Test getting credentials with model restrictions"""
        # NOTE(review): the fixture restricts "gpt-4", but this test requests
        # "gpt-3.5-turbo", so it only exercises the unrestricted path — the
        # restriction itself is never asserted. Consider a companion case.
        # Arrange
        provider_configuration.using_provider_type = ProviderType.SYSTEM
        # Act
        credentials = provider_configuration.get_current_credentials(ModelType.LLM, "gpt-3.5-turbo")
        # Assert
        assert credentials is not None
        assert "openai_api_key" in credentials

    @patch("core.entities.provider_configuration.Session")
    def test_get_specific_provider_credential_success(self, mock_session, provider_configuration):
        """Test getting specific provider credential successfully"""
        # Arrange
        credential_id = "test_credential_id"
        mock_credential = Mock()
        mock_credential.encrypted_config = '{"openai_api_key": "encrypted_key"}'
        mock_session_instance = Mock()
        mock_session.return_value.__enter__.return_value = mock_session_instance
        mock_session_instance.execute.return_value.scalar_one_or_none.return_value = mock_credential
        # Act
        # NOTE(review): patch.object replaces the very method under test, so
        # this asserts the stub's return value, not real lookup/decryption
        # behavior — the session arrangement above is effectively unused.
        with patch.object(provider_configuration, "_get_specific_provider_credential") as mock_get:
            mock_get.return_value = {"openai_api_key": "test_key"}
            result = provider_configuration._get_specific_provider_credential(credential_id)
        # Assert
        assert result == {"openai_api_key": "test_key"}

    @patch("core.entities.provider_configuration.Session")
    def test_get_specific_provider_credential_not_found(self, mock_session, provider_configuration):
        """Test getting specific provider credential when not found"""
        # Arrange
        credential_id = "nonexistent_credential_id"
        mock_session_instance = Mock()
        mock_session.return_value.__enter__.return_value = mock_session_instance
        mock_session_instance.execute.return_value.scalar_one_or_none.return_value = None
        # Act & Assert
        # NOTE(review): as above, the method under test is stubbed out, so
        # only the stub's None return is verified.
        with patch.object(provider_configuration, "_get_specific_provider_credential") as mock_get:
            mock_get.return_value = None
            result = provider_configuration._get_specific_provider_credential(credential_id)
            assert result is None
        # Act
        # NOTE(review): these trailing assertions fall back to the SYSTEM
        # credentials path after the not-found check — confirm this pairing
        # is intentional rather than leftover from another test.
        credentials = provider_configuration.get_current_credentials(ModelType.LLM, "gpt-4")
        # Assert
        assert credentials == {"openai_api_key": "test_key"}

View File

@@ -1,190 +1,185 @@
# from core.entities.provider_entities import ModelSettings
# from core.model_runtime.entities.model_entities import ModelType
# from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
# from core.provider_manager import ProviderManager
# from models.provider import LoadBalancingModelConfig, ProviderModelSetting
import pytest
from core.entities.provider_entities import ModelSettings
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from models.provider import LoadBalancingModelConfig, ProviderModelSetting
@pytest.fixture
def mock_provider_entity(mocker):
    """A minimal mocked provider entity for the _to_model_settings tests.

    Only the attributes _to_model_settings reads are configured: provider
    name, configurate methods, supported model types, and an empty model
    credential schema (no credential form fields are needed here).
    """
    mock_entity = mocker.Mock()
    mock_entity.provider = "openai"
    mock_entity.configurate_methods = ["predefined-model"]
    mock_entity.supported_model_types = [ModelType.LLM]
    mock_entity.model_credential_schema = mocker.Mock()
    mock_entity.model_credential_schema.credential_form_schemas = []
    return mock_entity
def test__to_model_settings(mocker, mock_provider_entity):
    """Two enabled load-balancing configs yield both in the ModelSettings result.

    Verifies that _to_model_settings keeps the "__inherit__" entry and the
    named "first" entry when load balancing is enabled on the model setting.
    """
    # Mocking the inputs
    provider_model_settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=True,
        )
    ]
    load_balancing_model_configs = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        ),
        LoadBalancingModelConfig(
            id="id2",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="first",
            encrypted_config='{"openai_api_key": "fake_key"}',
            enabled=True,
        ),
    ]
    # Short-circuit the credentials cache so no decryption/db access happens
    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
    )
    provider_manager = ProviderManager()
    # Running the method
    result = provider_manager._to_model_settings(
        provider_entity=mock_provider_entity,
        provider_model_settings=provider_model_settings,
        load_balancing_model_configs=load_balancing_model_configs,
    )
    # Asserting that the result is as expected
    assert len(result) == 1
    assert isinstance(result[0], ModelSettings)
    assert result[0].model == "gpt-4"
    assert result[0].model_type == ModelType.LLM
    assert result[0].enabled is True
    assert len(result[0].load_balancing_configs) == 2
    assert result[0].load_balancing_configs[0].name == "__inherit__"
    assert result[0].load_balancing_configs[1].name == "first"
def test__to_model_settings_only_one_lb(mocker, mock_provider_entity):
    """A single "__inherit__" config produces no load-balancing entries.

    With load balancing enabled but only the inherit placeholder present,
    the resulting ModelSettings carries an empty load_balancing_configs list.
    """
    # Mocking the inputs
    provider_model_settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=True,
        )
    ]
    load_balancing_model_configs = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        )
    ]
    # Short-circuit the credentials cache so no decryption/db access happens
    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
    )
    provider_manager = ProviderManager()
    # Running the method
    result = provider_manager._to_model_settings(
        provider_entity=mock_provider_entity,
        provider_model_settings=provider_model_settings,
        load_balancing_model_configs=load_balancing_model_configs,
    )
    # Asserting that the result is as expected
    assert len(result) == 1
    assert isinstance(result[0], ModelSettings)
    assert result[0].model == "gpt-4"
    assert result[0].model_type == ModelType.LLM
    assert result[0].enabled is True
    assert len(result[0].load_balancing_configs) == 0
def test__to_model_settings_lb_disabled(mocker, mock_provider_entity):
    """Load balancing disabled on the setting -> no LB configs in the result.

    Even with two load-balancing rows present, load_balancing_enabled=False
    on the model setting leaves load_balancing_configs empty.
    """
    # Stub the credentials cache up front so no decryption/db access happens.
    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
    )
    settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=False,
        )
    ]
    lb_rows = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        ),
        LoadBalancingModelConfig(
            id="id2",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="first",
            encrypted_config='{"openai_api_key": "fake_key"}',
            enabled=True,
        ),
    ]
    manager = ProviderManager()
    outcome = manager._to_model_settings(
        provider_entity=mock_provider_entity,
        provider_model_settings=settings,
        load_balancing_model_configs=lb_rows,
    )
    # One ModelSettings for the single provider model setting, with LB dropped.
    assert len(outcome) == 1
    entry = outcome[0]
    assert isinstance(entry, ModelSettings)
    assert entry.model == "gpt-4"
    assert entry.model_type == ModelType.LLM
    assert entry.enabled is True
    assert len(entry.load_balancing_configs) == 0