Adapter Pattern
Why the Adapter Pattern for LLMs?
Key LLM Use Cases
1. Multi-Provider LLM Adapter
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional, List


class LLMResponse:
    """Standardized response format"""

    def __init__(self, content: str, model: str, usage: Dict[str, int], metadata: Optional[Dict[str, Any]] = None):
        self.content = content
        self.model = model
        self.usage = usage  # {'input_tokens': int, 'output_tokens': int, 'total_tokens': int}
        self.metadata = metadata or {}


class LLMProvider(ABC):
    """Abstract base class for LLM providers"""

    @abstractmethod
    def generate(self, prompt: str, **kwargs) -> LLMResponse:
        pass

    @abstractmethod
    def get_models(self) -> List[str]:
        pass
class OpenAIAdapter(LLMProvider):
    """Adapter for OpenAI API"""

    def __init__(self, api_key: str, base_url: str = "https://api.openai.com/v1"):
        self.api_key = api_key
        self.base_url = base_url
        self.models = ["gpt-4", "gpt-3.5-turbo", "gpt-4-turbo"]

    def generate(self, prompt: str, model: str = "gpt-3.5-turbo", **kwargs) -> LLMResponse:
        """Generate response using OpenAI API format"""
        # Simulate OpenAI API call
        openai_request = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": kwargs.get("temperature", 0.7),
            "max_tokens": kwargs.get("max_tokens", 1000)
        }
        # Mock OpenAI response format
        openai_response = {
            "choices": [{"message": {"content": f"OpenAI {model} response to: {prompt[:50]}..."}}],
            "usage": {"prompt_tokens": 20, "completion_tokens": 50, "total_tokens": 70},
            "model": model
        }
        # Adapt to the standard format
        return LLMResponse(
            content=openai_response["choices"][0]["message"]["content"],
            model=openai_response["model"],
            usage={
                "input_tokens": openai_response["usage"]["prompt_tokens"],
                "output_tokens": openai_response["usage"]["completion_tokens"],
                "total_tokens": openai_response["usage"]["total_tokens"]
            },
            metadata={"provider": "openai", "request": openai_request}
        )

    def get_models(self) -> List[str]:
        return self.models
class AnthropicAdapter(LLMProvider):
    """Adapter for Anthropic Claude API"""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.models = ["claude-3-opus", "claude-3-sonnet", "claude-3-haiku"]

    def generate(self, prompt: str, model: str = "claude-3-sonnet", **kwargs) -> LLMResponse:
        """Generate response using Anthropic API format"""
        # Simulate Anthropic API call
        anthropic_request = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": kwargs.get("max_tokens", 1000),
            "temperature": kwargs.get("temperature", 0.7)
        }
        # Mock Anthropic response format
        anthropic_response = {
            "content": [{"text": f"Claude {model} response to: {prompt[:50]}..."}],
            "usage": {"input_tokens": 18, "output_tokens": 55},
            "model": model
        }
        # Adapt to the standard format
        return LLMResponse(
            content=anthropic_response["content"][0]["text"],
            model=anthropic_response["model"],
            usage={
                "input_tokens": anthropic_response["usage"]["input_tokens"],
                "output_tokens": anthropic_response["usage"]["output_tokens"],
                "total_tokens": anthropic_response["usage"]["input_tokens"] + anthropic_response["usage"]["output_tokens"]
            },
            metadata={"provider": "anthropic", "request": anthropic_request}
        )

    def get_models(self) -> List[str]:
        return self.models
class GoogleGeminiAdapter(LLMProvider):
    """Adapter for Google Gemini API"""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.models = ["gemini-pro", "gemini-pro-vision", "gemini-ultra"]

    def generate(self, prompt: str, model: str = "gemini-pro", **kwargs) -> LLMResponse:
        """Generate response using Google Gemini API format"""
        # Simulate Gemini API call
        gemini_request = {
            "model": f"models/{model}",
            "contents": [{"parts": [{"text": prompt}]}],
            "generationConfig": {
                "temperature": kwargs.get("temperature", 0.7),
                "maxOutputTokens": kwargs.get("max_tokens", 1000)
            }
        }
        # Mock Gemini response format
        gemini_response = {
            "candidates": [{"content": {"parts": [{"text": f"Gemini {model} response to: {prompt[:50]}..."}]}}],
            "usageMetadata": {"promptTokenCount": 15, "candidatesTokenCount": 60, "totalTokenCount": 75},
            "modelVersion": model
        }
        # Adapt to the standard format
        return LLMResponse(
            content=gemini_response["candidates"][0]["content"]["parts"][0]["text"],
            model=gemini_response["modelVersion"],
            usage={
                "input_tokens": gemini_response["usageMetadata"]["promptTokenCount"],
                "output_tokens": gemini_response["usageMetadata"]["candidatesTokenCount"],
                "total_tokens": gemini_response["usageMetadata"]["totalTokenCount"]
            },
            metadata={"provider": "google", "request": gemini_request}
        )

    def get_models(self) -> List[str]:
        return self.models
class UnifiedLLMClient:
    """Unified client that uses the adapter pattern to support multiple providers"""

    def __init__(self):
        self.providers: Dict[str, LLMProvider] = {}
        self.default_provider: Optional[str] = None

    def add_provider(self, name: str, provider: LLMProvider, set_as_default: bool = False):
        """Add a new provider adapter"""
        self.providers[name] = provider
        if set_as_default or self.default_provider is None:
            self.default_provider = name

    def generate(self, prompt: str, provider: Optional[str] = None, **kwargs) -> LLMResponse:
        """Generate response using the specified or default provider"""
        provider_name = provider or self.default_provider
        if provider_name not in self.providers:
            raise ValueError(f"Provider {provider_name} not found. Available: {list(self.providers.keys())}")
        return self.providers[provider_name].generate(prompt, **kwargs)

    def get_all_models(self) -> Dict[str, List[str]]:
        """Get all available models from all providers"""
        return {name: provider.get_models() for name, provider in self.providers.items()}
# Usage example
def demonstrate_multi_provider_adapter():
    """Demonstrate the unified LLM client with multiple providers"""
    # Create unified client
    llm_client = UnifiedLLMClient()

    # Add different provider adapters
    llm_client.add_provider("openai", OpenAIAdapter("sk-dummy-key"))
    llm_client.add_provider("anthropic", AnthropicAdapter("sk-ant-dummy-key"))
    llm_client.add_provider("google", GoogleGeminiAdapter("google-dummy-key"))

    # Use the same interface for different providers
    prompt = "Explain quantum computing in simple terms"
    for provider_name in llm_client.providers.keys():
        print(f"\nUsing {provider_name.upper()} provider:")
        response = llm_client.generate(prompt, provider=provider_name)
        print(f"Response: {response.content[:100]}...")
        print(f"Model: {response.model}")
        print(f"Usage: {response.usage}")
        print(f"Provider: {response.metadata['provider']}")

    # Show all available models
    print(f"\nAvailable models: {llm_client.get_all_models()}")
2. Legacy System Integration Adapter
3. Data Format Adaptation
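As a sketch of what data format adaptation can look like (the helper names below are illustrative, not part of the original), a small converter can translate a provider-neutral message list into the request shapes used by the adapters above:

from typing import Any, Dict, List

Message = Dict[str, str]  # e.g. {"role": "user", "content": "..."}

def to_openai_payload(messages: List[Message], model: str) -> Dict[str, Any]:
    # OpenAI-style chat payloads keep the role/content structure as-is
    return {"model": model, "messages": messages}

def to_gemini_payload(messages: List[Message], model: str) -> Dict[str, Any]:
    # Gemini-style payloads nest text under contents -> parts and use the "model" role
    contents = [
        {"role": "model" if m["role"] == "assistant" else "user", "parts": [{"text": m["content"]}]}
        for m in messages
    ]
    return {"model": f"models/{model}", "contents": contents}

# Usage
history = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there"}]
print(to_openai_payload(history, "gpt-4"))
print(to_gemini_payload(history, "gemini-pro"))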
Implementation Advantages
1. Interface Unification
2. Legacy System Modernization
3. Extensibility
4. Error Handling and Resilience
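One way to realize the resilience advantage is a fallback helper built on the UnifiedLLMClient above. This is a sketch under assumed requirements (the ordering policy and generate_with_fallback name are not from the original): try providers in a preferred order and move to the next one when a call fails.

def generate_with_fallback(client: UnifiedLLMClient, prompt: str,
                           preferred_order: List[str], **kwargs) -> LLMResponse:
    """Try providers in order; fall back to the next one if a call raises."""
    last_error: Optional[Exception] = None
    for name in preferred_order:
        if name not in client.providers:
            continue  # Skip providers that were never registered
        try:
            return client.generate(prompt, provider=name, **kwargs)
        except Exception as exc:  # In real code, catch provider-specific errors
            last_error = exc
    raise RuntimeError(f"All providers failed for prompt: {prompt[:50]}...") from last_error

# Usage: prefer Anthropic, fall back to OpenAI, then Google
# response = generate_with_fallback(llm_client, "Summarize this document", ["anthropic", "openai", "google"])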
Real-World Impact