Abstract Factory Pattern
Why Abstract Factory for LLM?
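LLM applications rarely need just one client. They typically need a whole family of related clients at once: a text-generation client, an embedding client, and often an image client. The Abstract Factory pattern creates that entire family through a single provider-specific factory, so application code depends only on abstract interfaces and can switch between OpenAI, Anthropic, or a test double without touching any call sites.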
Key LLM Use Cases
1. Multi-Provider AI Client Factory
import openai
import anthropic
from abc import ABC, abstractmethod


# Abstract products: the family of clients every provider must supply
class LLMClient(ABC):
    @abstractmethod
    def generate(self, prompt, **kwargs):
        pass

    @abstractmethod
    def stream_generate(self, prompt, **kwargs):
        pass


class EmbeddingClient(ABC):
    @abstractmethod
    def embed_text(self, text):
        pass

    @abstractmethod
    def embed_documents(self, documents):
        pass


class ImageClient(ABC):
    @abstractmethod
    def generate_image(self, prompt):
        pass

    @abstractmethod
    def analyze_image(self, image):
        pass
# Abstract factory: one creation method per product in the family
class AIProviderFactory(ABC):
    @abstractmethod
    def create_llm_client(self):
        pass

    @abstractmethod
    def create_embedding_client(self):
        pass

    @abstractmethod
    def create_image_client(self):
        pass
# Concrete implementations for OpenAI (assumes the openai>=1.0 client SDK)
class OpenAILLMClient(LLMClient):
    def __init__(self):
        self.client = openai.OpenAI()

    def generate(self, prompt, **kwargs):
        return self.client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            **kwargs,
        )

    def stream_generate(self, prompt, **kwargs):
        return self.client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            stream=True,
            **kwargs,
        )


class OpenAIEmbeddingClient(EmbeddingClient):
    def __init__(self):
        self.client = openai.OpenAI()

    def embed_text(self, text):
        return self.client.embeddings.create(
            model="text-embedding-ada-002",
            input=text,
        )

    def embed_documents(self, documents):
        return [self.embed_text(doc) for doc in documents]


class OpenAIImageClient(ImageClient):
    def __init__(self):
        self.client = openai.OpenAI()

    def generate_image(self, prompt):
        return self.client.images.generate(
            prompt=prompt,
            n=1,
            size="1024x1024",
        )

    def analyze_image(self, image):
        return self.client.chat.completions.create(
            model="gpt-4-vision-preview",
            messages=[{
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe this image."},
                    {"type": "image_url", "image_url": {"url": image}},
                ],
            }],
        )


class OpenAIFactory(AIProviderFactory):
    def create_llm_client(self):
        return OpenAILLMClient()

    def create_embedding_client(self):
        return OpenAIEmbeddingClient()

    def create_image_client(self):
        return OpenAIImageClient()
# Concrete implementations for Anthropic (Claude 3 models require the
# Messages API; the legacy Human:/Assistant: completion format won't work)
class AnthropicLLMClient(LLMClient):
    def __init__(self):
        self.client = anthropic.Anthropic()

    def generate(self, prompt, **kwargs):
        return self.client.messages.create(
            model="claude-3-opus-20240229",
            max_tokens=kwargs.pop("max_tokens", 1024),
            messages=[{"role": "user", "content": prompt}],
            **kwargs,
        )

    def stream_generate(self, prompt, **kwargs):
        return self.client.messages.create(
            model="claude-3-opus-20240229",
            max_tokens=kwargs.pop("max_tokens", 1024),
            messages=[{"role": "user", "content": prompt}],
            stream=True,
            **kwargs,
        )


class AnthropicFactory(AIProviderFactory):
    def create_llm_client(self):
        return AnthropicLLMClient()

    def create_embedding_client(self):
        # Anthropic doesn't offer an embeddings API, so fall back to OpenAI
        return OpenAIEmbeddingClient()

    def create_image_client(self):
        # Anthropic doesn't offer image generation, so fall back to OpenAI
        return OpenAIImageClient()
# Usage: the application depends only on the abstract factory
class AIApplication:
    def __init__(self, factory: AIProviderFactory):
        self.llm = factory.create_llm_client()
        self.embeddings = factory.create_embedding_client()
        self.images = factory.create_image_client()

    def process_multimodal_request(self, text_prompt, image_url=None):
        if image_url:
            # In production code you would extract the text from the response
            image_analysis = self.images.analyze_image(image_url)
            combined_prompt = f"{text_prompt}\n\nImage analysis: {image_analysis}"
        else:
            combined_prompt = text_prompt
        return self.llm.generate(combined_prompt)


# Easy provider switching
openai_app = AIApplication(OpenAIFactory())
anthropic_app = AIApplication(AnthropicFactory())

2. Environment-Based Configuration Factory
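Rather than hard-coding a factory, the choice can be driven by configuration at startup. The following is a minimal sketch, assuming the factory classes from the previous example; the AI_PROVIDER environment variable, PROVIDER_FACTORIES registry, and get_provider_factory helper are illustrative names, not an established API.

import os

# Hypothetical registry mapping provider names to factory classes
PROVIDER_FACTORIES = {
    "openai": OpenAIFactory,
    "anthropic": AnthropicFactory,
}


def get_provider_factory() -> AIProviderFactory:
    provider = os.environ.get("AI_PROVIDER", "openai").lower()
    try:
        return PROVIDER_FACTORIES[provider]()
    except KeyError:
        raise ValueError(f"Unknown AI provider: {provider!r}")


# The application never learns which provider was configured
app = AIApplication(get_provider_factory())

Because the registry maps names to classes rather than instances, adding a new provider is a one-line change, and the factory is only constructed once a valid name is resolved.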
3. RAG System Component Factory
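The same idea extends naturally to retrieval-augmented generation, where the embedding client, LLM client, and vector store should be created as a matched family. Below is a hypothetical sketch assuming the abstract clients defined earlier; VectorStore, InMemoryVectorStore, and RAGComponentFactory are illustrative names, and the in-memory store is a toy stand-in for a real backend such as FAISS or pgvector.

# Hypothetical sketch: extend the product family with a vector store
class VectorStore(ABC):
    @abstractmethod
    def add(self, text, embedding):
        pass

    @abstractmethod
    def search(self, query_embedding, k=5):
        pass


class InMemoryVectorStore(VectorStore):
    def __init__(self):
        self.items = []

    def add(self, text, embedding):
        self.items.append((text, embedding))

    def search(self, query_embedding, k=5):
        # Naive cosine-similarity ranking over stored embeddings
        def cosine(a, b):
            dot = sum(x * y for x, y in zip(a, b))
            norm = (sum(x * x for x in a) * sum(y * y for y in b)) ** 0.5
            return dot / norm if norm else 0.0

        ranked = sorted(self.items, key=lambda item: cosine(item[1], query_embedding), reverse=True)
        return [text for text, _ in ranked[:k]]


class RAGComponentFactory(ABC):
    @abstractmethod
    def create_embedding_client(self):
        pass

    @abstractmethod
    def create_llm_client(self):
        pass

    @abstractmethod
    def create_vector_store(self):
        pass


class OpenAIRAGFactory(RAGComponentFactory):
    def create_embedding_client(self):
        return OpenAIEmbeddingClient()

    def create_llm_client(self):
        return OpenAILLMClient()

    def create_vector_store(self):
        return InMemoryVectorStore()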
4. Testing and Evaluation Factory
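For tests and offline evaluation, the same abstract factory can hand back deterministic fakes, so suites run with no network calls and no API cost. A minimal sketch, assuming the abstract classes above; MockLLMClient and its canned responses are hypothetical test doubles, not part of any SDK.

# Hypothetical sketch: a factory that produces deterministic fakes for tests
class MockLLMClient(LLMClient):
    def __init__(self, canned_response="mock response"):
        self.canned_response = canned_response
        self.calls = []  # record prompts so tests can make assertions

    def generate(self, prompt, **kwargs):
        self.calls.append(prompt)
        return self.canned_response

    def stream_generate(self, prompt, **kwargs):
        self.calls.append(prompt)
        yield self.canned_response


class MockEmbeddingClient(EmbeddingClient):
    def embed_text(self, text):
        return [0.0] * 8  # fixed-size dummy vector

    def embed_documents(self, documents):
        return [self.embed_text(doc) for doc in documents]


class MockImageClient(ImageClient):
    def generate_image(self, prompt):
        return {"url": "https://example.com/mock.png"}

    def analyze_image(self, image):
        return "mock image analysis"


class MockAIFactory(AIProviderFactory):
    def create_llm_client(self):
        return MockLLMClient()

    def create_embedding_client(self):
        return MockEmbeddingClient()

    def create_image_client(self):
        return MockImageClient()


# Tests exercise real application logic against fake providers
test_app = AIApplication(MockAIFactory())
result = test_app.process_multimodal_request("Summarize this document")
assert result == "mock response"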
Implementation Advantages
1. Provider Independence
Application code depends only on the abstract interfaces, so swapping OpenAI for Anthropic, or any new provider, means changing one factory instantiation rather than every call site.
2. Configuration Management
Factory selection can be driven by environment variables or configuration files, keeping the provider choice out of application logic.
3. Domain Specialization
Specialized factories can bundle domain-appropriate defaults, such as model choices and prompt conventions, behind the same interfaces.
4. Testing and Quality Assurance
A mock factory substitutes deterministic fakes for every client at once, making tests fast, free, and reproducible.
Real-World Impact
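In practice, the main payoff is operational flexibility: when a provider changes pricing, deprecates a model, or suffers an outage, switching factories is a one-line change, and the same mechanism makes offline testing and side-by-side provider comparisons straightforward.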