Compatibility Guide
Learn how to migrate from other AI providers and ensure compatibility with existing tools and frameworks when using the DeepSeek API.
Overview
The DeepSeek API is designed to be compatible with:
- OpenAI API: Drop-in replacement for most use cases
- Popular SDKs: Works with existing OpenAI-compatible libraries
- Development Tools: Compatible with major AI development frameworks
- Enterprise Systems: Integrates with existing infrastructure
OpenAI API Compatibility
Supported Endpoints
The DeepSeek API supports the following OpenAI-compatible endpoints:
Endpoint | Status | Notes |
---|---|---|
/v1/chat/completions | ✅ Full Support | Complete compatibility |
/v1/completions | ✅ Full Support | Legacy completion format |
/v1/models | ✅ Full Support | Lists available models |
/v1/embeddings | ⚠️ Planned | Coming soon |
/v1/files | ⚠️ Planned | For batch processing |
/v1/batches | ⚠️ Planned | Batch API support |
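Before migrating anything, you can verify connectivity and confirm which models your key can access with a quick call to /v1/models. A minimal sketch (assumes the openai Python SDK and a DEEPSEEK_API_KEY environment variable):
python
import os

from openai import OpenAI

# Point the standard OpenAI client at the DeepSeek endpoint
client = OpenAI(
    api_key=os.environ["DEEPSEEK_API_KEY"],
    base_url="https://api.deepseek.com/v1"
)

# List the models available to this key as a quick connectivity check
for model in client.models.list().data:
    print(model.id)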
Migration from OpenAI
Simple Migration
python
# Before (OpenAI)
from openai import OpenAI

client = OpenAI(api_key="sk-...")

# After (DeepSeek)
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_DEEPSEEK_API_KEY",
    base_url="https://api.deepseek.com/v1"
)

# The rest of your code remains the same!
response = client.chat.completions.create(
    model="deepseek-chat",  # Just change the model name
    messages=[
        {"role": "user", "content": "Hello, world!"}
    ]
)
Environment Variable Migration
bash
# Before
export OPENAI_API_KEY="sk-..."
# After
export OPENAI_API_KEY="YOUR_DEEPSEEK_API_KEY"
export OPENAI_BASE_URL="https://api.deepseek.com/v1"
python
# Your existing code works without changes
import os
from openai import OpenAI
client = OpenAI() # Automatically uses environment variables
Configuration File Migration
python
# config.py
import os

from openai import OpenAI

# Flexible configuration for multiple providers
API_CONFIGS = {
    "deepseek": {
        "api_key": os.getenv("DEEPSEEK_API_KEY"),
        "base_url": "https://api.deepseek.com/v1",
        "models": {
            "chat": "deepseek-chat",
            "code": "deepseek-coder",
            "math": "deepseek-math"
        }
    },
    "openai": {
        "api_key": os.getenv("OPENAI_API_KEY"),
        "base_url": "https://api.openai.com/v1",
        "models": {
            "chat": "gpt-4",
            "code": "gpt-4",
            "math": "gpt-4"
        }
    }
}

def get_client(provider="deepseek"):
    config = API_CONFIGS[provider]
    return OpenAI(
        api_key=config["api_key"],
        base_url=config["base_url"]
    ), config["models"]

# Usage
client, models = get_client("deepseek")
response = client.chat.completions.create(
    model=models["chat"],
    messages=[{"role": "user", "content": "Hello!"}]
)
SDK Compatibility
Python SDK
OpenAI Python Library
python
# Works out of the box
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_DEEPSEEK_API_KEY",
    base_url="https://api.deepseek.com/v1"
)

# All OpenAI SDK features work
response = client.chat.completions.create(
    model="deepseek-chat",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
    temperature=0.7,
    max_tokens=100
)

for chunk in response:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
LangChain Integration
python
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage

# DeepSeek with LangChain
llm = ChatOpenAI(
    model="deepseek-chat",
    api_key="YOUR_DEEPSEEK_API_KEY",
    base_url="https://api.deepseek.com/v1",
    temperature=0.7
)

# Use as normal
response = llm.invoke([HumanMessage(content="Hello, world!")])
print(response.content)
LlamaIndex Integration
python
from llama_index.llms.openai import OpenAI as LlamaOpenAI
from llama_index.core import Settings

# Configure DeepSeek for LlamaIndex
# Note: some LlamaIndex versions validate OpenAI model names; if yours
# rejects "deepseek-chat", use OpenAILike from llama_index.llms.openai_like.
Settings.llm = LlamaOpenAI(
    model="deepseek-chat",
    api_key="YOUR_DEEPSEEK_API_KEY",
    api_base="https://api.deepseek.com/v1"
)

# Use with LlamaIndex
# A vector index still needs an embedding model (DeepSeek embeddings are
# planned, not yet available), so configure Settings.embed_model separately.
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What is this document about?")
print(response)
Node.js SDK
OpenAI Node.js Library
javascript
import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: 'YOUR_DEEPSEEK_API_KEY',
  baseURL: 'https://api.deepseek.com/v1',
});

// All features work as expected
const completion = await openai.chat.completions.create({
  model: 'deepseek-chat',
  messages: [{ role: 'user', content: 'Hello, world!' }],
  stream: true,
});

for await (const chunk of completion) {
  if (chunk.choices[0]?.delta?.content) {
    process.stdout.write(chunk.choices[0].delta.content);
  }
}
Express.js Integration
javascript
import express from 'express';
import OpenAI from 'openai';

const app = express();
app.use(express.json());

const openai = new OpenAI({
  apiKey: process.env.DEEPSEEK_API_KEY,
  baseURL: 'https://api.deepseek.com/v1',
});

app.post('/chat', async (req, res) => {
  try {
    const { message } = req.body;
    const completion = await openai.chat.completions.create({
      model: 'deepseek-chat',
      messages: [{ role: 'user', content: message }],
    });
    res.json({
      response: completion.choices[0].message.content
    });
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});

app.listen(3000, () => {
  console.log('Server running on port 3000');
});
Go SDK
go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/sashabaranov/go-openai"
)

func main() {
    config := openai.DefaultConfig("YOUR_DEEPSEEK_API_KEY")
    config.BaseURL = "https://api.deepseek.com/v1"
    client := openai.NewClientWithConfig(config)

    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: "deepseek-chat",
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello, world!",
                },
            },
        },
    )
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(resp.Choices[0].Message.Content)
}
Framework Compatibility
Streamlit Integration
python
import streamlit as st
from openai import OpenAI

# Initialize DeepSeek client
@st.cache_resource
def get_client():
    return OpenAI(
        api_key=st.secrets["DEEPSEEK_API_KEY"],
        base_url="https://api.deepseek.com/v1"
    )

client = get_client()

st.title("DeepSeek Chat App")

# Chat interface
if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        )
        response_placeholder = st.empty()
        full_response = ""
        for chunk in response:
            if chunk.choices[0].delta.content:
                full_response += chunk.choices[0].delta.content
                response_placeholder.markdown(full_response + "▌")
        response_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
Gradio Integration
python
import gradio as gr
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_DEEPSEEK_API_KEY",
    base_url="https://api.deepseek.com/v1"
)

def chat_with_deepseek(message, history):
    # history is a list of (user, assistant) pairs in Gradio's default
    # tuple format
    messages = []
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    response = client.chat.completions.create(
        model="deepseek-chat",
        messages=messages,
        stream=True
    )

    partial_message = ""
    for chunk in response:
        if chunk.choices[0].delta.content:
            partial_message += chunk.choices[0].delta.content
            yield partial_message

# Create Gradio interface
demo = gr.ChatInterface(
    chat_with_deepseek,
    title="DeepSeek Chat",
    description="Chat with DeepSeek AI models"
)

if __name__ == "__main__":
    demo.launch()
FastAPI Integration
python
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from openai import OpenAI
import asyncio
from typing import List

app = FastAPI(title="DeepSeek API Wrapper")

client = OpenAI(
    api_key="YOUR_DEEPSEEK_API_KEY",
    base_url="https://api.deepseek.com/v1"
)

class ChatMessage(BaseModel):
    role: str
    content: str

class ChatRequest(BaseModel):
    messages: List[ChatMessage]
    model: str = "deepseek-chat"
    temperature: float = 0.7
    max_tokens: int = 1000

class ChatResponse(BaseModel):
    content: str
    model: str
    usage: dict

@app.post("/chat", response_model=ChatResponse)
async def chat_completion(request: ChatRequest):
    try:
        # Convert to OpenAI format
        messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
        # Run the blocking SDK call in a thread so the event loop stays free
        response = await asyncio.to_thread(
            client.chat.completions.create,
            model=request.model,
            messages=messages,
            temperature=request.temperature,
            max_tokens=request.max_tokens
        )
        return ChatResponse(
            content=response.choices[0].message.content,
            model=response.model,
            usage=response.usage.model_dump()
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/models")
async def list_models():
    try:
        models = await asyncio.to_thread(client.models.list)
        return {"models": [model.id for model in models.data]}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
Model Mapping
OpenAI to DeepSeek Model Mapping
OpenAI Model | DeepSeek Equivalent | Use Case |
---|---|---|
gpt-4 | deepseek-chat | General conversation |
gpt-4-turbo | deepseek-chat | Advanced reasoning |
gpt-3.5-turbo | deepseek-chat | Fast responses |
code-davinci-002 | deepseek-coder | Code generation |
text-davinci-003 | deepseek-chat | Text completion |
Automatic Model Mapping
python
from openai import OpenAI

class ModelMapper:
    """Automatically map OpenAI models to DeepSeek equivalents"""

    MAPPING = {
        "gpt-4": "deepseek-chat",
        "gpt-4-turbo": "deepseek-chat",
        "gpt-4-turbo-preview": "deepseek-chat",
        "gpt-3.5-turbo": "deepseek-chat",
        "gpt-3.5-turbo-16k": "deepseek-chat",
        "code-davinci-002": "deepseek-coder",
        "text-davinci-003": "deepseek-chat",
        "text-davinci-002": "deepseek-chat",
    }

    @classmethod
    def map_model(cls, openai_model: str) -> str:
        """Map OpenAI model to DeepSeek equivalent"""
        return cls.MAPPING.get(openai_model, "deepseek-chat")

    @classmethod
    def create_client_with_mapping(cls, api_key: str):
        """Create client with automatic model mapping"""
        original_client = OpenAI(
            api_key=api_key,
            base_url="https://api.deepseek.com/v1"
        )
        # Wrap the create method to auto-map models
        original_create = original_client.chat.completions.create

        def create_with_mapping(**kwargs):
            if "model" in kwargs:
                kwargs["model"] = cls.map_model(kwargs["model"])
            return original_create(**kwargs)

        original_client.chat.completions.create = create_with_mapping
        return original_client

# Usage
client = ModelMapper.create_client_with_mapping("YOUR_DEEPSEEK_API_KEY")

# This will automatically use deepseek-chat instead of gpt-4
response = client.chat.completions.create(
    model="gpt-4",  # Automatically mapped to deepseek-chat
    messages=[{"role": "user", "content": "Hello!"}]
)
Parameter Compatibility
Supported Parameters
Parameter | Support | Notes |
---|---|---|
model | ✅ | Use DeepSeek model names |
messages | ✅ | Full compatibility |
temperature | ✅ | 0.0 to 2.0 |
max_tokens | ✅ | Up to model limit |
top_p | ✅ | 0.0 to 1.0 |
frequency_penalty | ✅ | -2.0 to 2.0 |
presence_penalty | ✅ | -2.0 to 2.0 |
stop | ✅ | Up to 4 sequences |
stream | ✅ | Full streaming support |
functions | ✅ | Function calling |
function_call | ✅ | Function calling |
tools | ✅ | Tool calling |
tool_choice | ✅ | Tool selection |
response_format | ✅ | JSON mode |
seed | ⚠️ | Limited support |
logit_bias | ❌ | Not supported |
user | ❌ | Not supported |
Parameter Migration Helper
python
def migrate_parameters(openai_params: dict) -> dict:
    """Migrate OpenAI parameters to DeepSeek compatible format"""
    # Parameters to remove (not supported)
    unsupported = ["logit_bias", "user"]

    deepseek_params = openai_params.copy()

    # Remove unsupported parameters
    for param in unsupported:
        deepseek_params.pop(param, None)

    # Map model names
    if "model" in deepseek_params:
        deepseek_params["model"] = ModelMapper.map_model(deepseek_params["model"])

    # Clamp temperature to the supported range
    if "temperature" in deepseek_params:
        temp = deepseek_params["temperature"]
        deepseek_params["temperature"] = max(0.0, min(2.0, temp))

    # Clamp top_p to the supported range
    if "top_p" in deepseek_params:
        top_p = deepseek_params["top_p"]
        deepseek_params["top_p"] = max(0.0, min(1.0, top_p))

    return deepseek_params

# Usage
openai_params = {
    "model": "gpt-4",
    "messages": [{"role": "user", "content": "Hello!"}],
    "temperature": 0.7,
    "logit_bias": {"50256": -100},  # Will be removed
    "user": "user123"  # Will be removed
}

deepseek_params = migrate_parameters(openai_params)
response = client.chat.completions.create(**deepseek_params)
Error Handling Compatibility
Error Code Mapping
python
class ErrorHandler:
    """Handle errors with OpenAI compatibility"""

    ERROR_MAPPING = {
        401: "invalid_api_key",
        429: "rate_limit_exceeded",
        400: "invalid_request_error",
        500: "api_error",
        503: "service_unavailable"
    }

    @classmethod
    def handle_error(cls, error):
        """Handle errors in OpenAI-compatible format"""
        if hasattr(error, 'status_code'):
            error_type = cls.ERROR_MAPPING.get(error.status_code, "unknown_error")
            return {
                "error": {
                    "type": error_type,
                    "code": error.status_code,
                    "message": str(error),
                    "param": None
                }
            }
        return {
            "error": {
                "type": "unknown_error",
                "message": str(error)
            }
        }

# Usage with error handling
try:
    response = client.chat.completions.create(
        model="deepseek-chat",
        messages=[{"role": "user", "content": "Hello!"}]
    )
except Exception as e:
    error_info = ErrorHandler.handle_error(e)
    print(f"Error: {error_info}")
Testing Compatibility
Compatibility Test Suite
python
import pytest
from openai import OpenAI

class TestDeepSeekCompatibility:
    """Test suite for DeepSeek API compatibility"""

    @pytest.fixture
    def client(self):
        return OpenAI(
            api_key="YOUR_DEEPSEEK_API_KEY",
            base_url="https://api.deepseek.com/v1"
        )

    def test_basic_chat_completion(self, client):
        """Test basic chat completion compatibility"""
        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        assert response.choices[0].message.content
        assert response.model
        assert response.usage.total_tokens > 0

    def test_streaming_compatibility(self, client):
        """Test streaming response compatibility"""
        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": "Count to 5"}],
            stream=True
        )
        chunks = list(response)
        assert len(chunks) > 0
        assert any(chunk.choices[0].delta.content for chunk in chunks)

    def test_function_calling_compatibility(self, client):
        """Test function calling compatibility"""
        # Legacy functions API; the tools parameter is the modern equivalent
        functions = [
            {
                "name": "get_weather",
                "description": "Get weather information",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {"type": "string"}
                    },
                    "required": ["location"]
                }
            }
        ]
        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
            functions=functions,
            function_call="auto"
        )
        assert response.choices[0].message

    def test_parameter_compatibility(self, client):
        """Test parameter compatibility"""
        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": "Hello!"}],
            temperature=0.7,
            max_tokens=100,
            top_p=0.9,
            frequency_penalty=0.1,
            presence_penalty=0.1,
            stop=["END"]
        )
        assert response.choices[0].message.content
        assert len(response.choices[0].message.content) <= 100 * 4  # Rough: ~4 chars/token

# Run tests
if __name__ == "__main__":
    pytest.main([__file__])
Migration Checklist
Pre-Migration
- [ ] Inventory current OpenAI API usage
- [ ] Identify unsupported features
- [ ] Plan model mapping strategy
- [ ] Set up DeepSeek API account
- [ ] Test compatibility with sample requests (see the smoke-test sketch below)
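A minimal pre-migration smoke test, assuming a DEEPSEEK_API_KEY environment variable, might look like this:
python
import os

from openai import OpenAI

client = OpenAI(
    api_key=os.environ["DEEPSEEK_API_KEY"],
    base_url="https://api.deepseek.com/v1"
)

# One request per feature you rely on: basic chat, streaming, etc.
response = client.chat.completions.create(
    model="deepseek-chat",
    messages=[{"role": "user", "content": "ping"}],
    max_tokens=5
)
assert response.choices[0].message.content, "basic chat completion failed"

stream = client.chat.completions.create(
    model="deepseek-chat",
    messages=[{"role": "user", "content": "ping"}],
    stream=True
)
assert any(
    chunk.choices and chunk.choices[0].delta.content for chunk in stream
), "streaming failed"
print("Smoke test passed")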
During Migration
- [ ] Update API keys and base URLs
- [ ] Map model names to DeepSeek equivalents
- [ ] Remove unsupported parameters
- [ ] Update error handling if needed
- [ ] Test all critical functionality
Post-Migration
- [ ] Monitor API usage and costs (a usage-logging sketch follows this list)
- [ ] Validate response quality
- [ ] Update documentation
- [ ] Train team on new models
- [ ] Set up monitoring and alerts
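For the monitoring items, a lightweight starting point is logging token usage on every call so you can alert on anomalies later. A minimal sketch (the wrapper name and logger setup are illustrative, not part of any SDK):
python
import logging

logger = logging.getLogger("deepseek.usage")

def tracked_completion(client, **kwargs):
    """Call chat.completions.create and log token usage for monitoring."""
    response = client.chat.completions.create(**kwargs)
    usage = response.usage
    logger.info(
        "model=%s prompt_tokens=%d completion_tokens=%d total_tokens=%d",
        response.model,
        usage.prompt_tokens,
        usage.completion_tokens,
        usage.total_tokens,
    )
    return response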
Best Practices
Gradual Migration
python
import random

from openai import OpenAI

class GradualMigration:
    """Gradually migrate from OpenAI to DeepSeek"""

    def __init__(self, openai_key: str, deepseek_key: str, migration_percentage: float = 0.1):
        self.openai_client = OpenAI(api_key=openai_key)
        self.deepseek_client = OpenAI(
            api_key=deepseek_key,
            base_url="https://api.deepseek.com/v1"
        )
        self.migration_percentage = migration_percentage

    def create_completion(self, **kwargs):
        """Route requests based on migration percentage"""
        if random.random() < self.migration_percentage:
            # Use DeepSeek
            kwargs["model"] = ModelMapper.map_model(kwargs.get("model", "gpt-3.5-turbo"))
            return self.deepseek_client.chat.completions.create(**kwargs)
        else:
            # Use OpenAI
            return self.openai_client.chat.completions.create(**kwargs)

# Usage
migrator = GradualMigration(
    openai_key="sk-...",
    deepseek_key="YOUR_DEEPSEEK_API_KEY",
    migration_percentage=0.2  # 20% of requests to DeepSeek
)

response = migrator.create_completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}]
)
A/B Testing
python
from openai import OpenAI

class ABTester:
    """A/B test OpenAI vs DeepSeek responses"""

    def __init__(self, openai_key: str, deepseek_key: str):
        self.openai_client = OpenAI(api_key=openai_key)
        self.deepseek_client = OpenAI(
            api_key=deepseek_key,
            base_url="https://api.deepseek.com/v1"
        )

    def compare_responses(self, messages: list, model: str = "gpt-3.5-turbo"):
        """Compare responses from both providers"""
        # OpenAI response
        openai_response = self.openai_client.chat.completions.create(
            model=model,
            messages=messages
        )

        # DeepSeek response
        deepseek_model = ModelMapper.map_model(model)
        deepseek_response = self.deepseek_client.chat.completions.create(
            model=deepseek_model,
            messages=messages
        )

        return {
            "openai": {
                "content": openai_response.choices[0].message.content,
                "usage": openai_response.usage.model_dump(),
                "model": openai_response.model
            },
            "deepseek": {
                "content": deepseek_response.choices[0].message.content,
                "usage": deepseek_response.usage.model_dump(),
                "model": deepseek_response.model
            }
        }

# Usage
tester = ABTester("sk-...", "YOUR_DEEPSEEK_API_KEY")
comparison = tester.compare_responses([
    {"role": "user", "content": "Explain quantum computing"}
])

print("OpenAI:", comparison["openai"]["content"][:100])
print("DeepSeek:", comparison["deepseek"]["content"][:100])
Troubleshooting
Common Migration Issues
- Model not found: Update model names to DeepSeek equivalents
- Parameter errors: Remove unsupported parameters
- Authentication errors: Verify API key and base URL
- Rate limiting: Adjust request frequency or add backoff (see the retry sketch below)
- Response format differences: Update parsing logic if needed
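For rate limiting specifically, exponential backoff on 429 responses usually suffices; a minimal sketch using the SDK's typed RateLimitError:
python
import time

import openai

def create_with_backoff(client, max_retries=5, **kwargs):
    """Retry chat completions with exponential backoff on 429s."""
    for attempt in range(max_retries):
        try:
            return client.chat.completions.create(**kwargs)
        except openai.RateLimitError:
            if attempt == max_retries - 1:
                raise
            time.sleep(2 ** attempt)  # 1s, 2s, 4s, ...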
Debug Helper
python
def debug_compatibility_issue(error, request_params):
    """Debug compatibility issues"""
    print(f"Error: {error}")
    print(f"Request params: {request_params}")

    # Check common issues
    if "model" in request_params:
        model = request_params["model"]
        if model.startswith("gpt-"):
            print(f"Suggestion: Map '{model}' to DeepSeek equivalent")
    if "logit_bias" in request_params:
        print("Issue: logit_bias not supported, remove this parameter")
    if "user" in request_params:
        print("Issue: user parameter not supported, remove this parameter")

    # Suggest fixes
    fixed_params = migrate_parameters(request_params)
    print(f"Suggested params: {fixed_params}")

# Usage
try:
    response = client.chat.completions.create(**params)
except Exception as e:
    debug_compatibility_issue(e, params)