API Reference
This document provides a detailed API reference for the oai2ollama package, covering its core application logic, configuration management, and entry points.
oai2ollama
__all__
module-attribute
```python
__all__ = ['app', 'start']
```
start
Source code in oai2ollama/__init__.py
```python
def start():
    import uvicorn

    uvicorn.run(app, host=env.host, port=11434)
```
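As a usage sketch (a hypothetical invocation, not part of the package): `OPENAI_API_KEY` and `OPENAI_BASE_URL` must be exported beforehand, since `Settings` reads them when the package is imported.

```python
# Minimal sketch: launch the proxy from Python. Requires OPENAI_API_KEY and
# OPENAI_BASE_URL in the environment before import, because settings are
# resolved at import time.
from oai2ollama import start

start()  # serves on env.host (default "localhost") at port 11434
```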
oai2ollama.config
Settings
Bases: BaseSettings
Source code in oai2ollama/config.py
```python
class Settings(BaseSettings):
    model_config = {
        "cli_parse_args": True,
        "cli_kebab_case": True,
        "cli_ignore_unknown_args": True,
        "extra": "ignore",
        "cli_shortcuts": {
            "capabilities": "c",
            "models": "m",
        },
    }

    api_key: str = Field(getenv("OPENAI_API_KEY", ...), description="API key for authentication")  # type: ignore
    base_url: HttpUrl = Field(getenv("OPENAI_BASE_URL", ...), description="Base URL for the OpenAI-compatible API")  # type: ignore
    capacities: CliSuppress[list[Literal["tools", "insert", "vision", "embedding", "thinking"]]] = Field([], repr=False)
    capabilities: list[Literal["tools", "insert", "vision", "embedding", "thinking"]] = []
    host: str = Field("localhost", description="IP / hostname for the API server")
    extra_models: list[str] = Field([], description="Extra models to include in the /api/tags response", alias="models")

    @model_validator(mode="after")
    def _warn_legacy_capacities(self: Self):
        if self.capacities:
            print("\n Warning: 'capacities' is a previous typo, please use 'capabilities' instead.\n", file=stderr)
            self.capabilities.extend(self.capacities)
        return self
```
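The snippet below is a hypothetical construction sketch, not part of the package: it exports placeholder credentials and passes `_cli_parse_args` explicitly so the example does not depend on `sys.argv`.

```python
import os

# Placeholders: both variables are read when the module is imported.
os.environ["OPENAI_API_KEY"] = "sk-placeholder"
os.environ["OPENAI_BASE_URL"] = "https://api.example.com/v1"

from oai2ollama.config import Settings

# _cli_parse_args overrides sys.argv for this sketch; "--models" works via
# the extra_models field's alias.
env = Settings(_cli_parse_args=["--capabilities", "tools", "--models", "my-extra-model"])
print(env.capabilities)  # ['tools']
print(env.extra_models)  # ['my-extra-model']
```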
model_config
class-attribute
instance-attribute
```python
model_config = {
    "cli_parse_args": True,
    "cli_kebab_case": True,
    "cli_ignore_unknown_args": True,
    "extra": "ignore",
    "cli_shortcuts": {"capabilities": "c", "models": "m"},
}
```
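For illustration, continuing the hypothetical setup above: these options mean that CLI flags arrive in kebab-case and that unrecognized flags are skipped rather than raising.

```python
# cli_kebab_case maps --api-key onto the api_key field;
# cli_ignore_unknown_args drops flags Settings does not define.
env = Settings(_cli_parse_args=["--api-key", "sk-demo", "--some-unknown-flag", "x"])
print(env.api_key)  # 'sk-demo'
```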
api_key
class-attribute
instance-attribute
```python
api_key: str = Field(
    getenv("OPENAI_API_KEY", ...), description="API key for authentication"
)
```
base_url
class-attribute
instance-attribute
```python
base_url: HttpUrl = Field(
    getenv("OPENAI_BASE_URL", ...), description="Base URL for the OpenAI-compatible API"
)
```
capacities
class-attribute
instance-attribute
```python
capacities: CliSuppress[
    list[Literal["tools", "insert", "vision", "embedding", "thinking"]]
] = Field([], repr=False)
```
capabilities
class-attribute
instance-attribute
```python
capabilities: list[Literal['tools', 'insert', 'vision', 'embedding', 'thinking']] = []
```
host
class-attribute
instance-attribute
```python
host: str = Field('localhost', description='IP / hostname for the API server')
```
extra_models
class-attribute
instance-attribute
```python
extra_models: list[str] = Field(
    [], description="Extra models to include in the /api/tags response", alias="models"
)
```
_warn_legacy_capacities
_warn_legacy_capacities()
Source code in oai2ollama/config.py
```python
@model_validator(mode="after")
def _warn_legacy_capacities(self: Self):
    if self.capacities:
        print("\n Warning: 'capacities' is a previous typo, please use 'capabilities' instead.\n", file=stderr)
        self.capabilities.extend(self.capacities)
    return self
```
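Continuing the same hypothetical setup, the legacy spelling still validates but is folded into `capabilities`, with a warning printed to stderr:

```python
# Hypothetical: "capacities" is accepted for backward compatibility only.
env = Settings(_cli_parse_args=[], capacities=["vision"])
print(env.capabilities)  # ['vision'], after the deprecation warning prints
```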
oai2ollama._app
_new_client
async
Source code in oai2ollama/_app.py
```python
@Depends
async def _new_client():
    from httpx import AsyncClient

    # One authenticated client per request; http2=True needs the optional "h2" extra (httpx[http2]).
    async with AsyncClient(base_url=str(env.base_url), headers={"Authorization": f"Bearer {env.api_key}"}, timeout=60, http2=True, follow_redirects=True) as client:
        yield client
```
models
async
models(client=_new_client)
Source code in oai2ollama/_app.py
```python
@app.get("/api/tags")
async def models(client=_new_client):
    res = await client.get("/models")
    res.raise_for_status()
    try:
        data = res.json()["data"]
    except (KeyError, TypeError):
        data = []
    models_map = {i["id"]: {"name": i["id"], "model": i["id"]} for i in data} | {i: {"name": i, "model": i} for i in env.extra_models}
    return {"models": list(models_map.values())}
```
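As a hypothetical client-side check against a locally running instance (the port matches `start()` above), the endpoint returns Ollama-style tag entries; the model ID shown is a placeholder:

```python
import httpx

resp = httpx.get("http://localhost:11434/api/tags")
resp.raise_for_status()
print(resp.json())
# e.g. {"models": [{"name": "gpt-4o", "model": "gpt-4o"}, ...]}
```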
show_model
async
Source code in oai2ollama/_app.py
```python
@app.post("/api/show")
async def show_model():
    return {
        "model_info": {"general.architecture": "CausalLM"},
        "capabilities": ["completion", *env.capabilities],
    }
```
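A hypothetical request sketch: Ollama clients typically POST a model name here, but this proxy returns the same static payload regardless of the body.

```python
import httpx

resp = httpx.post("http://localhost:11434/api/show", json={"model": "anything"})
print(resp.json())
# {"model_info": {"general.architecture": "CausalLM"},
#  "capabilities": ["completion", ...plus any configured capabilities]}
```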
list_models
async
list_models(client=_new_client)
Source code in oai2ollama/_app.py
```python
@app.get("/v1/models")
async def list_models(client=_new_client):
    res = await client.get("/models")
    res.raise_for_status()
    return res.json()
```
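Unlike `/api/tags`, this route forwards the upstream OpenAI-style payload unchanged; a hypothetical call:

```python
import httpx

# Typically returns {"object": "list", "data": [...]}, exactly as the
# upstream server sent it.
print(httpx.get("http://localhost:11434/v1/models").json())
```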
chat_completions
async
chat_completions(request: Request, client=_new_client)
Source code in oai2ollama/_app.py
```python
@app.post("/v1/chat/completions")
async def chat_completions(request: Request, client=_new_client):
    data = await request.json()
    if data.get("stream", False):

        async def stream():
            async with client.stream("POST", "/chat/completions", json=data) as response:
                async for chunk in response.aiter_bytes():
                    yield chunk

        return StreamingResponse(stream(), media_type="text/event-stream")
    else:
        res = await client.post("/chat/completions", json=data)
        res.raise_for_status()
        return res.json()
```
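A hypothetical streaming call through the proxy (placeholder model name): the request body follows the OpenAI chat-completions schema, and chunks are relayed verbatim as server-sent events.

```python
import httpx

payload = {
    "model": "gpt-4o",  # placeholder
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": True,
}
with httpx.stream("POST", "http://localhost:11434/v1/chat/completions", json=payload) as r:
    for text in r.iter_text():
        print(text, end="")
```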
ollama_version
async
Source code in oai2ollama/_app.py
```python
@app.get("/api/version")
async def ollama_version():
    return {"version": "0.12.10"}
```