"""Simple abstract base class for custom LLMs.
Subclasses must implement the `__init__`, `_complete`,
`_stream_complete`, and `metadata` methods.
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
    """Serve the chat interface from a completion-only model.

    Adapts ``self.complete`` into a chat-shaped callable via
    ``completion_to_chat_decorator`` and invokes it on *messages*.
    """
    # Build the adapter and call it in one expression; no state is kept.
    return completion_to_chat_decorator(self.complete)(messages, **kwargs)
# NOTE(review): fragment — the `def stream_chat(` header line (and its return
# annotation, plus any decorator) is missing above this parameter list; the
# file appears truncated here. Presumably this is `stream_chat` — confirm
# against the upstream source before relying on this.
self, messages: Sequence[ChatMessage], **kwargs: Any
# Adapt the completion-style streamer into a chat-style streaming callable.
stream_chat_fn = stream_completion_to_chat_decorator(self.stream_complete)
return stream_chat_fn(messages, **kwargs)
# NOTE(review): fragment — the enclosing `async def` header is missing; this
# looks like the parameter list and body of an async chat wrapper (presumably
# `achat`) that simply delegates to the synchronous `chat` — TODO confirm.
messages: Sequence[ChatMessage],
return self.chat(messages, **kwargs)
# NOTE(review): fragment of an async streaming chat method (presumably
# `astream_chat`) — the `async def` header above, the `yield` statement inside
# `gen`, and the final `return gen()` are not visible here; the file appears
# truncated. As written the `for` loop has no body, which is a syntax error.
messages: Sequence[ChatMessage],
) -> ChatResponseAsyncGen:
async def gen() -> ChatResponseAsyncGen:
for message in self.stream_chat(messages, **kwargs):
# NOTE: convert generator to async generator
@llm_completion_callback()
async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
    """Async completion endpoint.

    This base class has no native async path: it invokes the synchronous
    ``complete`` directly (so the call blocks the running event loop).
    Subclasses with true async support should override this method.
    """
    response = self.complete(prompt, **kwargs)
    return response
@llm_completion_callback()
async def astream_complete(
    self, prompt: str, **kwargs: Any
) -> CompletionResponseAsyncGen:
    """Async streaming completion.

    Wraps the synchronous ``stream_complete`` generator in an async
    generator so async callers can ``async for`` over the responses.

    Fix: the original body was truncated — the ``for`` loop had no
    statement (a syntax error) and ``gen()`` was never returned. Restored
    the ``yield`` and the final ``return gen()``.
    """

    async def gen() -> CompletionResponseAsyncGen:
        # NOTE: convert generator to async generator
        for message in self.stream_complete(prompt, **kwargs):
            yield message

    return gen()
def class_name(cls) -> str: