rigging.prompt

Treat empty function signatures as prompts for structured chat interfaces.

DEFAULT_DOC = 'Convert the following inputs to outputs ({func_name}).' module-attribute #

Default docstring if none is provided to a prompt function.

DEFAULT_MAX_ROUNDS = 3 module-attribute #

Default maximum number of rounds for a prompt to run until outputs are parsed.

Ctx(tag: str | None = None, prefix: str | None = None, example: str | Model | None = None) dataclass #

Used in type annotations to provide additional context for the prompt construction.

You can use this annotation on inputs and outputs to prompt functions.

tag_override = Annotated[str, Ctx(tag="custom_tag", ...)]
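
For example, a sketch of Ctx applied to both an input and an output of a prompt function (the function name, tags, and example text here are illustrative):

import typing as t
import rigging as rg

@rg.prompt
async def summarize(
    text: t.Annotated[str, rg.Ctx(tag="document")],  # wrap this input in <document> tags
) -> t.Annotated[str, rg.Ctx(tag="summary", example="A short summary.")]:
    """Summarize the document."""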

Prompt(func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | None = None, attempt_recovery: bool = True, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, inputs: list[Input] = list(), output: Output = lambda: ChatOutput(id='chat', context=Ctx())(), watch_callbacks: list[WatchChatCallback] = list(), params: GenerateParams | None = None, api_tools: list[ApiTool] = list(), _generator_id: str | None = None, _generator: Generator | None = None, _pipeline: ChatPipeline | None = None, _docstring: str | None = None) dataclass #

Bases: Generic[P, R]

Prompts wrap hollow functions and create structured chat interfaces for passing inputs into a ChatPipeline and parsing outputs.
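
A minimal sketch of the typical flow (names are illustrative; see the prompt decorator at the bottom of this page for the full set of options):

import rigging as rg

@rg.prompt(generator_id="gpt-4o-mini")
async def translate(text: str, language: str) -> str:
    """Translate the text into {{ language }}."""

translation = await translate("hello world", "french")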

api_tools: list[ApiTool] = dataclasses.field(default_factory=list) class-attribute instance-attribute #

The API tools to be made available when generating chats for this prompt.

attempt_recovery: bool = True class-attribute instance-attribute #

Whether the prompt should attempt to recover from errors in output parsing.

docstring: str property #

The docstring for the prompt function.

drop_dialog: bool = True class-attribute instance-attribute #

When attempting recovery, whether to drop intermediate dialog while parsing was being resolved.

func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | None = None class-attribute instance-attribute #

The function that the prompt was derived from.

inputs: list[Input] = dataclasses.field(default_factory=list) class-attribute instance-attribute #

The structured input handlers for the prompt.

max_rounds: int = DEFAULT_MAX_ROUNDS class-attribute instance-attribute #

The maximum number of rounds the prompt should try to reparse outputs.

output: Output = dataclasses.field(default_factory=lambda: ChatOutput(id='chat', context=Ctx())) class-attribute instance-attribute #

The structured output handler for the prompt.

params: GenerateParams | None = None class-attribute instance-attribute #

The parameters to be used when generating chats for this prompt.

pipeline: ChatPipeline | None property #

If available, the resolved Chat Pipeline for the prompt.

template: str property #

The dynamic jinja2 template for the prompt function.

watch_callbacks: list[WatchChatCallback] = dataclasses.field(default_factory=list) class-attribute instance-attribute #

Callbacks to be passed any chats produced while executing this prompt.

bind(other: ChatPipeline | Generator | Chat | str) -> t.Callable[P, t.Coroutine[t.Any, t.Any, R]] #

Binds the prompt to a pipeline, generator, or chat and returns a scoped run callable.

@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind("gpt-3.5-turbo")("the world")

Parameters:

  • other (ChatPipeline | Generator | Chat | str) –

    The pipeline, generator, generator id, or chat to bind to.

Returns:

  • Callable[P, Coroutine[Any, Any, R]]

    A callable for executing this prompt

Source code in rigging/prompt.py
def bind(self, other: ChatPipeline | Generator | Chat | str) -> t.Callable[P, t.Coroutine[t.Any, t.Any, R]]:
    """
    Binds the prompt to a pipeline, generator, or chat and returns a scoped run callable.

    ```
    @rg.prompt
    def say_hello(name: str) -> str:
        \"""Say hello to {{ name }}\"""

    await say_hello.bind("gpt-3.5-turbo")("the world")
    ```

    Args:
        other: The pipeline, generator, generator id, or chat to bind to.

    Returns:
        A callable for executing this prompt
    """
    pipeline = self._resolve_to_pipeline(other)
    if pipeline.on_failed == "skip":
        raise NotImplementedError(
            "pipeline.on_failed='skip' cannot be used for prompt methods that return one object"
        )

    async def run(*args: P.args, **kwargs: P.kwargs) -> R:
        results = await self.bind_many(pipeline)(1, *args, **kwargs)
        return results[0]

    run.__rg_prompt__ = self  # type: ignore

    return run

bind_many(other: ChatPipeline | Generator | Chat | str) -> t.Callable[Concatenate[int, P], t.Coroutine[t.Any, t.Any, list[R]]] #

Binds the prompt to a pipeline, generator, or chat and returns a scoped run_many callable.

@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind_many("gpt-3.5-turbo")(5, "the world")

Parameters:

  • other (ChatPipeline | Generator | Chat | str) –

    The pipeline, generator, generator id, or chat to bind to.

Returns:

  • Callable[Concatenate[int, P], Coroutine[Any, Any, list[R]]]

    A callable for executing this prompt.

Source code in rigging/prompt.py
def bind_many(
    self, other: ChatPipeline | Generator | Chat | str
) -> t.Callable[Concatenate[int, P], t.Coroutine[t.Any, t.Any, list[R]]]:
    """
    Binds the prompt to a pipeline, generator, or chat and returns a scoped run_many callable.

    ```
    @rg.prompt
    def say_hello(name: str) -> str:
        \"""Say hello to {{ name }}\"""

    await say_hello.bind_many("gpt-3.5-turbo")(5, "the world")
    ```

    Args:
        other: The pipeline, generator, generator id, or chat to bind to.

    Returns:
        A callable for executing this prompt.
    """
    pipeline = self._resolve_to_pipeline(other)
    if pipeline.on_failed == "include" and not isinstance(self.output, ChatOutput):
        raise NotImplementedError("pipeline.on_failed='include' cannot be used with prompts that process outputs")

    async def run_many(count: int, /, *args: P.args, **kwargs: P.kwargs) -> list[R]:
        with tracer.span(
            "Prompt {name}()" if count == 1 else "Prompt {name}() (x{count})",
            count=count,
            name=get_qualified_name(self.func) if self.func else "<generated>",
            arguments=self._bind_args(*args, **kwargs),
        ) as span:
            content = self.render(*args, **kwargs)
            _pipeline = (
                pipeline.fork(content)
                .using_api_tools(*self.api_tools)
                .until(
                    self._until_parsed,
                    attempt_recovery=self.attempt_recovery,
                    drop_dialog=self.drop_dialog,
                    max_rounds=self.max_rounds,
                )
                .with_(self.params)
            )
            chats = await _pipeline.run_many(count)

            # TODO: I can't remember why we don't just pass the watch_callbacks to the pipeline
            # Maybe it has something to do with uniqueness and merging?

            def wrap_watch_callback(callback: WatchChatCallback) -> WatchChatCallback:
                async def traced_watch_callback(chats: list[Chat]) -> None:
                    with tracer.span(
                        "Watch with {callback}()", callback=get_qualified_name(callback), chat_count=len(chats)
                    ):
                        await callback(chats)

                return traced_watch_callback

            coros = [
                wrap_watch_callback(watch)(chats)
                for watch in self.watch_callbacks
                if watch not in pipeline.watch_callbacks
            ]
            await asyncio.gather(*coros)

            results = [self.process(chat) for chat in chats]
            span.set_attribute("results", results)
            return results

    run_many.__rg_prompt__ = self  # type: ignore

    return run_many

bind_over(other: ChatPipeline | Generator | Chat | str | None = None) -> t.Callable[Concatenate[t.Sequence[Generator | str], P], t.Coroutine[t.Any, t.Any, list[R]]] #

Binds the prompt to a pipeline, generator, or chat and returns a scoped run_over callable.

@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind_over("gpt-3.5-turbo")(["gpt-4o", "gpt-4"], "the world")

Parameters:

  • other (ChatPipeline | Generator | Chat | str | None, default: None ) –

    The pipeline, generator, generator id, or chat to bind to.

Returns:

  • Callable[Concatenate[Sequence[Generator | str], P], Coroutine[Any, Any, list[R]]]

    A callable for executing this prompt.

Source code in rigging/prompt.py
def bind_over(
    self, other: ChatPipeline | Generator | Chat | str | None = None
) -> t.Callable[Concatenate[t.Sequence[Generator | str], P], t.Coroutine[t.Any, t.Any, list[R]]]:
    """
    Binds the prompt to a pipeline, generator, or chat and returns a scoped run_over callable.

    ```
    @rg.prompt
    def say_hello(name: str) -> str:
        \"""Say hello to {{ name }}\"""

    await say_hello.bind_over("gpt-3.5-turbo")(["gpt-4o", "gpt-4"], "the world")
    ```

    Args:
        other: The pipeline, generator, generator id, or chat to bind to.

    Returns:
        A callable for executing this prompt.
    """
    include_original = other is not None

    if other is None:
        pipeline = get_generator("base!base").chat().catch(on_failed="skip")  # TODO: Clean this up
    else:
        pipeline = self._resolve_to_pipeline(other)

    if pipeline.on_failed == "include" and not isinstance(self.output, ChatOutput):
        raise NotImplementedError("pipeline.on_failed='include' cannot be used with prompts that process outputs")

    async def run_over(generators: t.Sequence[Generator | str], /, *args: P.args, **kwargs: P.kwargs) -> list[R]:
        content = self.render(*args, **kwargs)
        _pipeline = (
            pipeline.fork(content)
            .using_api_tools(*self.api_tools)
            .until(
                self._until_parsed,
                attempt_recovery=self.attempt_recovery,
                drop_dialog=self.drop_dialog,
                max_rounds=self.max_rounds,
            )
            .with_(self.params)
        )
        chats = await _pipeline.run_over(*generators, include_original=include_original)

        coros = [watch(chats) for watch in self.watch_callbacks if watch not in pipeline.watch_callbacks]
        await asyncio.gather(*coros)

        return [self.process(chat) for chat in chats]

    run_over.__rg_prompt__ = self  # type: ignore

    return run_over

clone(*, skip_callbacks: bool = False) -> Prompt[P, R] #

Creates a deep copy of this prompt.
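
For example, assuming the write_joke prompt from the decorator example below, a clone can be tuned independently:

new_prompt = write_joke.clone()
new_prompt.set_(max_rounds=5)  # the original write_joke is unaffected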

Parameters:

  • skip_callbacks (bool, default: False ) –

    Whether to skip copying the watch callbacks.

Returns:

  • Prompt[P, R]

    A new instance of the prompt.

Source code in rigging/prompt.py
def clone(self, *, skip_callbacks: bool = False) -> Prompt[P, R]:
    """
    Creates a deep copy of this prompt.

    Args:
        skip_callbacks: Whether to skip copying the watch callbacks.

    Returns:
        A new instance of the prompt.
    """
    new = Prompt(
        func=self.func,
        _pipeline=self.pipeline,
        params=self.params.model_copy() if self.params is not None else None,
        attempt_recovery=self.attempt_recovery,
        drop_dialog=self.drop_dialog,
        max_rounds=self.max_rounds,
    )
    if not skip_callbacks:
        new.watch_callbacks = self.watch_callbacks.copy()
    return new

process(chat: Chat) -> R #

Attempt to parse the output from a chat into the expected return type.
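
A sketch of manual use, assuming a chat produced by a compatible pipeline (the pipeline and prompt names are illustrative):

chats = await pipeline.run_many(1)
joke = write_joke.process(chats[0])  # parses the chat into the declared return type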

Source code in rigging/prompt.py
def process(self, chat: Chat) -> R:
    """
    Attempt to parse the output from a chat into the expected return type.
    """
    return self.output.from_chat(chat)  # type: ignore

render(*args: P.args, **kwargs: P.kwargs) -> str #

Pass the arguments to the jinja2 template and render the full prompt.
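
This can be useful for inspecting a prompt before execution, e.g. with the say_hello prompt from the bind examples above:

content = say_hello.render("the world")
print(content)  # the full prompt text, including any input/output tag structure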

Source code in rigging/prompt.py
def render(self, *args: P.args, **kwargs: P.kwargs) -> str:
    """
    Pass the arguments to the jinja2 template and render the full prompt.
    """

    env = Environment(
        trim_blocks=True,
        lstrip_blocks=True,
        keep_trailing_newline=True,
        undefined=StrictUndefined,
    )
    jinja_template = env.from_string(self.template)

    if self.func is None:
        return jinja_template.render()

    bound_args = self._bind_args(*args, **kwargs)

    for input_ in self.inputs:
        bound_args[to_snake(input_.tag)] = input_.to_xml(bound_args[input_.name])

    return jinja_template.render(**bound_args)

run(*args: P.args, **kwargs: P.kwargs) -> R async #

Use the prompt to run the function with the provided arguments and return the output.
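
A short sketch, assuming a generator was assigned when the prompt was created:

@rg.prompt(generator_id="gpt-4o-mini")
async def summarize(text: str) -> str:
    """Summarize the text."""

summary = await summarize.run("A long passage of text")

As in the prompt decorator example below, the prompt instance can also be awaited directly.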

Parameters:

  • *args (args, default: () ) –

    The positional arguments for the prompt function.

  • **kwargs (kwargs, default: {} ) –

    The keyword arguments for the prompt function.

Returns:

  • R

    The output of the prompt function.

Source code in rigging/prompt.py
async def run(self, *args: P.args, **kwargs: P.kwargs) -> R:
    """
    Use the prompt to run the function with the provided arguments and return the output.

    Args:
        *args: The positional arguments for the prompt function.
        **kwargs: The keyword arguments for the prompt function.

    Returns:
        The output of the prompt function.
    """
    if self.pipeline is None:
        raise RuntimeError(
            "Prompt cannot be executed as a standalone function without being assigned a pipeline or generator"
        )
    return await self.bind(self.pipeline)(*args, **kwargs)

run_many(count: int, /, *args: P.args, **kwargs: P.kwargs) -> list[R] async #

Use the prompt to run the function multiple times with the provided arguments and return the output.
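
For example, reusing the say_hello prompt from above (with a pipeline or generator assigned):

greetings = await say_hello.run_many(3, "the world")  # three independent generations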

Parameters:

  • count (int) –

    The number of times to run the prompt.

  • *args (args, default: () ) –

    The positional arguments for the prompt function.

  • **kwargs (kwargs, default: {} ) –

    The keyword arguments for the prompt function.

Returns:

  • list[R]

    The outputs of the prompt function.

Source code in rigging/prompt.py
async def run_many(self, count: int, /, *args: P.args, **kwargs: P.kwargs) -> list[R]:
    """
    Use the prompt to run the function multiple times with the provided arguments and return the output.

    Args:
        count: The number of times to run the prompt.
        *args: The positional arguments for the prompt function.
        **kwargs: The keyword arguments for the prompt function.

    Returns:
        The outputs of the prompt function.
    """
    if self.pipeline is None:
        raise RuntimeError(
            "Prompt cannot be executed as a standalone function without being assigned a pipeline or generator"
        )
    return await self.bind_many(self.pipeline)(count, *args, **kwargs)

run_over(generators: t.Sequence[Generator | str], /, *args: P.args, **kwargs: P.kwargs) -> list[R] async #

Executes the prompt process across multiple generators.

For each generator, a pipeline is created and the generator is replaced before the run call. All callbacks and parameters are preserved.

If this prompt has a pipeline assigned, it will be included in the run.

Warning

The implementation currently skips any failed chats and only processes successful chats. This may change in the future.
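
A sketch of comparing outputs across models (the generator ids here are illustrative):

results = await write_joke.run_over(["gpt-4o", "gpt-4o-mini"], "programming")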

Parameters:

  • generators (Sequence[Generator | str]) –

    A sequence of generators to be used for the generation process.

  • *args (args, default: () ) –

    The positional arguments for the prompt function.

  • **kwargs (kwargs, default: {} ) –

    The keyword arguments for the prompt function.

Returns:

  • list[R]

    The outputs of the prompt function.

Source code in rigging/prompt.py
async def run_over(self, generators: t.Sequence[Generator | str], /, *args: P.args, **kwargs: P.kwargs) -> list[R]:
    """
    Executes the prompt process across multiple generators.

    For each generator, a pipeline is created and the generator is replaced
    before the run call. All callbacks and parameters are preserved.

    If this prompt has a pipeline assigned, it will be included in the run.

    Warning:
        The implementation currently skips any failed chats and only
        processes successful chats. This may change in the future.

    Args:
        generators: A sequence of generators to be used for the generation process.
        *args: The positional arguments for the prompt function.
        **kwargs: The keyword arguments for the prompt function.

    Returns:
        The outputs of the prompt function.
    """
    return await self.bind_over(self.pipeline)(generators, *args, **kwargs)

set_(attempt_recovery: bool | None = None, drop_dialog: bool | None = None, max_rounds: int | None = None) -> Prompt[P, R] #

Helper to allow updates to the parsing configuration.
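
For example, assuming the write_joke prompt from the decorator example below:

write_joke.set_(attempt_recovery=False, max_rounds=5)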

Parameters:

  • attempt_recovery (bool | None, default: None ) –

    Whether the prompt should attempt to recover from errors in output parsing.

  • drop_dialog (bool | None, default: None ) –

    When attempting recovery, whether to drop intermediate dialog while parsing was being resolved.

  • max_rounds (int | None, default: None ) –

    The maximum number of rounds the prompt should try to reparse outputs.

Returns:

  • Prompt[P, R]

    Self

Source code in rigging/prompt.py
def set_(
    self, attempt_recovery: bool | None = None, drop_dialog: bool | None = None, max_rounds: int | None = None
) -> Prompt[P, R]:
    """
    Helper to allow updates to the parsing configuration.

    Args:
        attempt_recovery: Whether the prompt should attempt to recover from errors in output parsing.
        drop_dialog: When attempting recovery, whether to drop intermediate dialog while parsing was being resolved.
        max_rounds: The maximum number of rounds the prompt should try to reparse outputs.

    Returns:
        Self
    """
    # Explicit None checks so that False/0 values are applied rather than ignored
    if attempt_recovery is not None:
        self.attempt_recovery = attempt_recovery
    if drop_dialog is not None:
        self.drop_dialog = drop_dialog
    if max_rounds is not None:
        self.max_rounds = max_rounds
    return self

watch(*callbacks: WatchChatCallback) -> Prompt[P, R] #

Registers a callback to monitor any chats produced for this prompt.

Parameters:

  • *callbacks (WatchChatCallback, default: () ) –

    The callback functions to be executed.

async def log(chats: list[Chat]) -> None:
    ...

@rg.prompt()
async def summarize(text: str) -> str:
    ...

summarize.watch(log)(...)

or

async def log(chats: list[Chat]) -> None:
    ...

async def _summarize(text: str) -> str:
    ...

summarize = rg.prompt(_summarize).watch(log)

Returns:

  • Prompt[P, R]

    Self

Source code in rigging/prompt.py
def watch(self, *callbacks: WatchChatCallback) -> Prompt[P, R]:
    """
    Registers a callback to monitor any chats produced for this prompt.

    Args:
        *callbacks: The callback functions to be executed.

    ```
    async def log(chats: list[Chat]) -> None:
        ...

    @rg.prompt()
    async def summarize(text: str) -> str:
        ...

    summarize.watch(log)(...)
    ```
    or
    ```
    async def log(chats: list[Chat]) -> None:
        ...

    async def _summarize(text: str) -> str:
        ...

    summarize = rg.prompt(_summarize).watch(log)
    ```

    Returns:
        Self
    """
    for callback in callbacks:
        if callback not in self.watch_callbacks:
            self.watch_callbacks.append(callback)
    return self

with_(params: t.Optional[GenerateParams] = None, **kwargs: t.Any) -> Prompt[P, R] #

Assign specific generation parameter overloads for this prompt.
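
For example, assuming GenerateParams accepts common sampling fields such as temperature:

write_joke.with_(temperature=0.7)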

Parameters:

  • params (Optional[GenerateParams], default: None ) –

    The parameters to set for the underlying chat pipeline.

  • **kwargs (Any, default: {} ) –

    An alternative way to pass parameters as keyword arguments.

Returns:

  • Prompt[P, R]

    Self

Source code in rigging/prompt.py
def with_(self, params: t.Optional[GenerateParams] = None, **kwargs: t.Any) -> Prompt[P, R]:
    """
    Assign specific generation parameter overloads for this prompt.

    Args:
        params: The parameters to set for the underlying chat pipeline.
        **kwargs: An alternative way to pass parameters as keyword arguments.

    Returns:
        Self
    """
    self.params = params if params is not None else GenerateParams(**kwargs)
    return self

make_prompt(content: str, return_type: type[R] | None = None, *, ctx: Ctx | None = None) -> Prompt[..., R] | Prompt[..., str] #

Create a prompt at runtime from a basic string and return type (experimental).

import rigging as rg

write_joke = rg.make_prompt("Write a joke.", ctx=rg.Ctx(tag="joke"))

await write_joke.bind("gpt-4o-mini")()

Note

Adding input parameters is not currently supported. Instead use the rigging.prompt.prompt decorator.

Parameters:

  • content (str) –

    The docstring content for the prompt.

  • return_type (type[R] | None, default: None ) –

    The return type of the prompt function.

  • ctx (Ctx | None, default: None ) –

    Context for the return type (Use this instead of Annotated for better type hints).

Returns:

  • Prompt[..., R] | Prompt[..., str]

    The constructed Prompt

Source code in rigging/prompt.py
def make_prompt(
    content: str, return_type: type[R] | None = None, *, ctx: Ctx | None = None
) -> Prompt[..., R] | Prompt[..., str]:
    """
    Create a prompt at runtime from a basic string and return type (experimental).

    ```
    import rigging as rg

    write_joke = rg.make_prompt("Write a joke.", ctx=rg.Ctx(tag="joke"))

    await write_joke.bind("gpt-4o-mini")()
    ```

    Note:
        Adding input parameters is not currently supported. Instead use
        the [rigging.prompt.prompt][] decorator.

    Args:
        content: The docstring content for the prompt.
        return_type: The return type of the prompt function.
        ctx: Context for the return type (Use this instead of Annotated for better type hints).

    Returns:
        The constructed Prompt
    """
    return_type = return_type or str  # type: ignore
    output = parse_output(t.Annotated[return_type, ctx] if ctx is not None else return_type, "make_prompt(<return>)")
    return Prompt(output=output, _docstring=content)

prompt(func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R] | None = None, /, *, pipeline: ChatPipeline | None = None, generator: Generator | None = None, generator_id: str | None = None, tools: list[ApiTool | t.Callable[..., t.Any]] | None = None) -> t.Callable[[t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R]], Prompt[P, R]] | Prompt[P, R] #

Convert a hollow function into a Prompt, which can be called directly or passed a chat pipeline to execute the function and parse the outputs.

from dataclasses import dataclass
import rigging as rg

@dataclass
class ExplainedJoke:
    chat: rg.Chat
    setup: str
    punchline: str
    explanation: str

@rg.prompt(generator_id="gpt-3.5-turbo")
async def write_joke(topic: str) -> ExplainedJoke:
    """Write a joke."""
    ...

await write_joke("programming")

Note

A docstring is not required, but it can be used to provide guidance to the model, or even handle any number of input transformations. Any input parameter which is not handled inside the docstring will be automatically added and formatted internally.

Note

Output parameters can be basic types, dataclasses, rigging models, lists, or tuples. Internal inspection will attempt to ensure your output types are valid, but there is no guarantee of complete coverage/safety. It's recommended to check rigging.prompt.Prompt.template to inspect the generated jinja2 template.

Note

If you annotate the return value of the function as a rigging.chat.Chat object, then no output parsing will take place and you can parse objects out manually.

You can also use Chat in any number of type annotation inside tuples or dataclasses. All instances will be filled with the final chat object transparently.

Note

All input parameters and output types can be annotated with the rigging.prompt.Ctx annotation to provide additional context for the prompt. This can be used to override the xml tag, provide a prefix string, or example content which will be placed inside output xml tags.

In the case of output parameters, especially in tuples, you might have xml tag collisions between the same basic types. Manually annotating xml tags with rigging.prompt.Ctx is recommended.

Parameters:

  • func (Callable[P, Coroutine[Any, Any, R]] | Callable[P, R] | None, default: None ) –

    The function to convert into a prompt.

  • pipeline (ChatPipeline | None, default: None ) –

    An optional pipeline to use for the prompt.

  • generator (Generator | None, default: None ) –

    An optional generator to use for the prompt.

  • generator_id (str | None, default: None ) –

    An optional generator id to use for the prompt.

  • tools (list[ApiTool | Callable[..., Any]] | None, default: None ) –

    An optional list of API tools to make available to the prompt (Native tools are not currently supported).

Returns:

  • Callable[[Callable[P, Coroutine[Any, Any, R]] | Callable[P, R]], Prompt[P, R]] | Prompt[P, R]

    A prompt instance or a function that can be used to create a prompt.

Source code in rigging/prompt.py
def prompt(
    func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R] | None = None,
    /,
    *,
    pipeline: ChatPipeline | None = None,
    generator: Generator | None = None,
    generator_id: str | None = None,
    tools: list[ApiTool | t.Callable[..., t.Any]] | None = None,
) -> t.Callable[[t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R]], Prompt[P, R]] | Prompt[P, R]:
    """
    Convert a hollow function into a Prompt, which can be called directly or passed a
    chat pipeline to execute the function and parse the outputs.

    ```
    from dataclasses import dataclass
    import rigging as rg

    @dataclass
    class ExplainedJoke:
        chat: rg.Chat
        setup: str
        punchline: str
        explanation: str

    @rg.prompt(generator_id="gpt-3.5-turbo")
    async def write_joke(topic: str) -> ExplainedJoke:
        \"""Write a joke.\"""
        ...

    await write_joke("programming")
    ```

    Note:
        A docstring is not required, but this can be used to provide guidance to the model, or
        even handle any number of input transformations. Any input parameter which is not
        handled inside the docstring will be automatically added and formatted internally.

    Note:
        Output parameters can be basic types, dataclasses, rigging models, lists, or tuples.
        Internal inspection will attempt to ensure your output types are valid, but there is
        no guarantee of complete coverage/safety. It's recommended to check
        [rigging.prompt.Prompt.template][] to inspect the generated jinja2 template.

    Note:
        If you annotate the return value of the function as a [rigging.chat.Chat][] object,
        then no output parsing will take place and you can parse objects out manually.

        You can also use Chat in any number of type annotation inside tuples or dataclasses.
        All instances will be filled with the final chat object transparently.

    Note:
        All input parameters and output types can be annotated with the [rigging.prompt.Ctx][] annotation
        to provide additional context for the prompt. This can be used to override the xml tag, provide
        a prefix string, or example content which will be placed inside output xml tags.

        In the case of output parameters, especially in tuples, you might have xml tag collisions
        between the same basic types. Manually annotating xml tags with [rigging.prompt.Ctx][] is
        recommended.

    Args:
        func: The function to convert into a prompt.
        pipeline: An optional pipeline to use for the prompt.
        generator: An optional generator to use for the prompt.
        generator_id: An optional generator id to use for the prompt.
        tools: An optional list of API tools to make available to the prompt (Native tools are not currently supported).

    Returns:
        A prompt instance or a function that can be used to create a prompt.
    """
    if sum(arg is not None for arg in (pipeline, generator, generator_id)) > 1:
        raise ValueError("Only one of pipeline, generator, or generator_id can be provided")

    def make_prompt(func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R]) -> Prompt[P, R]:
        return Prompt[P, R](
            func=func,  # type: ignore
            _generator_id=generator_id,
            _pipeline=pipeline,
            _generator=generator,
            api_tools=[tool if isinstance(tool, ApiTool) else ApiTool(tool) for tool in tools] if tools else [],
        )

    if func is not None:
        return make_prompt(func)
    return make_prompt