rigging.prompt
Treat empty function signatures as prompts for structured chat interfaces.
DEFAULT_DOC = 'Convert the following inputs to outputs ({func_name}).'  (module-attribute)

Default docstring if none is provided to a prompt function.
DEFAULT_MAX_ROUNDS = 3  (module-attribute)

Default maximum number of rounds for a prompt to run until outputs are parsed.
Ctx(tag: str | None = None, prefix: str | None = None, example: str | Model | None = None)  (dataclass)

Used to annotate input parameters and output types of a prompt to provide additional context: override the xml tag, provide a prefix string, or supply example content to be placed inside output xml tags (see the notes under rigging.prompt.prompt below).
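A minimal, hedged sketch of attaching Ctx through typing.Annotated (the function, tags, and example value here are illustrative, not part of the library):

```
import typing as t

import rigging as rg
from rigging.prompt import Ctx

@rg.prompt
async def translate(
    # Override the xml tag used when rendering this input.
    sentence: t.Annotated[str, Ctx(tag="sentence")],
) -> t.Annotated[str, Ctx(tag="translation", example="Bonjour le monde")]:
    """Translate the sentence to French."""
```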
Prompt(func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]], attempt_recovery: bool = True, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, inputs: list[Input] = list(), output: Output = ChatOutput(id='chat', context=Ctx()), watch_callbacks: list[WatchChatCallback] = list(), params: GenerateParams | None = None, _generator_id: str | None = None, _generator: Generator | None = None, _pipeline: ChatPipeline | None = None)  (dataclass)

Bases: Generic[P, R]

Prompts wrap hollow functions and create structured chat interfaces for passing inputs into a ChatPipeline and parsing outputs.
attempt_recovery: bool = True  (class-attribute, instance-attribute)

Whether the prompt should attempt to recover from errors in output parsing.
docstring: str  (property)

The docstring for the prompt function.
drop_dialog: bool = True  (class-attribute, instance-attribute)

When attempting recovery, whether to drop intermediate dialog while parsing was being resolved.
func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]]  (instance-attribute)

The function that the prompt wraps. This function should be a coroutine.
inputs: list[Input] = dataclasses.field(default_factory=list)  (class-attribute, instance-attribute)

The structured input handlers for the prompt.
max_rounds: int = DEFAULT_MAX_ROUNDS  (class-attribute, instance-attribute)

The maximum number of rounds the prompt should try to reparse outputs.
output: Output = dataclasses.field(default_factory=lambda: ChatOutput(id='chat', context=Ctx()))  (class-attribute, instance-attribute)

The structured output handler for the prompt.
params: GenerateParams | None = None  (class-attribute, instance-attribute)

The parameters to be used when generating chats for this prompt.
pipeline: ChatPipeline | None  (property)

If available, the resolved ChatPipeline for the prompt.
template: str  (property)

The dynamic jinja2 template for the prompt function.
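As the notes under rigging.prompt.prompt below suggest, inspecting this property is a useful sanity check on what will actually be sent to the model. A minimal sketch (say_hello mirrors the example prompt used elsewhere on this page):

```
import rigging as rg

@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

# Print the generated jinja2 template, including any output tags
# added for parsing the annotated return type.
print(say_hello.template)
```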
watch_callbacks: list[WatchChatCallback] = dataclasses.field(default_factory=list)  (class-attribute, instance-attribute)

Callbacks to be passed any chats produced while executing this prompt.
bind(other: ChatPipeline | Generator | Chat | str) -> t.Callable[P, t.Coroutine[t.Any, t.Any, R]]

Binds the prompt to a pipeline, generator, or chat and returns a scoped run callable.

```
@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind("gpt-3.5-turbo")("the world")
```

Parameters:

- other (ChatPipeline | Generator | Chat | str): The pipeline, generator, generator id, or chat to bind to.

Returns:

- Callable[P, Coroutine[Any, Any, R]]: A callable for executing this prompt.
bind_many(other: ChatPipeline | Generator | Chat | str) -> t.Callable[Concatenate[int, P], t.Coroutine[t.Any, t.Any, list[R]]]

Binds the prompt to a pipeline, generator, or chat and returns a scoped run_many callable.

```
@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind_many("gpt-3.5-turbo")(5, "the world")
```

Parameters:

- other (ChatPipeline | Generator | Chat | str): The pipeline, generator, generator id, or chat to bind to.

Returns:

- Callable[Concatenate[int, P], Coroutine[Any, Any, list[R]]]: A callable for executing this prompt.
bind_over(other: ChatPipeline | Generator | Chat | str | None = None) -> t.Callable[Concatenate[t.Sequence[Generator | str], P], t.Coroutine[t.Any, t.Any, list[R]]]

Binds the prompt to a pipeline, generator, or chat and returns a scoped run_over callable.

```
@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind_over("gpt-3.5-turbo")(["gpt-4o", "gpt-4"], "the world")
```

Parameters:

- other (ChatPipeline | Generator | Chat | str | None, default: None): The pipeline, generator, generator id, or chat to bind to.

Returns:

- Callable[Concatenate[Sequence[Generator | str], P], Coroutine[Any, Any, list[R]]]: A callable for executing this prompt.
clone(*, skip_callbacks: bool = False) -> Prompt[P, R]

Creates a deep copy of this prompt.

Parameters:

- skip_callbacks (bool, default: False): Whether to skip copying the watch callbacks.

Returns:

- Prompt[P, R]: A new instance of the prompt.
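As a brief illustration (reusing the say_hello prompt from the bind examples above):

```
# Deep copy of the prompt; watch callbacks are not carried over.
detached = say_hello.clone(skip_callbacks=True)
```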
process(chat: Chat) -> R

Parses a completed Chat into the output type of the prompt.
render(*args: P.args, **kwargs: P.kwargs) -> str

Pass the arguments to the jinja2 template and render the full prompt.
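For example, using the say_hello prompt from above, render produces the final prompt text without performing any generation:

```
# Renders the jinja2 template with the given arguments.
print(say_hello.render("the world"))
```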
run(*args: P.args, **kwargs: P.kwargs) -> R  (async)

Use the prompt to run the function with the provided arguments and return the output.

Parameters:

- *args (P.args): The positional arguments for the prompt function.
- **kwargs (P.kwargs): The keyword arguments for the prompt function.

Returns:

- R: The output of the prompt function.
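A short sketch, assuming the prompt has a generator available (as with write_joke in the rigging.prompt.prompt example below, which binds generator_id at decoration time):

```
# Runs one generation and parses the response into the return type.
joke = await write_joke.run("programming")
```

Per the description of rigging.prompt.prompt below, calling the prompt object directly (await write_joke("programming")) has the same effect.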
run_many(count: int, /, *args: P.args, **kwargs: P.kwargs) -> list[R]  (async)

Use the prompt to run the function multiple times with the provided arguments and return the outputs.

Parameters:

- count (int): The number of times to run the prompt.
- *args (P.args): The positional arguments for the prompt function.
- **kwargs (P.kwargs): The keyword arguments for the prompt function.

Returns:

- list[R]: The outputs of the prompt function.
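Continuing the hedged write_joke sketch from above:

```
# Three independent generations, each parsed into the return type.
jokes = await write_joke.run_many(3, "programming")
```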
run_over(generators: t.Sequence[Generator | str], /, *args: P.args, **kwargs: P.kwargs) -> list[R]  (async)

Executes the prompt process across multiple generators.

For each generator, a pipeline is created and the generator is replaced before the run call. All callbacks and parameters are preserved. If this prompt has a pipeline assigned, it will be included in the run.

Warning: The implementation currently skips any failed chats and only processes successful chats. This may change in the future.

Parameters:

- generators (Sequence[Generator | str]): A sequence of generators to be used for the generation process.

Returns:

- list[R]: The outputs of the prompt function for each successful generator.
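A sketch along the same lines (the generator ids are illustrative):

```
# One run per generator; per the warning above, failed chats are
# skipped, so the result list may be shorter than the input list.
results = await write_joke.run_over(["gpt-4o", "gpt-4"], "programming")
```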
set_(attempt_recovery: bool | None = None, drop_dialog: bool | None = None, max_rounds: int | None = None) -> Prompt[P, R]

Helper to allow updates to the parsing configuration.

Parameters:

- attempt_recovery (bool | None, default: None): Whether the prompt should attempt to recover from errors in output parsing.
- drop_dialog (bool | None, default: None): When attempting recovery, whether to drop intermediate dialog while parsing was being resolved.
- max_rounds (int | None, default: None): The maximum number of rounds the prompt should try to reparse outputs.

Returns:

- Prompt[P, R]: Self
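For example:

```
# Loosen the parsing behavior: more reparse rounds, and keep the
# intermediate dialog in the final chat.
write_joke.set_(max_rounds=5, drop_dialog=False)
```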
watch(*callbacks: WatchChatCallback) -> Prompt[P, R]

Registers a callback to monitor any chats produced for this prompt.

Parameters:

- *callbacks (WatchChatCallback): The callback functions to be executed.

```
async def log(chats: list[Chat]) -> None:
    ...

@rg.prompt()
async def summarize(text: str) -> str:
    ...

summarize.watch(log)(...)
```

```
async def log(chats: list[Chat]) -> None:
    ...

async def _summarize(text: str) -> str:
    ...

summarize = rg.prompt(_summarize).watch(log)
```

Returns:

- Prompt[P, R]: Self
with_(params: t.Optional[GenerateParams] = None, **kwargs: t.Any) -> Prompt[P, R]

Assign specific generation parameter overloads for this prompt.

Parameters:

- params (Optional[GenerateParams], default: None): The parameters to set for the underlying chat pipeline.
- **kwargs (Any): An alternative way to pass parameters as keyword arguments.

Returns:

- Prompt[P, R]: Self
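A brief sketch (assuming temperature and max_tokens are fields on GenerateParams):

```
# Both forms set the same overloads for this prompt.
write_joke.with_(rg.GenerateParams(temperature=0.7))
write_joke.with_(temperature=0.7, max_tokens=256)
```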
prompt(func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R] | None = None, /, *, pipeline: ChatPipeline | None = None, generator: Generator | None = None, generator_id: str | None = None) -> t.Callable[[t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R]], Prompt[P, R]] | Prompt[P, R]

Convert a hollow function into a Prompt, which can be called directly or passed a chat pipeline to execute the function and parse the outputs.

```
from dataclasses import dataclass

import rigging as rg

@dataclass
class ExplainedJoke:
    chat: rg.Chat
    setup: str
    punchline: str
    explanation: str

@rg.prompt(generator_id="gpt-3.5-turbo")
async def write_joke(topic: str) -> ExplainedJoke:
    """Write a joke."""
    ...

await write_joke("programming")
```
Note: A docstring is not required, but it can be used to provide guidance to the model, or even to handle any number of input transformations. Any input parameter which is not handled inside the docstring will be automatically added and formatted internally.
Note: Output parameters can be basic types, dataclasses, rigging models, lists, or tuples. Internal inspection will attempt to ensure your output types are valid, but there is no guarantee of complete coverage/safety. It's recommended to check rigging.prompt.Prompt.template to inspect the generated jinja2 template.
Note: If you annotate the return value of the function as a rigging.chat.Chat object, then no output parsing will take place and you can parse objects out manually. You can also use Chat in any number of type annotations inside tuples or dataclasses; all instances will be filled with the final chat object transparently.
Note: All input parameters and output types can be annotated with the rigging.prompt.Ctx annotation to provide additional context for the prompt. This can be used to override the xml tag, provide a prefix string, or example content which will be placed inside output xml tags. In the case of output parameters, especially in tuples, you might have xml tag collisions between the same basic types. Manually annotating xml tags with rigging.prompt.Ctx is recommended.
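A hedged sketch of that recommendation (the function and tag names are illustrative), assuming Ctx is attached through typing.Annotated:

```
import typing as t

import rigging as rg
from rigging.prompt import Ctx

# Two str outputs in one tuple would otherwise collide on the same
# default xml tag; distinct tags keep parsing unambiguous.
@rg.prompt(generator_id="gpt-3.5-turbo")
async def name_and_slogan(product: str) -> tuple[
    t.Annotated[str, Ctx(tag="name")],
    t.Annotated[str, Ctx(tag="slogan")],
]:
    """Invent a product name and a slogan for {{ product }}."""
```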
Parameters:

- func: The function to convert into a prompt.
- pipeline: An optional pipeline to use for the prompt.
- generator: An optional generator to use for the prompt.
- generator_id: An optional generator id to use for the prompt.

Returns:

- A prompt instance or a function that can be used to create a prompt.