steamship.agents.functional package#

Submodules#

steamship.agents.functional.functions_based module#

class steamship.agents.functional.functions_based.FunctionsBasedAgent(tools: List[Tool], llm: ChatLLM, *, message_selector: MessageSelector = NoMessages(), output_parser: OutputParser, PROMPT: str = 'You are a helpful AI assistant.\n\nNOTE: Some functions return images, video, and audio files. These multimedia files will be represented in messages as\nUUIDs for Steamship Blocks. When responding directly to a user, you SHOULD print the Steamship Blocks for the images,\nvideo, or audio as follows: `Block(UUID for the block)`.\n\nExample response for a request that generated an image:\nHere is the image you requested: Block(288A2CA1-4753-4298-9716-53C1E42B726B).\n\nOnly use the functions you have been provided with.')[source]#

Bases: ChatAgent

Selects actions for an AgentService based on OpenAI function-calling style LLM prompting.

build_chat_history_for_tool(context: AgentContext) → List[Block][source]#
default_system_message() → str | None[source]#

The default system message used by Agents to drive LLM instruction.

Non-chat-based Agents should always return None. Chat-based Agents should override this method to provide a default prompt.

next_action(context: AgentContext) → Action[source]#
record_action_run(action: Action, context: AgentContext)[source]#
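
A minimal construction sketch. Only the signature above is taken from this page; the Steamship client, the ChatOpenAI LLM (steamship.agents.llms.openai), and SearchTool (steamship.agents.tools.search) are assumptions about the surrounding steamship installation.

```python
from steamship import Steamship
from steamship.agents.functional import FunctionsBasedAgent
from steamship.agents.llms.openai import ChatOpenAI  # assumed import path
from steamship.agents.tools.search import SearchTool  # assumed import path

client = Steamship()  # assumes a configured workspace / API key

# The agent wraps a function-calling ChatLLM plus the tools it may invoke.
# The output_parser is normally constructed internally from the tool list,
# so only tools and llm are passed here; PROMPT keeps its default.
agent = FunctionsBasedAgent(
    tools=[SearchTool()],
    llm=ChatOpenAI(client, temperature=0),
)
```

An AgentService then drives the agent by repeatedly calling next_action(context) and running the returned Actions until a final reply is produced.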

steamship.agents.functional.output_parser module#

class steamship.agents.functional.output_parser.FunctionsBasedOutputParser(*, tools_lookup_dict: Dict[str, Tool] | None = None)[source]#

Bases: OutputParser

parse(text: str, context: AgentContext) → Action[source]#

Convert text into an Action object.

tools_lookup_dict: Dict[str, Tool] | None#
steamship.agents.functional.output_parser.is_punctuation(text: str)[source]#
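
To show where parse fits, here is a hypothetical helper mirroring the loop FunctionsBasedAgent runs internally: a raw ChatLLM completion goes into the parser, and either a tool invocation or a final reply comes back as an Action. The tools_lookup_dict keyword follows the signature documented above; note that FunctionsBasedAgent normally constructs this parser itself, so building one by hand is purely illustrative.

```python
from typing import List

from steamship.agents.functional import FunctionsBasedOutputParser
from steamship.agents.schema import Action, AgentContext, Tool


def text_to_action(llm_text: str, tools: List[Tool], context: AgentContext) -> Action:
    """Hypothetical helper: turn a raw LLM completion into an Action."""
    # Map tool names to Tool instances so function-call completions can be
    # resolved to the matching tool (keyword per the documented signature).
    parser = FunctionsBasedOutputParser(
        tools_lookup_dict={tool.name: tool for tool in tools}
    )
    # Function-call style output resolves to an Action targeting that tool;
    # plain assistant text resolves to a finishing Action carrying the reply.
    return parser.parse(llm_text, context)
```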

Module contents#

class steamship.agents.functional.FunctionsBasedAgent(tools: List[Tool], llm: ChatLLM, *, message_selector: MessageSelector = NoMessages(), output_parser: OutputParser, PROMPT: str = 'You are a helpful AI assistant.\n\nNOTE: Some functions return images, video, and audio files. These multimedia files will be represented in messages as\nUUIDs for Steamship Blocks. When responding directly to a user, you SHOULD print the Steamship Blocks for the images,\nvideo, or audio as follows: `Block(UUID for the block)`.\n\nExample response for a request that generated an image:\nHere is the image you requested: Block(288A2CA1-4753-4298-9716-53C1E42B726B).\n\nOnly use the functions you have been provided with.')[source]#

Bases: ChatAgent

Selects actions for an AgentService based on OpenAI function-calling style LLM prompting.

build_chat_history_for_tool(context: AgentContext) → List[Block][source]#
default_system_message() → str | None[source]#

The default system message used by Agents to drive LLM instruction.

Non-chat-based Agents should always return None. Chat-based Agents should override this method to provide a default prompt.

next_action(context: AgentContext) → Action[source]#
record_action_run(action: Action, context: AgentContext)[source]#
class steamship.agents.functional.FunctionsBasedOutputParser(*, tools_lookup_dict: Dict[str, Tool] | None = None)[source]#

Bases: OutputParser

parse(text: str, context: AgentContext) → Action[source]#

Convert text into an Action object.

tools_lookup_dict: Dict[str, Tool] | None#
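
As a closing sketch, the package-level exports above are typically wired into an AgentService subclass. The registration hook (set_default_agent) and the ChatOpenAI / SearchTool imports are assumptions about the installed steamship release rather than anything documented on this page.

```python
from steamship.agents.functional import FunctionsBasedAgent
from steamship.agents.llms.openai import ChatOpenAI  # assumed import path
from steamship.agents.service.agent_service import AgentService
from steamship.agents.tools.search import SearchTool  # assumed import path


class MyAssistant(AgentService):
    """Hypothetical service that answers questions with web search."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Register the functions-based agent as this service's agent.
        # set_default_agent is an assumption; some releases assign an
        # attribute on the service instead.
        self.set_default_agent(
            FunctionsBasedAgent(
                tools=[SearchTool()],
                llm=ChatOpenAI(self.client, temperature=0),
            )
        )
```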