steamship.data.plugin package#
Submodules#
steamship.data.plugin.hosting module#
- class steamship.data.plugin.hosting.HostingCpu(value)[source]#
The amount of CPU required for deployment.
This is mapped to a value dependent on the HostingType it is combined with.
- LG = 'lg'#
- MAX = 'max'#
- MD = 'md'#
- MIN = 'min'#
- SM = 'sm'#
- XL = 'xl'#
- XS = 'xs'#
- XXL = 'xxl'#
- XXS = 'xxs'#
- class steamship.data.plugin.hosting.HostingEnvironment(value)[source]#
The software environment required for deployment.
- PYTHON38 = 'python38'#
- STEAMSHIP_PYTORCH_CPU = 'inferenceCpu'#
- class steamship.data.plugin.hosting.HostingMemory(value)[source]#
The amount of memory required for deployment.
This is mapped to a value dependent on the HostingType it is combined with.
- LG = 'lg'#
- MAX = 'max'#
- MD = 'md'#
- MIN = 'min'#
- SM = 'sm'#
- XL = 'xl'#
- XS = 'xs'#
- XXL = 'xxl'#
- XXS = 'xxs'#
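The hosting enums above are plain value-backed enums, so selecting a tier is just a matter of referencing a member. A minimal sketch using the string values documented above:

```python
from steamship.data.plugin.hosting import (
    HostingCpu,
    HostingEnvironment,
    HostingMemory,
)

# Each member maps to a short string code; the engine resolves that code to a
# concrete allocation depending on the HostingType it is combined with.
cpu = HostingCpu.MD
memory = HostingMemory.LG
env = HostingEnvironment.PYTHON38

assert cpu.value == "md"
assert memory.value == "lg"
assert env.value == "python38"
```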
steamship.data.plugin.index_plugin_instance module#
- class steamship.data.plugin.index_plugin_instance.EmbedderInvocation(*, pluginHandle: str, instanceHandle: Optional[str] = None, config: Optional[Dict[str, Any]] = None, version: Optional[str] = None, fetchIfExists: bool = True)[source]#
Bases: CamelModel
The parameters capable of creating/fetching an Embedder (Tagger) Plugin Instance.
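A minimal construction sketch; the handle and config values are hypothetical, and the keyword names follow the camelCase aliases shown in the signature above:

```python
from steamship.data.plugin.index_plugin_instance import EmbedderInvocation

# Describe the embedder (a Tagger plugin) that an index should create or fetch.
invocation = EmbedderInvocation(
    pluginHandle="my-embedder",         # hypothetical plugin handle
    config={"model": "example-model"},  # hypothetical embedder config
    fetchIfExists=True,                 # re-use an existing instance if present
)
```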
- class steamship.data.plugin.index_plugin_instance.EmbeddingIndexPluginInstance(*, client: Client = None, id: str = None, handle: str = None, pluginId: str = None, pluginVersionId: str = None, pluginHandle: Optional[str] = None, pluginVersionHandle: Optional[str] = None, workspaceId: Optional[str] = None, userId: str = None, config: Dict[str, Any] = None, hostingType: Optional[HostingType] = None, hostingCpu: Optional[HostingCpu] = None, hostingMemory: Optional[HostingMemory] = None, hostingTimeout: Optional[HostingTimeout] = None, hostingEnvironment: Optional[HostingEnvironment] = None, initStatus: Optional[InvocableInitStatus] = None, embedder: PluginInstance = None, index: EmbeddingIndex = None)[source]#
Bases: PluginInstance
A persistent, read-optimized index over embeddings.
This is currently implemented as an object that behaves like a PluginInstance, even though on the back-end it is not implemented as one.
- static create(client: Any, plugin_id: Optional[str] = None, plugin_handle: Optional[str] = None, plugin_version_id: Optional[str] = None, plugin_version_handle: Optional[str] = None, handle: Optional[str] = None, fetch_if_exists: bool = True, config: Optional[Dict[str, Any]] = None) EmbeddingIndexPluginInstance [source]#
Create an object that simulates an embedding index, re-implemented as a PluginInstance.
- delete()[source]#
Delete the EmbeddingIndexPluginInstance.
For now, this deletes the index but not the embedder. This is likely a temporary design.
- embedder: PluginInstance#
- index: EmbeddingIndex#
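A hedged usage sketch, assuming a configured Steamship client; the plugin handle is hypothetical, and the config schema (here, an embedder description) is an assumption, since it is not documented in this section:

```python
from steamship import Steamship
from steamship.data.plugin.index_plugin_instance import EmbeddingIndexPluginInstance

client = Steamship()  # assumes credentials are configured in the environment

index = EmbeddingIndexPluginInstance.create(
    client,
    plugin_handle="embedding-index",  # hypothetical handle
    handle="my-index",                # optional; the engine assigns one if omitted
    fetch_if_exists=True,
    config={"embedder": {"pluginHandle": "my-embedder"}},  # assumed schema
)

# Deleting removes the index but, for now, not the underlying embedder.
index.delete()
```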
- class steamship.data.plugin.index_plugin_instance.SearchResult(*, tag: Optional[Tag] = None, score: Optional[float] = None)[source]#
Bases: CamelModel
A single scored search result, which is always a tag.
This class is intended to eventually replace the QueryResult object currently used with the Embedding layer.
- static from_query_result(query_result: QueryResult) SearchResult [source]#
- class steamship.data.plugin.index_plugin_instance.SearchResults(*, items: List[SearchResult] = None)[source]#
Bases: CamelModel
Results of a search operation, which is always a list of ranked tags.
This class is intended to eventually replace the QueryResults object currently used with the Embedding layer. TODO: add in paging support.
- static from_query_results(query_results: QueryResults) SearchResults [source]#
- items: List[SearchResult]#
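To illustrate the shape of the data, a SearchResults can be built by hand; in practice these objects are returned by index searches (the search call itself is not documented in this section):

```python
from steamship.data.plugin.index_plugin_instance import SearchResult, SearchResults

results = SearchResults(
    items=[SearchResult(score=0.93), SearchResult(score=0.41)]
)

for result in results.items:
    print(result.score, result.tag)  # tag is Optional[Tag]; None in this sketch
```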
steamship.data.plugin.plugin module#
- class steamship.data.plugin.plugin.CreatePluginRequest(*, trainingPlatform: Optional[HostingType] = None, id: str = None, type: str = None, transport: str = None, isPublic: bool = None, handle: str = None, description: str = None, metadata: str = None, fetchIfExists: bool = False)[source]#
Bases: Request
- training_platform: Optional[HostingType]#
- class steamship.data.plugin.plugin.ListPluginsRequest(*, pageSize: Optional[int] = None, pageToken: Optional[str] = None, sortOrder: Optional[SortOrder] = SortOrder.DESC, type: Optional[str] = None)[source]#
Bases: ListRequest
- class steamship.data.plugin.plugin.ListPluginsResponse(*, nextPageToken: Optional[str] = None, plugins: List[Plugin])[source]#
Bases: ListResponse
- class steamship.data.plugin.plugin.Plugin(*, client: Client = None, id: str = None, type: str = None, transport: str = None, isPublic: bool = None, trainingPlatform: Optional[HostingType] = None, handle: str = None, description: str = None, metadata: str = None, profile: Optional[Manifest] = None, readme: Optional[str] = None, userId: Optional[str] = None)[source]#
Bases: CamelModel
- static create(client: Client, description: str, type_: str, transport: str, is_public: bool, handle: Optional[str] = None, training_platform: Optional[HostingType] = None, metadata: Optional[Union[str, Dict, List]] = None, fetch_if_exists: bool = False) Plugin [source]#
- static list(client: Client, t: Optional[str] = None, page_size: Optional[int] = None, page_token: Optional[str] = None, sort_order: Optional[SortOrder] = SortOrder.DESC) ListPluginsResponse [source]#
- training_platform: Optional[HostingType]#
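A minimal sketch of listing plugins, assuming a configured client; the t parameter filters by plugin type, and SortOrder.DESC is the default:

```python
from steamship import Steamship
from steamship.data.plugin.plugin import Plugin

client = Steamship()  # assumes credentials are configured in the environment

# List tagger plugins, one page at a time.
response = Plugin.list(client, t="tagger", page_size=10)
for plugin in response.plugins:
    print(plugin.handle, plugin.description)
```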
- class steamship.data.plugin.plugin.PluginAdapterType(value)[source]#
An enumeration.
- huggingface = 'huggingface'#
- openai = 'openai'#
- steamship_docker = 'steamshipDocker'#
- steamship_sagemaker = 'steamshipSagemaker'#
- class steamship.data.plugin.plugin.PluginTargetType(value)[source]#
An enumeration.
- FILE = 'file'#
- WORKSPACE = 'workspace'#
- class steamship.data.plugin.plugin.PluginType(value)[source]#
An enumeration.
- classifier = 'classifier'#
- embedder = 'embedder'#
- generator = 'generator'#
- parser = 'parser'#
- tagger = 'tagger'#
steamship.data.plugin.plugin_instance module#
- class steamship.data.plugin.plugin_instance.CreatePluginInstanceRequest(*, id: str = None, pluginId: str = None, pluginHandle: str = None, pluginVersionId: str = None, pluginVersionHandle: str = None, handle: str = None, fetchIfExists: bool = None, config: Dict[str, Any] = None)[source]#
Bases: Request
- class steamship.data.plugin.plugin_instance.PluginInstance(*, client: Client = None, id: str = None, handle: str = None, pluginId: str = None, pluginVersionId: str = None, pluginHandle: Optional[str] = None, pluginVersionHandle: Optional[str] = None, workspaceId: Optional[str] = None, userId: str = None, config: Dict[str, Any] = None, hostingType: Optional[HostingType] = None, hostingCpu: Optional[HostingCpu] = None, hostingMemory: Optional[HostingMemory] = None, hostingTimeout: Optional[HostingTimeout] = None, hostingEnvironment: Optional[HostingEnvironment] = None, initStatus: Optional[InvocableInitStatus] = None)[source]#
Bases: CamelModel
- static create(client: Client, plugin_id: Optional[str] = None, plugin_handle: Optional[str] = None, plugin_version_id: Optional[str] = None, plugin_version_handle: Optional[str] = None, handle: Optional[str] = None, fetch_if_exists: bool = True, config: Optional[Dict[str, Any]] = None) PluginInstance [source]#
Create a plugin instance.
When handle is empty, the engine will automatically assign one. fetch_if_exists controls whether to re-use an existing plugin instance rather than create a new one. A usage sketch follows this class listing.
- delete() PluginInstance [source]#
- generate(input_file_id: str = None, input_file_start_block_index: int = None, input_file_end_block_index: Optional[int] = None, input_file_block_index_list: Optional[List[int]] = None, text: Optional[str] = None, block_query: Optional[str] = None, append_output_to_file: bool = False, output_file_id: Optional[str] = None, make_output_public: Optional[bool] = None, options: Optional[dict] = None) Task[GenerateResponse] [source]#
See GenerateRequest for a description of the parameter options.
- static get(client: Client, handle: str) PluginInstance [source]#
- get_training_parameters(training_request: TrainingParameterPluginInput) TrainingParameterPluginOutput [source]#
- hosting_cpu: Optional[HostingCpu]#
- hosting_environment: Optional[HostingEnvironment]#
- hosting_memory: Optional[HostingMemory]#
- hosting_timeout: Optional[HostingTimeout]#
- hosting_type: Optional[HostingType]#
- init_status: Optional[InvocableInitStatus]#
- train(training_request: TrainingParameterPluginInput = None, training_epochs: Optional[int] = None, export_query: Optional[str] = None, testing_holdout_percent: Optional[float] = None, test_split_seed: Optional[int] = None, training_params: Optional[Dict] = None, inference_params: Optional[Dict] = None) Task[TrainPluginOutput] [source]#
Train a plugin instance. Provide either training_request OR the individual parameters; if training_request is passed, all other parameters are ignored (it is kept for backwards compatibility).
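A hedged sketch of creating an instance and invoking generate(), assuming a configured client; the plugin handle and config are hypothetical:

```python
from steamship import Steamship
from steamship.data.plugin.plugin_instance import PluginInstance

client = Steamship()  # assumes credentials are configured in the environment

instance = PluginInstance.create(
    client,
    plugin_handle="my-generator",  # hypothetical plugin handle
    config={},                     # plugin-specific configuration
)

# generate() is asynchronous: it returns a Task[GenerateResponse].
task = instance.generate(text="Hello, world!")
task.wait()
print(task.output)
```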
steamship.data.plugin.plugin_version module#
- class steamship.data.plugin.plugin_version.CreatePluginVersionRequest(*, pluginId: str = None, handle: str = None, hostingMemory: Optional[HostingMemory] = None, hostingTimeout: Optional[HostingTimeout] = None, hostingHandler: str = None, isPublic: bool = None, isDefault: bool = None, type: str = 'file', configTemplate: str = None)[source]#
Bases: Request
- hosting_memory: Optional[HostingMemory]#
- hosting_timeout: Optional[HostingTimeout]#
- class steamship.data.plugin.plugin_version.ListPluginVersionsRequest(*, handle: str, pluginId: str)[source]#
Bases: Request
- class steamship.data.plugin.plugin_version.ListPluginVersionsResponse(*, plugins: List[PluginVersion])[source]#
Bases: Response
- plugins: List[PluginVersion]#
- class steamship.data.plugin.plugin_version.PluginVersion(*, client: Client = None, id: str = None, pluginId: str = None, handle: str = None, hostingMemory: Optional[HostingMemory] = None, hostingTimeout: Optional[HostingTimeout] = None, hostingHandler: str = None, isPublic: bool = None, isDefault: bool = None, configTemplate: Dict[str, Any] = None)[source]#
Bases: CamelModel
- static create(client: Client, handle: str, plugin_id: str = None, filename: str = None, filebytes: bytes = None, hosting_memory: Optional[HostingMemory] = None, hosting_timeout: Optional[HostingTimeout] = None, hosting_handler: str = None, is_public: bool = None, is_default: bool = None, config_template: Dict[str, Any] = None) Task[PluginVersion] [source]#
- hosting_memory: Optional[HostingMemory]#
- hosting_timeout: Optional[HostingTimeout]#
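A hedged deployment sketch, assuming a configured client and an existing plugin; the plugin id, bundle file name, and version handle are hypothetical:

```python
from steamship import Steamship
from steamship.data.plugin import HostingMemory, HostingTimeout
from steamship.data.plugin.plugin_version import PluginVersion

client = Steamship()  # assumes credentials are configured in the environment

with open("plugin_bundle.zip", "rb") as f:  # hypothetical code bundle
    task = PluginVersion.create(
        client,
        handle="1.0.0",                     # hypothetical version handle
        plugin_id="<plugin-id>",            # hypothetical plugin id
        filename="plugin_bundle.zip",
        filebytes=f.read(),
        hosting_memory=HostingMemory.LG,    # request the "lg" memory tier
        hosting_timeout=HostingTimeout.XL,  # request the "xl" timeout tier
    )

task.wait()            # create() returns a Task[PluginVersion]
version = task.output
```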
steamship.data.plugin.prompt_generation_plugin_instance module#
- class steamship.data.plugin.prompt_generation_plugin_instance.PromptGenerationPluginInstance(*, client: Client = None, id: str = None, handle: str = None, pluginId: str = None, pluginVersionId: str = None, pluginHandle: Optional[str] = None, pluginVersionHandle: Optional[str] = None, workspaceId: Optional[str] = None, userId: str = None, config: Dict[str, Any] = None, hostingType: Optional[HostingType] = None, hostingCpu: Optional[HostingCpu] = None, hostingMemory: Optional[HostingMemory] = None, hostingTimeout: Optional[HostingTimeout] = None, hostingEnvironment: Optional[HostingEnvironment] = None, initStatus: Optional[InvocableInitStatus] = None)[source]#
Bases: PluginInstance
An instance of a configured prompt completion service such as GPT-3.
The generate method synchronously invokes the prompt against a set of variables that parameterize it. The return value is a single string.
- Example Usage:

```python
llm = Steamship.use("prompt-generation-default", config={"temperature": 0.9})
PROMPT = "Greet {name} as if he were a {relation}."
greeting = llm.generate(PROMPT, {"name": "Ted", "relation": "old friend"})
```
- static create(client: Client, plugin_id: Optional[str] = None, plugin_handle: Optional[str] = None, plugin_version_id: Optional[str] = None, plugin_version_handle: Optional[str] = None, handle: Optional[str] = None, fetch_if_exists: bool = True, config: Optional[Dict[str, Any]] = None) PromptGenerationPluginInstance [source]#
Create a plugin instance.
When handle is empty, the engine will automatically assign one. fetch_if_exists controls whether to re-use an existing plugin instance rather than create a new one.
Module contents#
- class steamship.data.plugin.HostingCpu(value)[source]#
The amount of CPU required for deployment.
This is mapped to a value dependent on the HostingType it is combined with.
- LG = 'lg'#
- MAX = 'max'#
- MD = 'md'#
- MIN = 'min'#
- SM = 'sm'#
- XL = 'xl'#
- XS = 'xs'#
- XXL = 'xxl'#
- XXS = 'xxs'#
- class steamship.data.plugin.HostingEnvironment(value)[source]#
The software environment required for deployment.
- PYTHON38 = 'python38'#
- STEAMSHIP_PYTORCH_CPU = 'inferenceCpu'#
- class steamship.data.plugin.HostingMemory(value)[source]#
The amount of memory required for deployment.
This is mapped to a value dependent on the HostingType it is combined with.
- LG = 'lg'#
- MAX = 'max'#
- MD = 'md'#
- MIN = 'min'#
- SM = 'sm'#
- XL = 'xl'#
- XS = 'xs'#
- XXL = 'xxl'#
- XXS = 'xxs'#
- class steamship.data.plugin.HostingTimeout(value)[source]#
The request timeout required for deployment.
This is mapped to a value dependent on the HostingType it is combined with.
- LG = 'lg'#
- MAX = 'max'#
- MD = 'md'#
- MIN = 'min'#
- SM = 'sm'#
- XL = 'xl'#
- XS = 'xs'#
- XXL = 'xxl'#
- XXS = 'xxs'#
- class steamship.data.plugin.HostingType(value)[source]#
The type of hosting provider to deploy to.
- ECS = 'ecs'#
- LAMBDA = 'lambda'#
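Like the sizing enums, HostingTimeout and HostingType are plain value-backed enums; a minimal sketch using the values documented above:

```python
from steamship.data.plugin import HostingTimeout, HostingType

target = HostingType.LAMBDA
timeout = HostingTimeout.XL

assert target.value == "lambda"
assert timeout.value == "xl"
```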
- class steamship.data.plugin.Plugin(*, client: Client = None, id: str = None, type: str = None, transport: str = None, isPublic: bool = None, trainingPlatform: Optional[HostingType] = None, handle: str = None, description: str = None, metadata: str = None, profile: Optional[Manifest] = None, readme: Optional[str] = None, userId: Optional[str] = None)[source]#
Bases: CamelModel
- static create(client: Client, description: str, type_: str, transport: str, is_public: bool, handle: Optional[str] = None, training_platform: Optional[HostingType] = None, metadata: Optional[Union[str, Dict, List]] = None, fetch_if_exists: bool = False) Plugin [source]#
- static list(client: Client, t: Optional[str] = None, page_size: Optional[int] = None, page_token: Optional[str] = None, sort_order: Optional[SortOrder] = SortOrder.DESC) ListPluginsResponse [source]#
- training_platform: Optional[HostingType]#
- class steamship.data.plugin.PluginAdapterType(value)[source]#
An enumeration.
- huggingface = 'huggingface'#
- openai = 'openai'#
- steamship_docker = 'steamshipDocker'#
- steamship_sagemaker = 'steamshipSagemaker'#
- class steamship.data.plugin.PluginInstance(*, client: Client = None, id: str = None, handle: str = None, pluginId: str = None, pluginVersionId: str = None, pluginHandle: Optional[str] = None, pluginVersionHandle: Optional[str] = None, workspaceId: Optional[str] = None, userId: str = None, config: Dict[str, Any] = None, hostingType: Optional[HostingType] = None, hostingCpu: Optional[HostingCpu] = None, hostingMemory: Optional[HostingMemory] = None, hostingTimeout: Optional[HostingTimeout] = None, hostingEnvironment: Optional[HostingEnvironment] = None, initStatus: Optional[InvocableInitStatus] = None)[source]#
Bases: CamelModel
- static create(client: Client, plugin_id: Optional[str] = None, plugin_handle: Optional[str] = None, plugin_version_id: Optional[str] = None, plugin_version_handle: Optional[str] = None, handle: Optional[str] = None, fetch_if_exists: bool = True, config: Optional[Dict[str, Any]] = None) PluginInstance [source]#
Create a plugin instance.
When handle is empty, the engine will automatically assign one. fetch_if_exists controls whether to re-use an existing plugin instance rather than create a new one.
- delete() PluginInstance [source]#
- generate(input_file_id: str = None, input_file_start_block_index: int = None, input_file_end_block_index: Optional[int] = None, input_file_block_index_list: Optional[List[int]] = None, text: Optional[str] = None, block_query: Optional[str] = None, append_output_to_file: bool = False, output_file_id: Optional[str] = None, make_output_public: Optional[bool] = None, options: Optional[dict] = None) Task[GenerateResponse] [source]#
See GenerateRequest for a description of the parameter options.
- static get(client: Client, handle: str) PluginInstance [source]#
- get_training_parameters(training_request: TrainingParameterPluginInput) TrainingParameterPluginOutput [source]#
- hosting_cpu: Optional[HostingCpu]#
- hosting_environment: Optional[HostingEnvironment]#
- hosting_memory: Optional[HostingMemory]#
- hosting_timeout: Optional[HostingTimeout]#
- hosting_type: Optional[HostingType]#
- init_status: Optional[InvocableInitStatus]#
- train(training_request: TrainingParameterPluginInput = None, training_epochs: Optional[int] = None, export_query: Optional[str] = None, testing_holdout_percent: Optional[float] = None, test_split_seed: Optional[int] = None, training_params: Optional[Dict] = None, inference_params: Optional[Dict] = None) Task[TrainPluginOutput] [source]#
Train a plugin instance. Provide either training_request OR the individual parameters; if training_request is passed, all other parameters are ignored (it is kept for backwards compatibility).
- class steamship.data.plugin.PluginTargetType(value)[source]#
An enumeration.
- FILE = 'file'#
- WORKSPACE = 'workspace'#
- class steamship.data.plugin.PluginType(value)[source]#
An enumeration.
- classifier = 'classifier'#
- embedder = 'embedder'#
- generator = 'generator'#
- parser = 'parser'#
- tagger = 'tagger'#
- class steamship.data.plugin.PluginVersion(*, client: Client = None, id: str = None, pluginId: str = None, handle: str = None, hostingMemory: Optional[HostingMemory] = None, hostingTimeout: Optional[HostingTimeout] = None, hostingHandler: str = None, isPublic: bool = None, isDefault: bool = None, configTemplate: Dict[str, Any] = None)[source]#
Bases: CamelModel
- static create(client: Client, handle: str, plugin_id: str = None, filename: str = None, filebytes: bytes = None, hosting_memory: Optional[HostingMemory] = None, hosting_timeout: Optional[HostingTimeout] = None, hosting_handler: str = None, is_public: bool = None, is_default: bool = None, config_template: Dict[str, Any] = None) Task[PluginVersion] [source]#
- hosting_memory: Optional[HostingMemory]#
- hosting_timeout: Optional[HostingTimeout]#