From 2587849fea3b7cc58692a27e169882598e765327 Mon Sep 17 00:00:00 2001 From: Carlos Coelho <80289056+carlosrcoelho@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:55:54 -0300 Subject: [PATCH] Update docs (#1567) * Add new documentation files and update package dependencies * Refactor tweak application logic in process_tweaks function * Add dynamic function creation and execution helpers * Refactor build method to be asynchronous * Add FlowToolComponent to handle flows as tools * Update RunFlowComponent to include a method for updating build config * Fix duplicated first layer results * Refactor vertex building and streaming endpoints * Add base_name attribute to Vertex class * Refactor flow.py to generate dynamic flow functions and build schemas * Refactor FlowToolComponent in FlowTool.py * Add JSONInputComponent to load JSON object as input * Update render_tool_description method in XMLAgent.py * Refactor XMLAgentComponent.render_tool_description() method * Refactor SearchApi.py to include typing and handle empty records * Refactor SearchApi class to simplify code * Add SearchApi and SearchApiTool components * Refactor ServiceFactory and Dependencies (#1560) * Update dependencies for OpenTelemetry * Update service dependency logic and add first version of telemetry service * Remove telemetry service and related code * Update cache service references * Refactor imports in env.py * Refactor code for initializing services and socketio server * Refactor parameterComponent to use inline button_text * Refactor build_vertex method and add RunnableVerticesManager class * Add import statement and update build_vertex function * Add import statement for SettingsService in MonitorServiceFactory.create() method * Refactor build_schema_from_inputs to use display_name and description for field names and descriptions respectively * Refactor graph building and running logic * Update input type mappings and function arguments * Update default values for input types in flow.py * Remove console.log statement in flowStore.ts * Add vertices_to_run field to VerticesOrderResponse * Add input_value parameter to chain components * Refactor CSVAgent build method to include handle_parse_errors parameter * Add agent_type parameter to CSVAgent build method * Update model imports in component files * Add LCAgentComponent and XMLAgentComponent * Add "agents" category to NATIVE_CATEGORIES * Refactor model.py to support chat models * Add system_message parameter to model components * Update CSVAgent.py: handle_parsing_errors and agent_type options * Add ping animation to update button * Fix encryption and decryption of API keys * Update CSVAgentComponent constructor * Refactor inputs parameter to inputs_dict in build_vertex function * Removes "component" table and drops "flowstyle" table * Delete component model and init files * Removes "flowstyle" table and drops "user" table index * Add typing import to CohereModel.py * Fix ShareModal rendering issue * Update models docs * Changed vector-stores docs * Update component documentation * Add AstraDB and AstraDBSearch components for AstraDB Vector Store docs * Rename GetNotified to Listen * Update GetNotifiedComponent import * Remove unused imports in flow-runner.mdx and features.mdx * Add new documentation files and update existing files * Update package versions in package-lock.json * Remove unused files * Delete run-flow.mdx file * Update topics * Add new file run-flow.mdx --------- Co-authored-by: Gabriel Luiz Freitas Almeida Co-authored-by: anovazzi1 --- 
docs/docs/components/models.mdx | 464 +++++++++++++ docs/docs/components/prompts.mdx | 2 +- docs/docs/components/vector-stores.mdx | 638 +++++++++++++++++- docs/docs/examples/flow-runner.mdx | 6 +- docs/docs/guidelines/features.mdx | 11 +- docs/docs/guides/compatibility.mdx | 0 .../component-status-and-data-passing.mdx | 0 .../guides/connecting-output-components.mdx | 0 docs/docs/guides/custom-component.mdx | 0 docs/docs/guides/experimental-components.mdx | 0 docs/docs/guides/flow-of-data.mdx | 0 docs/docs/guides/global-variables.mdx | 0 docs/docs/guides/inputs-and-outputs.mdx | 0 docs/docs/guides/multiple-flows.mdx | 0 .../guides/new-categories-and-components.mdx | 0 .../docs/guides/passing-tweaks-and-inputs.mdx | 0 .../renaming-and-editing-components.mdx | 0 docs/docs/guides/run-flow.mdx | 0 .../guides/sidebar-and-interaction-panel.mdx | 0 docs/docs/guides/state-management.mdx | 0 docs/docs/guides/supported-frameworks.mdx | 0 docs/docs/guides/text-and-record.mdx | 0 docs/docs/whats-new/customization-control.mdx | 1 + docs/docs/whats-new/debugging-reimagined.mdx | 1 + .../whats-new/migrating-to-one-point-zero.mdx | 124 ++++ .../simplification-standardization.mdx | 1 + docs/package-lock.json | 441 ++++++------ docs/package.json | 10 +- docs/sidebars.js | 38 +- .../{GetNotified.py => Listen.py} | 6 +- .../components/experimental/__init__.py | 2 +- src/backend/base/langflow/services/manager.py | 4 +- src/backend/langflow/base/agents/__init__.py | 0 src/backend/langflow/base/agents/agent.py | 70 ++ src/backend/langflow/base/models/__init__.py | 3 + src/backend/langflow/base/models/model.py | 48 ++ .../components/experimental/FlowTool.py | 85 +++ .../components/tools/SearchAPITool.py | 37 + 38 files changed, 1740 insertions(+), 252 deletions(-) create mode 100644 docs/docs/guides/compatibility.mdx create mode 100644 docs/docs/guides/component-status-and-data-passing.mdx create mode 100644 docs/docs/guides/connecting-output-components.mdx create mode 100644 docs/docs/guides/custom-component.mdx create mode 100644 docs/docs/guides/experimental-components.mdx create mode 100644 docs/docs/guides/flow-of-data.mdx create mode 100644 docs/docs/guides/global-variables.mdx create mode 100644 docs/docs/guides/inputs-and-outputs.mdx create mode 100644 docs/docs/guides/multiple-flows.mdx create mode 100644 docs/docs/guides/new-categories-and-components.mdx create mode 100644 docs/docs/guides/passing-tweaks-and-inputs.mdx create mode 100644 docs/docs/guides/renaming-and-editing-components.mdx create mode 100644 docs/docs/guides/run-flow.mdx create mode 100644 docs/docs/guides/sidebar-and-interaction-panel.mdx create mode 100644 docs/docs/guides/state-management.mdx create mode 100644 docs/docs/guides/supported-frameworks.mdx create mode 100644 docs/docs/guides/text-and-record.mdx create mode 100644 docs/docs/whats-new/customization-control.mdx create mode 100644 docs/docs/whats-new/debugging-reimagined.mdx create mode 100644 docs/docs/whats-new/migrating-to-one-point-zero.mdx create mode 100644 docs/docs/whats-new/simplification-standardization.mdx rename src/backend/base/langflow/components/experimental/{GetNotified.py => Listen.py} (75%) create mode 100644 src/backend/langflow/base/agents/__init__.py create mode 100644 src/backend/langflow/base/agents/agent.py create mode 100644 src/backend/langflow/base/models/__init__.py create mode 100644 src/backend/langflow/base/models/model.py create mode 100644 src/backend/langflow/components/experimental/FlowTool.py create mode 100644 
src/backend/langflow/components/tools/SearchAPITool.py diff --git a/docs/docs/components/models.mdx b/docs/docs/components/models.mdx index e69de29bb2..9d4f672575 100644 --- a/docs/docs/components/models.mdx +++ b/docs/docs/components/models.mdx @@ -0,0 +1,464 @@ +import Admonition from '@theme/Admonition'; + +# Models + + +

+ We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝 +

+
+ +### AmazonBedrock + +This component facilitates the generation of text using the LLM (Large Language Model) model from Amazon Bedrock. + +**Params** + +- **Input Value:** Specifies the input text for text generation. + +- **System Message (Optional):** A system message to pass to the model. + +- **Model ID (Optional):** Specifies the model ID to be used for text generation. Defaults to _`"anthropic.claude-instant-v1"`_. Available options include: + - _`"ai21.j2-grande-instruct"`_ + - _`"ai21.j2-jumbo-instruct"`_ + - _`"ai21.j2-mid"`_ + - _`"ai21.j2-mid-v1"`_ + - _`"ai21.j2-ultra"`_ + - _`"ai21.j2-ultra-v1"`_ + - _`"anthropic.claude-instant-v1"`_ + - _`"anthropic.claude-v1"`_ + - _`"anthropic.claude-v2"`_ + - _`"cohere.command-text-v14"`_ + +- **Credentials Profile Name (Optional):** Specifies the name of the credentials profile. + +- **Region Name (Optional):** Specifies the region name. + +- **Model Kwargs (Optional):** Additional keyword arguments for the model. + +- **Endpoint URL (Optional):** Specifies the endpoint URL. + +- **Streaming (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **Cache (Optional):** Specifies whether to cache the response. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + + +

+ Ensure that the necessary credentials are provided to connect to the Amazon Bedrock API. If the connection fails, a `ValueError` is raised.

+
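Under the hood this component configures LangChain's Bedrock integration. As a rough, illustrative sketch only (assuming `langchain-community` and `boto3` are installed and the named AWS profile has Bedrock access; the model ID, region, and kwargs below are placeholders), a roughly equivalent standalone call might look like:

```python
# Illustrative sketch of the LangChain Bedrock call this component roughly maps to.
# Assumes `langchain-community` and `boto3` are installed and the "default" AWS
# profile has Bedrock access; model_id, region, and kwargs are placeholders.
from langchain_community.llms import Bedrock

llm = Bedrock(
    model_id="anthropic.claude-instant-v1",  # Model ID
    credentials_profile_name="default",      # Credentials Profile Name
    region_name="us-east-1",                 # Region Name
    streaming=False,                         # Streaming
    model_kwargs={"temperature": 0.7},       # Model Kwargs
)

print(llm.invoke("Write a one-line greeting."))
```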
+ + +--- + +### AnthropicLLM + +This component allows the generation of text using Anthropic Chat&Completion large language models. + +**Params** + +- **Model Name:** Specifies the name of the Anthropic model to be used for text generation. Available options include: + - _`"claude-2.1"`_ + - _`"claude-2.0"`_ + - _`"claude-instant-1.2"`_ + - _`"claude-instant-1"`_ + +- **Anthropic API Key:** Your Anthropic API key. + +- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`256`_. + +- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.7`_. + +- **API Endpoint (Optional):** Specifies the endpoint of the Anthropic API. Defaults to _`"https://api.anthropic.com"`_ if not specified. + +- **Input Value:** Specifies the input text for text generation. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** A system message to pass to the model. + +For detailed documentation and integration guides, please refer to the [Anthropic Component Documentation](https://python.langchain.com/docs/integrations/chat/anthropic). + +--- + +### AzureChatOpenAI + +This component allows the generation of text using the LLM (Large Language Model) model from Azure OpenAI. + +**Params** + +- **Model Name:** Specifies the name of the Azure OpenAI model to be used for text generation. Available options include: + - _`"gpt-35-turbo"`_ + - _`"gpt-35-turbo-16k"`_ + - _`"gpt-35-turbo-instruct"`_ + - _`"gpt-4"`_ + - _`"gpt-4-32k"`_ + - _`"gpt-4-vision"`_ + +- **Azure Endpoint:** Your Azure endpoint, including the resource. Example: `https://example-resource.azure.openai.com/`. + +- **Deployment Name:** Specifies the name of the deployment. + +- **API Version:** Specifies the version of the Azure OpenAI API to be used. Available options include: + - _`"2023-03-15-preview"`_ + - _`"2023-05-15"`_ + - _`"2023-06-01-preview"`_ + - _`"2023-07-01-preview"`_ + - _`"2023-08-01-preview"`_ + - _`"2023-09-01-preview"`_ + - _`"2023-12-01-preview"`_ + +- **API Key:** Your Azure OpenAI API key. + +- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.7`_. + +- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`1000`_. + +- **Input Value:** Specifies the input text for text generation. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** A system message to pass to the model. + +For detailed documentation and integration guides, please refer to the [Azure OpenAI Component Documentation](https://python.langchain.com/docs/integrations/llms/azure_openai). + + +--- + +### QianfanChatEndpoint + +This component facilitates the generation of text using Baidu Qianfan chat models. + +**Params** + +- **Model Name:** Specifies the name of the Qianfan chat model to be used for text generation. Available options include: + - _`"ERNIE-Bot"`_ + - _`"ERNIE-Bot-turbo"`_ + - _`"BLOOMZ-7B"`_ + - _`"Llama-2-7b-chat"`_ + - _`"Llama-2-13b-chat"`_ + - _`"Llama-2-70b-chat"`_ + - _`"Qianfan-BLOOMZ-7B-compressed"`_ + - _`"Qianfan-Chinese-Llama-2-7B"`_ + - _`"ChatGLM2-6B-32K"`_ + - _`"AquilaChat-7B"`_ + +- **Qianfan Ak:** Your Baidu Qianfan access key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop). + +- **Qianfan Sk:** Your Baidu Qianfan secret key, obtainable from [here](https://cloud.baidu.com/product/wenxinworkshop). 
+ +- **Top p (Optional):** Model parameter. Specifies the top-p value. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.8`_. + +- **Temperature (Optional):** Model parameter. Specifies the sampling temperature. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`0.95`_. + +- **Penalty Score (Optional):** Model parameter. Specifies the penalty score. Only supported in ERNIE-Bot and ERNIE-Bot-turbo models. Defaults to _`1.0`_. + +- **Endpoint (Optional):** Endpoint of the Qianfan LLM, required if custom model is used. + +- **Input Value:** Specifies the input text for text generation. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** A system message to pass to the model. + +--- + +### Cohere + +This component enables text generation using Cohere large language models. + +**Params** + +- **Cohere API Key:** Your Cohere API key. + +- **Max Tokens (Optional):** Specifies the maximum number of tokens to generate. Defaults to _`256`_. + +- **Temperature (Optional):** Specifies the sampling temperature. Defaults to _`0.75`_. + +- **Input Value:** Specifies the input text for text generation. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** A system message to pass to the model. + +--- + +### CTransformers + +This component allows the generation of text using CTransformers large language models. + +**Params** + +- **Model:** Specifies the CTransformers model to be used for text generation. + +- **Model File (Optional):** Path to the model file if using a custom model. Should be a _.bin_ file. + +- **Model Type:** Specifies the type of the CTransformers model. + +- **Config (Optional):** Additional configuration parameters for the model. It should be provided as a JSON object. + + Defaults to: + + `{"top_k":40,"top_p":0.95,"temperature":0.8,"repetition_penalty":1.1,"last_n_tokens":64,"seed":-1,"max_new_tokens":256,"stop":"","stream":"False","reset":"True","batch_size":8,"threads":-1,"context_length":-1,"gpu_layers":0}`. + +- **Input Value:** Specifies the input text for text generation. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** A system message to pass to the model. + +--- + +### Google Generative AI + +This component enables text generation using Google Generative AI. + +**Params** + +- **Google API Key:** Your Google API key to use for the Google Generative AI. + +- **Model:** The name of the model to use. Supported examples are _`"gemini-pro"`_ and _`"gemini-pro-vision"`_. + +- **Max Output Tokens (Optional):** The maximum number of tokens to generate. + +- **Temperature:** Run inference with this temperature. Must be in the closed interval [0.0, 1.0]. + +- **Top K (Optional):** Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive. + +- **Top P (Optional):** The maximum cumulative probability of tokens to consider when sampling. + +- **N (Optional):** Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated. + +- **Input Value:** The input to the model. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** A system message to pass to the model. 
+ +--- + +### Hugging Face API + +This component facilitates text generation using LLM models from the Hugging Face Inference API. + +**Params** + +- **Endpoint URL:** The URL of the Hugging Face Inference API endpoint. Should be provided along with necessary authentication credentials. + +- **Task:** Specifies the task for text generation. Options include _`"text2text-generation"`_, _`"text-generation"`_, and _`"summarization"`_. + +- **API Token:** The API token required for authentication with the Hugging Face Hub. + +- **Model Keyword Arguments (Optional):** Additional keyword arguments for the model. Should be provided as a Python dictionary. + +- **Input Value:** The input text for text generation. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** A system message to pass to the model. + +--- + +### LlamaCpp + +The `LlamaCpp` is a component for generating text using the llama.cpp model. + +**Params** + +- **Model Path:** The path to the llama.cpp model file. This should be provided as a file type input. + +- **Input Value:** The input text for text generation. + +- **Grammar (Optional):** The grammar for text generation. + +- **Cache (Optional):** Specifies whether to cache the generated text. + +- **Client (Optional):** The client to use for text generation. + +- **Echo (Optional):** Specifies whether to echo the generated text. Defaults to _`False`_. + +- **F16 KV:** Specifies whether to use F16 key-value pairs. Defaults to _`True`_. + +- **Grammar Path (Optional):** The path to the grammar file. + +- **Last N Tokens Size (Optional):** The size of the last N tokens. Defaults to _`64`_. + +- **Logits All:** Specifies whether to include logits for all tokens. Defaults to _`False`_. + +- **Logprobs (Optional):** The log probabilities for text generation. + +- **Lora Base (Optional):** The base URL for Lora. + +- **Lora Path (Optional):** The path for Lora. + +- **Max Tokens (Optional):** The maximum number of tokens to generate. Defaults to _`256`_. + +- **Metadata (Optional):** Additional metadata for the model. + +- **Model Kwargs:** Additional keyword arguments for the model. Should be provided as a Python dictionary. + +- **N Batch (Optional):** The batch size. Defaults to _`8`_. + +- **N Ctx:** The context size. Defaults to _`512`_. + +- **N GPU Layers (Optional):** The number of GPU layers. + +- **N Parts:** The number of parts. + +- **N Threads (Optional):** The number of threads. Defaults to _`1`_. + +- **Repeat Penalty (Optional):** The repeat penalty for text generation. Defaults to _`1.1`_. + +- **Rope Freq Base:** The base frequency for rope. + +- **Rope Freq Scale:** The scale frequency for rope. + +- **Seed:** The seed for random generation. + +- **Stop (Optional):** The stop words for text generation. + +- **Streaming:** Specifies whether to stream the response from the model. Defaults to _`True`_. + +- **Suffix (Optional):** The suffix for text generation. + +- **Tags (Optional):** The tags for text generation. + +- **Temperature (Optional):** The temperature for text generation. Defaults to _`0.8`_. + +- **Top K (Optional):** The top K tokens to consider for text generation. Defaults to _`40`_. + +- **Top P (Optional):** The top P probability threshold for text generation. Defaults to _`0.95`_. + +- **Use Mlock:** Specifies whether to use Mlock. Defaults to _`False`_. + +- **Use Mmap (Optional):** Specifies whether to use Mmap. Defaults to _`True`_. 
+ +- **Verbose:** Specifies whether to enable verbose mode. Defaults to _`True`_. + +- **Vocab Only:** Specifies whether to include vocabulary only. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +For more information, please refer to the [documentation](https://python.langchain.com/docs/modules/model_io/models/llms/integrations/llamacpp). + +--- + +### ChatOllama + +This component facilitates text generation using the Local LLM model for chat with Ollama. + +**Params** + +- **Base URL:** The endpoint of the Ollama API. Defaults to 'http://localhost:11434' if not specified. + +- **Model Name:** The name of the model to use. Refer to [https://ollama.ai/library](https://ollama.ai/library) for more models. + +- **Input Value:** The input text for text generation. + +- **Mirostat:** Enable/disable Mirostat sampling for controlling perplexity. + +- **Mirostat Eta (Optional):** The learning rate for the Mirostat algorithm. (Default: 0.1) + +- **Mirostat Tau (Optional):** Controls the balance between coherence and diversity of the output. (Default: 5.0) + +- **Repeat Last N (Optional):** How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + +- **Verbose (Optional):** Whether to print out response text. + +- **Cache (Optional):** Enable or disable caching. Defaults to _`False`_. + +- **Context Window Size (Optional):** Size of the context window for generating tokens. (Default: 2048) + +- **Number of GPUs (Optional):** Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable) + +- **Format (Optional):** Specify the format of the output (e.g., json). + +- **Metadata (Optional):** Metadata to add to the run trace. + +- **Number of Threads (Optional):** Number of threads to use during computation. (Default: detected for optimal performance) + +- **Repeat Penalty (Optional):** Penalty for repetitions in generated text. (Default: 1.1) + +- **Stop Tokens (Optional):** List of tokens to signal the model to stop generating text. + +- **System (Optional):** System to use for generating text. + +- **Tags (Optional):** Tags to add to the run trace. + +- **Temperature (Optional):** Controls the creativity of model responses. Defaults to _`0.8`_. + +- **Template (Optional):** Template to use for generating text. + +- **TFS Z (Optional):** Tail free sampling value. (Default: 1) + +- **Timeout (Optional):** Timeout for the request stream. + +- **Top K (Optional):** Limits token selection to top K. (Default: 40) + +- **Top P (Optional):** Works together with top-k. (Default: 0.9) + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** System message to pass to the model. + +--- + +### OpenAIModel + +This component facilitates text generation using OpenAI's models. + +**Params** + +- **Input Value:** The input text for text generation. + +- **Max Tokens (Optional):** The maximum number of tokens to generate. Defaults to _`256`_. + +- **Model Kwargs (Optional):** Additional keyword arguments for the model. Should be provided as a nested dictionary. + +- **Model Name (Optional):** The name of the model to use. Defaults to _`gpt-4-1106-preview`_. Supported options include: _`gpt-4-turbo-preview`_, _`gpt-4-0125-preview`_, _`gpt-4-1106-preview`_, _`gpt-4-vision-preview`_, _`gpt-3.5-turbo-0125`_, _`gpt-3.5-turbo-1106`_. + +- **OpenAI API Base (Optional):** The base URL of the OpenAI API. 
Defaults to _`https://api.openai.com/v1`_. + +- **OpenAI API Key (Optional):** The API key for accessing the OpenAI API. + +- **Temperature:** Controls the creativity of model responses. Defaults to _`0.7`_. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** System message to pass to the model. + +--- + +### ChatVertexAI + +The `ChatVertexAI` is a component for generating text using Vertex AI Chat large language models API. + +**Params** + +- **Input Value:** The input text for text generation. + +- **Credentials:** The JSON file containing the credentials for accessing the Vertex AI Chat API. + +- **Project:** The name of the project associated with the Vertex AI Chat API. + +- **Examples (Optional):** List of examples to provide context for text generation. + +- **Location:** The location of the Vertex AI Chat API service. Defaults to _`us-central1`_. + +- **Max Output Tokens:** The maximum number of tokens to generate. Defaults to _`128`_. + +- **Model Name:** The name of the model to use. Defaults to _`chat-bison`_. + +- **Temperature:** Controls the creativity of model responses. Defaults to _`0.0`_. + +- **Top K:** Limits token selection to top K. Defaults to _`40`_. + +- **Top P:** Works together with top-k. Defaults to _`0.95`_. + +- **Verbose:** Whether to print out response text. Defaults to _`False`_. + +- **Stream (Optional):** Specifies whether to stream the response from the model. Defaults to _`False`_. + +- **System Message (Optional):** System message to pass to the model. \ No newline at end of file diff --git a/docs/docs/components/prompts.mdx b/docs/docs/components/prompts.mdx index 3aafc9b965..0b1ad705cf 100644 --- a/docs/docs/components/prompts.mdx +++ b/docs/docs/components/prompts.mdx @@ -21,7 +21,7 @@ The `PromptTemplate` component allows users to create prompts and define variabl Once a variable is defined in the prompt template, it becomes a component input of its own. Check out [Prompt - Customization](../docs/guidelines/prompt-customization.mdx) to learn more. + Customization](../guidelines/prompt-customization) to learn more. - **template:** Template used to format an individual request. diff --git a/docs/docs/components/vector-stores.mdx b/docs/docs/components/vector-stores.mdx index 133984cda8..0fd1fd89be 100644 --- a/docs/docs/components/vector-stores.mdx +++ b/docs/docs/components/vector-stores.mdx @@ -6,4 +6,640 @@ import Admonition from '@theme/Admonition';

We appreciate your understanding as we polish our documentation – it may contain some rough edges. Share your feedback or report issues to help us improve! 🛠️📝

- \ No newline at end of file + + + +### AstraDB + +The `AstraDB` is a component for initializing an AstraDB Vector Store from Records. It facilitates the creation of AstraDB-based vector indexes for efficient document storage and retrieval. + +**Params** + +- **Input:** The input documents or records. + +- **Embedding:** The embedding model used by AstraDB. + +- **Collection Name:** The name of the collection in AstraDB. + +- **Token:** The token for AstraDB. + +- **API Endpoint:** The API endpoint for AstraDB. + +- **Namespace:** The namespace in AstraDB. + +- **Metric:** The metric to use in AstraDB. + +- **Batch Size:** The batch size for AstraDB. + +- **Bulk Insert Batch Concurrency:** The bulk insert batch concurrency for AstraDB. + +- **Bulk Insert Overwrite Concurrency:** The bulk insert overwrite concurrency for AstraDB. + +- **Bulk Delete Concurrency:** The bulk delete concurrency for AstraDB. + +- **Setup Mode:** The setup mode for the vector store. + +- **Pre Delete Collection:** Pre delete collection. + +- **Metadata Indexing Include:** Metadata indexing include. + +- **Metadata Indexing Exclude:** Metadata indexing exclude. + +- **Collection Indexing Policy:** Collection indexing policy. + + +

+ Ensure that the required AstraDB token and API endpoint are properly configured. +

+ +
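This component builds on LangChain's AstraDB vector store. A minimal sketch of roughly equivalent standalone usage (assuming `langchain-community` and `astrapy` are installed; the token, endpoint, and collection name are placeholders, and `FakeEmbeddings` stands in for a real embedding model):

```python
# Illustrative sketch of the LangChain AstraDB vector store this component builds on.
# Assumes `langchain-community` and `astrapy` are installed; token, endpoint,
# collection name, and the embedding model are placeholders.
from langchain_community.vectorstores import AstraDB
from langchain_community.embeddings import FakeEmbeddings

store = AstraDB(
    embedding=FakeEmbeddings(size=768),  # Embedding (placeholder model)
    collection_name="langflow_demo",     # Collection Name
    token="AstraCS:...",                 # Token (placeholder)
    api_endpoint="https://<db-id>-<region>.apps.astra.datastax.com",  # API Endpoint
)

store.add_texts(["Langflow makes building flows visual."])
print(store.similarity_search("building flows", k=1))
```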
+ +--- + +### AstraDB Search + +The `AstraDBSearch` is a component for searching an existing AstraDB Vector Store for similar documents. It extends the functionality of the `AstraDB` component to provide efficient document retrieval based on similarity metrics. + +**Params** + +- **Search Type:** The type of search to perform (e.g., Similarity, MMR). + +- **Input Value:** The input value to search for. + +- **Embedding:** The embedding model used by AstraDB. + +- **Collection Name:** The name of the collection in AstraDB. + +- **Token:** The token for AstraDB. + +- **API Endpoint:** The API endpoint for AstraDB. + +- **Namespace:** The namespace in AstraDB. + +- **Metric:** The metric to use in AstraDB. + +- **Batch Size:** The batch size for AstraDB. + +- **Bulk Insert Batch Concurrency:** The bulk insert batch concurrency for AstraDB. + +- **Bulk Insert Overwrite Concurrency:** The bulk insert overwrite concurrency for AstraDB. + +- **Bulk Delete Concurrency:** The bulk delete concurrency for AstraDB. + +- **Setup Mode:** The setup mode for the vector store. + +- **Pre Delete Collection:** Pre delete collection. + +- **Metadata Indexing Include:** Metadata indexing include. + +- **Metadata Indexing Exclude:** Metadata indexing exclude. + +- **Collection Indexing Policy:** Collection indexing policy. + +--- + +### Chroma + +The `Chroma` is a component designed for implementing a Vector Store using Chroma. This component allows users to utilize Chroma for efficient vector storage and retrieval within their language processing workflows. + +**Params** + +- **Collection Name:** The name of the collection. + +- **Persist Directory:** The directory to persist the Vector Store to. + +- **Server CORS Allow Origins (Optional):** The CORS allow origins for the Chroma server. + +- **Server Host (Optional):** The host for the Chroma server. + +- **Server Port (Optional):** The port for the Chroma server. + +- **Server gRPC Port (Optional):** The gRPC port for the Chroma server. + +- **Server SSL Enabled (Optional):** Whether to enable SSL for the Chroma server. + + +- **Input:** Input data for creating the Vector Store. + +- **Embedding:** The embeddings to use for the Vector Store. + +For detailed documentation and integration guides, please refer to the [Chroma Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/chroma). + +--- + +### Chroma Search + + +The `ChromaSearch` is a component designed for searching a Chroma collection for similar documents. This component integrates with Chroma to facilitate efficient document retrieval based on similarity metrics. + +**Params** + +- **Input:** The input text to search for similar documents. + +- **Search Type:** The type of search to perform ("Similarity" or "MMR"). + +- **Collection Name:** The name of the Chroma collection. + +- **Index Directory:** The directory where the Chroma index is stored. + +- **Embedding:** The embedding model used to vectorize inputs (make sure to use the same as the index). + +- **Server CORS Allow Origins (Optional):** The CORS allow origins for the Chroma server. + +- **Server Host (Optional):** The host for the Chroma server. + +- **Server Port (Optional):** The port for the Chroma server. + +- **Server gRPC Port (Optional):** The gRPC port for the Chroma server. + +- **Server SSL Enabled (Optional):** Whether SSL is enabled for the Chroma server. + + +--- + +### FAISS + +The `FAISS` is a component designed for ingesting documents into a FAISS Vector Store. 
It facilitates efficient document indexing and retrieval using the FAISS library. + +**Params** + +- **Embedding:** The embedding model used to vectorize inputs. + +- **Input:** The input documents to ingest into the FAISS Vector Store. + +- **Folder Path:** The path to save the FAISS index. It will be relative to where Langflow is running. + +- **Index Name:** The name of the FAISS index. + + +For detailed documentation and integration guides, please refer to the [FAISS Component Documentation](https://faiss.ai/index.html). + +--- + +### FAISS Search + +The `FAISSSearch` is a component for searching a FAISS Vector Store for similar documents. It enables efficient document retrieval based on similarity metrics using FAISS. + +**Params** + +- **Embedding:** The embedding model used by the FAISS Vector Store. + +- **Folder Path:** The path from which to load the FAISS index. It will be relative to where Langflow is running. + +- **Input:** The input value to search for similar documents. + +- **Index Name:** The name of the FAISS index. + + +--- + + +### MongoDB Atlas + +The `MongoDBAtlas` is a component used to construct a MongoDB Atlas Vector Search vector store from Records. It facilitates the creation of MongoDB Atlas-based vector stores for efficient document storage and retrieval. + +**Params** + +- **Embedding:** The embedding model used by the MongoDB Atlas Vector Search. + +- **Input:** The input documents or records. + +- **Collection Name:** The name of the collection in the MongoDB Atlas database. + +- **Database Name:** The name of the database in MongoDB Atlas. + +- **Index Name:** The name of the index in MongoDB Atlas. + +- **MongoDB Atlas Cluster URI:** The URI of the MongoDB Atlas cluster. + +- **Search Kwargs:** Additional search arguments for MongoDB Atlas. + + + +

+ Ensure that `pymongo` is installed before using the MongoDB Atlas Vector Store.

+
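The component relies on LangChain's MongoDB Atlas Vector Search wrapper, which in turn needs `pymongo`. A rough standalone sketch (the cluster URI, database, collection, and index names below are placeholders, and `FakeEmbeddings` stands in for a real embedding model):

```python
# Illustrative sketch of the MongoDB Atlas Vector Search wrapper this component uses.
# Assumes `pymongo` and `langchain-community` are installed; URI, database,
# collection, and index names are placeholders.
from pymongo import MongoClient
from langchain_community.vectorstores import MongoDBAtlasVectorSearch
from langchain_community.embeddings import FakeEmbeddings

client = MongoClient("mongodb+srv://user:password@cluster.mongodb.net")  # Cluster URI
collection = client["my_database"]["my_collection"]                      # Database / Collection Name

store = MongoDBAtlasVectorSearch(
    collection=collection,
    embedding=FakeEmbeddings(size=768),  # Embedding (placeholder model)
    index_name="my_index",               # Index Name
)
store.add_texts(["Vector search on Atlas."])
```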
+ +--- + +### MongoDB Atlas Search + +The `MongoDBAtlasSearch` is a component for searching a MongoDB Atlas Vector Store for similar documents. It extends the functionality of the MongoDBAtlasComponent to provide efficient document retrieval based on similarity metrics. + +**Params** + +- **Search Type:** The type of search to perform. Options: "Similarity", "MMR". + +- **Input:** The input value to search for. + +- **Embedding:** The embedding model used by the MongoDB Atlas Vector Store. + +- **Collection Name:** The name of the collection in the MongoDB Atlas database. + +- **Database Name:** The name of the database in MongoDB Atlas. + +- **Index Name:** The name of the index in MongoDB Atlas. + +- **MongoDB Atlas Cluster URI:** The URI of the MongoDB Atlas cluster. + +- **Search Kwargs:** Additional search arguments for MongoDB Atlas. + + +--- + +### PGVector + +The `PGVector` is a component for implementing a Vector Store using PostgreSQL. It allows users to store and retrieve vectors efficiently within a PostgreSQL database. + +**Params** + +- **Input:** The input value to use for the Vector Store. + +- **Embedding:** The embedding model used by the Vector Store. + +- **PostgreSQL Server Connection String:** The URL for the PostgreSQL server. + +- **Table:** The name of the table in the PostgreSQL database. + + +For detailed documentation and integration guides, please refer to the [PGVector Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/pgvector). + + + +

+ Ensure that the required PostgreSQL server is accessible and properly configured. +

+
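The connection string follows the standard SQLAlchemy/psycopg2 format. For orientation, a minimal standalone sketch of the underlying LangChain PGVector store (assuming `langchain-community`, `psycopg2`, and the `pgvector` extension are available; the connection string, collection name, and embedding model are placeholders):

```python
# Illustrative sketch of the LangChain PGVector store behind this component.
# Assumes `langchain-community`, `psycopg2`, and the pgvector extension are
# available; connection string and names are placeholders.
from langchain_community.vectorstores import PGVector
from langchain_community.embeddings import FakeEmbeddings

store = PGVector(
    connection_string="postgresql+psycopg2://user:password@localhost:5432/mydb",  # PostgreSQL Server Connection String
    embedding_function=FakeEmbeddings(size=768),  # Embedding (placeholder model)
    collection_name="my_table",                   # Table
)
store.add_texts(["pgvector stores embeddings in Postgres."])
```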
+ +--- + +### PGVector Search + +The `PGVectorSearch` is a component for searching a PGVector Store for similar documents. It extends the functionality of the PGVectorComponent to provide efficient document retrieval based on similarity metrics. + +**Params** + +- **Input:** The input value to search for. + +- **Embedding:** The embedding model used by the Vector Store. + +- **PostgreSQL Server Connection String:** The URL for the PostgreSQL server. + +- **Table:** The name of the table in the PostgreSQL database. + +- **Search Type:** The type of search to perform (e.g., "Similarity", "MMR"). + + +--- + +### Pinecone + +The `Pinecone` is a component used to construct a Pinecone wrapper from Records. It facilitates the creation of Pinecone-based vector indexes for efficient document storage and retrieval. + +**Params** + +- **Input:** The input documents or records. + +- **Embedding:** The embedding model used by Pinecone. + +- **Index Name:** The name of the index in Pinecone. + +- **Namespace:** The namespace in Pinecone. + +- **Pinecone API Key:** The API key for Pinecone. + +- **Pinecone Environment:** The environment for Pinecone. + +- **Search Kwargs:** Additional search keyword arguments for Pinecone. + +- **Pool Threads:** The number of threads to use for Pinecone. + + + +

+ Ensure that the required Pinecone API key and environment are properly configured. +

+
+ +--- + +### Pinecone Search + +The `PineconeSearch` is a component used to search a Pinecone Vector Store for similar documents. It extends the functionality of the `PineconeComponent` to provide efficient document retrieval based on similarity metrics. + +**Params** + +- **Search Type:** The type of search to perform (e.g., Similarity, MMR). + +- **Input Value:** The input value to search for. + +- **Embedding:** The embedding model used by Pinecone. + +- **Index Name:** The name of the index in Pinecone. + +- **Namespace:** The namespace in Pinecone. + +- **Pinecone API Key:** The API key for Pinecone. + +- **Pinecone Environment:** The environment for Pinecone. + +- **Search Kwargs:** Additional search keyword arguments for Pinecone. + +- **Pool Threads:** The number of threads to use for Pinecone. + + +--- + +### Qdrant + +The `Qdrant` is a component used to construct a Qdrant wrapper from a list of texts. It allows for efficient similarity search and retrieval operations based on the provided embeddings. + +**Params** + +- **Input:** The input documents or records. + +- **Embedding:** The embedding model used by Qdrant. + +- **API Key:** The API key for Qdrant (password field). + +- **Collection Name:** The name of the collection in Qdrant. + +- **Content Payload Key:** The key for the content payload in the documents (advanced). + +- **Distance Function:** The distance function to use in Qdrant (advanced). + +- **gRPC Port:** The gRPC port for Qdrant (advanced). + +- **Host:** The host for Qdrant (advanced). + +- **HTTPS:** Enable HTTPS for Qdrant (advanced). + +- **Location:** The location for Qdrant (advanced). + +- **Metadata Payload Key:** The key for the metadata payload in the documents (advanced). + +- **Path:** The path for Qdrant (advanced). + +- **Port:** The port for Qdrant (advanced). + +- **Prefer gRPC:** Prefer gRPC for Qdrant (advanced). + +- **Prefix:** The prefix for Qdrant (advanced). + +- **Search Kwargs:** Additional search keyword arguments for Qdrant (advanced). + +- **Timeout:** The timeout for Qdrant (advanced). + +- **URL:** The URL for Qdrant (advanced). + +--- + +### Qdrant Search + +The `QdrantSearch` is a component used to search a Qdrant Vector Store for similar documents. It extends the functionality of the `QdrantComponent` to provide efficient document retrieval based on similarity metrics. + +**Params** + +- **Search Type:** The type of search to perform (e.g., Similarity, MMR). + +- **Input Value:** The input value to search for. + +- **Embedding:** The embedding model used by Qdrant. + +- **API Key:** The API key for Qdrant (password field). + +- **Collection Name:** The name of the collection in Qdrant. + +- **Content Payload Key:** The key for the content payload in the documents (advanced). + +- **Distance Function:** The distance function to use in Qdrant (advanced). + +- **gRPC Port:** The gRPC port for Qdrant (advanced). + +- **Host:** The host for Qdrant (advanced). + +- **HTTPS:** Enable HTTPS for Qdrant (advanced). + +- **Location:** The location for Qdrant (advanced). + +- **Metadata Payload Key:** The key for the metadata payload in the documents (advanced). + +- **Path:** The path for Qdrant (advanced). + +- **Port:** The port for Qdrant (advanced). + +- **Prefer gRPC:** Prefer gRPC for Qdrant (advanced). + +- **Prefix:** The prefix for Qdrant (advanced). + +- **Search Kwargs:** Additional search keyword arguments for Qdrant (advanced). + +- **Timeout:** The timeout for Qdrant (advanced). + +- **URL:** The URL for Qdrant (advanced). 
+ +--- + +### Redis + +The `Redis` is a component for implementing a Vector Store using Redis. It provides functionality to store and retrieve vectors efficiently from a Redis database. + +**Params** + +- **Index Name:** The name of the index in Redis (default: your_index). + +- **Input:** The input data to build the Redis Vector Store (input types: Document, Record). + +- **Embedding:** The embedding model used by Redis. + +- **Schema:** The schema file (.yaml) to define the structure of the documents (optional). + +- **Redis Server Connection String:** The connection string for the Redis server. + +- **Redis Index:** The name of the Redis index (optional). + +For detailed documentation, please refer to the [Redis Documentation](https://python.langchain.com/docs/integrations/vectorstores/redis). + + +

+ Ensure that the Redis server connection URL and index name are properly configured. If no documents are provided, a schema file must be supplied.

+
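A rough standalone sketch of the LangChain Redis store this component wraps (assuming `langchain-community` and `redis` are installed and a Redis Stack server is reachable at the URL below; the URL, index name, and embedding model are placeholders):

```python
# Illustrative sketch of the LangChain Redis vector store this component wraps.
# Assumes `langchain-community` and `redis` are installed and a Redis Stack
# server is running locally; URL, index name, and embedding are placeholders.
from langchain_community.vectorstores import Redis
from langchain_community.embeddings import FakeEmbeddings

store = Redis.from_texts(
    texts=["Redis can act as a vector store."],
    embedding=FakeEmbeddings(size=768),  # Embedding (placeholder model)
    redis_url="redis://localhost:6379",  # Redis Server Connection String
    index_name="your_index",             # Index Name
)
print(store.similarity_search("vector store", k=1))
```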
+ +--- + +### Redis Search + +The `RedisSearch` is a component for searching a Redis Vector Store for similar documents. + +**Params** + +- **Search Type:** The type of search to perform (e.g., Similarity, MMR). + +- **Input Value:** The input value to search for. + +- **Index Name:** The name of the index in Redis (default: your_index). + +- **Embedding:** The embedding model used by Redis. + +- **Schema:** The schema file (.yaml) to define the structure of the documents (optional). + +- **Redis Server Connection String:** The connection string for the Redis server. + +- **Redis Index:** The name of the Redis index (optional). + +--- + +### Supabase + +The `Supabase` is a component for initializing a Supabase Vector Store from texts and embeddings. + +**Params** + +- **Input:** The input documents or records. + +- **Embedding:** The embedding model used by Supabase. + +- **Query Name:** The name of the query (optional). + +- **Search Kwargs:** Additional search keyword arguments for Supabase (advanced). + +- **Supabase Service Key:** The service key for Supabase. + +- **Supabase URL:** The URL for the Supabase instance. + +- **Table Name:** The name of the table in Supabase (advanced). + + +

+ Ensure that the required Supabase service key, Supabase URL, and table name are properly configured. +

+
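A rough standalone sketch of the Supabase vector store wrapper behind this component (assuming the `supabase` and `langchain-community` packages are installed and that the target table plus a `match_documents` query already exist in your project; the URL, key, and embedding model are placeholders):

```python
# Illustrative sketch of the Supabase vector store wrapper behind this component.
# Assumes `supabase` and `langchain-community` are installed and the table and
# match_documents query exist; URL, key, and embedding are placeholders.
from supabase.client import create_client
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_community.embeddings import FakeEmbeddings

supabase = create_client("https://<project>.supabase.co", "service-role-key")  # Supabase URL / Service Key

store = SupabaseVectorStore(
    client=supabase,
    embedding=FakeEmbeddings(size=768),  # Embedding (placeholder model)
    table_name="documents",              # Table Name
    query_name="match_documents",        # Query Name
)
store.add_texts(["Supabase stores vectors in Postgres."])
```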
+ +--- + +### Supabase Search + +The `SupabaseSearch` is a component for searching a Supabase Vector Store for similar documents. + +**Params** + +- **Search Type:** The type of search to perform (e.g., Similarity, MMR). + +- **Input Value:** The input value to search for. + +- **Embedding:** The embedding model used by Supabase. + +- **Query Name:** The name of the query (optional). + +- **Search Kwargs:** Additional search keyword arguments for Supabase (advanced). + +- **Supabase Service Key:** The service key for Supabase. + +- **Supabase URL:** The URL for the Supabase instance. + +- **Table Name:** The name of the table in Supabase (advanced). + +--- + +### Vectara + +The `Vectara` is a component for implementing a Vector Store using Vectara. + +**Params** + +- **Vectara Customer ID:** The customer ID for Vectara. + +- **Vectara Corpus ID:** The corpus ID for Vectara. + +- **Vectara API Key:** The API key for Vectara. + +- **Files Url:** The URL(s) of the file(s) to be used for initializing the Vectara Vector Store (optional). + +- **Input:** The input data to be upserted to the corpus (optional). + +For detailed documentation and integration guides, please refer to the [Vectara Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/vectara). + + +

+ If `inputs` are provided, they are upserted to the corpus. If `files_url` is provided, Vectara processes the files from the given URLs.

+
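For orientation, a minimal sketch of the LangChain Vectara wrapper this component is based on (the customer ID, corpus ID, and API key below are placeholders; Vectara embeds documents server-side, so no embedding model is passed):

```python
# Illustrative sketch of the LangChain Vectara wrapper behind this component.
# Assumes `langchain-community` is installed; customer ID, corpus ID, and
# API key are placeholders for your Vectara account values.
from langchain_community.vectorstores import Vectara

vectara = Vectara(
    vectara_customer_id="1234567890",  # Vectara Customer ID
    vectara_corpus_id="1",             # Vectara Corpus ID
    vectara_api_key="zqt_...",         # Vectara API Key
)
vectara.add_texts(["Vectara hosts the index and the embeddings."])
print(vectara.similarity_search("hosted index", k=1))
```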
+ +--- + +### Vectara Search + +The `VectaraSearch` is a component for searching a Vectara Vector Store for similar documents. + +**Params** + +- **Search Type:** The type of search to perform (e.g., Similarity, MMR). + +- **Input Value:** The input value to search for. + +- **Vectara Customer ID:** The customer ID for Vectara. + +- **Vectara Corpus ID:** The corpus ID for Vectara. + +- **Vectara API Key:** The API key for Vectara. + +- **Files Url:** The URL(s) of the file(s) to be used for initializing the Vectara Vector Store (optional). +--- + +### Weaviate + +The `Weaviate` is a component for implementing a Vector Store using Weaviate. + +**Params** + +- **Weaviate URL:** The URL of the Weaviate instance (default: http://localhost:8080). + +- **Search By Text:** Boolean indicating whether to search by text (default: False). + +- **API Key:** The API key for authentication (optional). + +- **Index name:** The name of the index in Weaviate (optional). + +- **Text Key:** The key used to extract text from documents (default: "text"). + +- **Input:** The input document or record. + +- **Embedding:** The embedding model used by Weaviate. + +- **Attributes:** Additional attributes to consider during indexing (optional). + +For detailed documentation and integration guides, please refer to the [Weaviate Component Documentation](https://python.langchain.com/docs/integrations/vectorstores/weaviate). + + +

+ Before using the Weaviate Vector Store component, ensure that you have a Weaviate instance running and accessible at the specified URL. Additionally, make sure to provide the correct API key for authentication if required. Adjust the index name, text key, and attributes according to your dataset and indexing requirements. Finally, ensure that the provided embeddings are compatible with Weaviate's requirements. +

+
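A rough standalone sketch of the LangChain Weaviate wrapper behind this component (assuming a v3 `weaviate-client` and `langchain-community` are installed and an instance is running at the default URL; the index name and embedding model are placeholders):

```python
# Illustrative sketch of the LangChain Weaviate wrapper behind this component.
# Assumes a v3 `weaviate-client` and `langchain-community` are installed and a
# Weaviate instance runs locally; index name and embedding are placeholders.
import weaviate
from langchain_community.vectorstores import Weaviate
from langchain_community.embeddings import FakeEmbeddings

client = weaviate.Client("http://localhost:8080")  # Weaviate URL

store = Weaviate(
    client=client,
    index_name="LangflowDocs",           # Index name
    text_key="text",                     # Text Key
    embedding=FakeEmbeddings(size=768),  # Embedding (placeholder model)
    by_text=False,                       # Search By Text
)
store.add_texts(["Weaviate stores objects and vectors."])
```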
+ +--- + +### Weaviate Search + +The `WeaviateSearch` component facilitates searching a Weaviate Vector Store for similar documents. + +**Params** + +- **Search Type:** The type of search to perform (e.g., Similarity, MMR). + +- **Input Value:** The input value to search for. + +- **Weaviate URL:** The URL of the Weaviate instance (default: http://localhost:8080). + +- **Search By Text:** Boolean indicating whether to search by text (default: False). + +- **API Key:** The API key for authentication (optional). + +- **Index name:** The name of the index in Weaviate (optional). + +- **Text Key:** The key used to extract text from documents (default: "text"). + +- **Embedding:** The embedding model used by Weaviate. + +- **Attributes:** Additional attributes to consider during indexing (optional). \ No newline at end of file diff --git a/docs/docs/examples/flow-runner.mdx b/docs/docs/examples/flow-runner.mdx index 641d814e5b..38466e4b32 100644 --- a/docs/docs/examples/flow-runner.mdx +++ b/docs/docs/examples/flow-runner.mdx @@ -3,8 +3,6 @@ description: Custom Components hide_table_of_contents: true --- -import ZoomableImage from "/src/theme/ZoomableImage.js"; -import Admonition from "@theme/Admonition"; # FlowRunner Component @@ -365,6 +363,8 @@ Done! This is what our script and custom component looks like: }} /> -import ZoomableImage from "/src/theme/ZoomableImage.js"; + + +import ZoomableImage from "/src/theme/ZoomableImage.js"; import Admonition from "@theme/Admonition"; diff --git a/docs/docs/guidelines/features.mdx b/docs/docs/guidelines/features.mdx index 04feba779d..46913a0931 100644 --- a/docs/docs/guidelines/features.mdx +++ b/docs/docs/guidelines/features.mdx @@ -1,8 +1,3 @@ -import ThemedImage from "@theme/ThemedImage"; -import useBaseUrl from "@docusaurus/useBaseUrl"; -import ZoomableImage from "/src/theme/ZoomableImage.js"; -import ReactPlayer from "react-player"; -import Admonition from "@theme/Admonition"; # Features @@ -66,9 +61,11 @@ The example below shows a Python script making a POST request to a local API end style={{ marginBottom: "20px", display: "flex", justifyContent: "center" }} > -import ThemedImage from "@theme/ThemedImage"; + + + +import ThemedImage from "@theme/ThemedImage"; import useBaseUrl from "@docusaurus/useBaseUrl"; import ZoomableImage from "/src/theme/ZoomableImage.js"; import ReactPlayer from "react-player"; import Admonition from "@theme/Admonition"; - diff --git a/docs/docs/guides/compatibility.mdx b/docs/docs/guides/compatibility.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/component-status-and-data-passing.mdx b/docs/docs/guides/component-status-and-data-passing.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/connecting-output-components.mdx b/docs/docs/guides/connecting-output-components.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/custom-component.mdx b/docs/docs/guides/custom-component.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/experimental-components.mdx b/docs/docs/guides/experimental-components.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/flow-of-data.mdx b/docs/docs/guides/flow-of-data.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/global-variables.mdx b/docs/docs/guides/global-variables.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/inputs-and-outputs.mdx 
b/docs/docs/guides/inputs-and-outputs.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/multiple-flows.mdx b/docs/docs/guides/multiple-flows.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/new-categories-and-components.mdx b/docs/docs/guides/new-categories-and-components.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/passing-tweaks-and-inputs.mdx b/docs/docs/guides/passing-tweaks-and-inputs.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/renaming-and-editing-components.mdx b/docs/docs/guides/renaming-and-editing-components.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/run-flow.mdx b/docs/docs/guides/run-flow.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/sidebar-and-interaction-panel.mdx b/docs/docs/guides/sidebar-and-interaction-panel.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/state-management.mdx b/docs/docs/guides/state-management.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/supported-frameworks.mdx b/docs/docs/guides/supported-frameworks.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/guides/text-and-record.mdx b/docs/docs/guides/text-and-record.mdx new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/docs/whats-new/customization-control.mdx b/docs/docs/whats-new/customization-control.mdx new file mode 100644 index 0000000000..11f23f53c6 --- /dev/null +++ b/docs/docs/whats-new/customization-control.mdx @@ -0,0 +1 @@ +# A New Customization and Control \ No newline at end of file diff --git a/docs/docs/whats-new/debugging-reimagined.mdx b/docs/docs/whats-new/debugging-reimagined.mdx new file mode 100644 index 0000000000..d302340883 --- /dev/null +++ b/docs/docs/whats-new/debugging-reimagined.mdx @@ -0,0 +1 @@ +# Debugging Reimagined \ No newline at end of file diff --git a/docs/docs/whats-new/migrating-to-one-point-zero.mdx b/docs/docs/whats-new/migrating-to-one-point-zero.mdx new file mode 100644 index 0000000000..835dadfa16 --- /dev/null +++ b/docs/docs/whats-new/migrating-to-one-point-zero.mdx @@ -0,0 +1,124 @@ +# Migrating to Langflow 1.0: A Guide + +Langflow 1.0 is a significant update that brings many exciting changes and improvements to the platform. This guide will walk you through the key differences and help you migrate your existing projects to the new version. + +If you have any questions or need assistance during the migration process, please don't hesitate to reach out to in our [Discord](https://discord.gg/wZSWQaukgJ) or [GitHub](https://github.com/logspace-ai/langflow/issues) community. 
+ +We have a special channel + +## TLDR; + +- Inputs and Outputs of Components have changed +- The composition model has been replaced with a flow of data +- Continued support for LangChain and new support for multiple frameworks +- Redesigned sidebar and customizable interaction panel +- New Native Categories and Components +- Improved user experience with Text and Record modes +- CustomComponent for all components +- Compatibility with previous versions using Runnable Executor +- Multiple flows in the canvas +- Improved component status +- Ability to connect Output components to any other Component +- Rename and edit component descriptions +- Pass tweaks and inputs in the API using Display Name +- Global Variables for Text Fields +- Experimental components like SubFlow and Flow as Tool +- Experimental State Management system with Notify and Listen components + +## Inputs and Outputs of Components + +Langflow 1.0 introduces adds the concept of Inputs and Outputs to flows, allowing clear definition of the data flow between components. Discover how to use Inputs and Outputs to pass data between components and create more dynamic flows. + +[Learn more about Inputs and Outputs of Components](../guides/inputs-and-outputs) + +## From Composition to Freedom + +Even though composition is still possible in Langflow 1.0, the new standard is getting data moving through the flow. This allows for more flexibility and control over the data flow in your projects. Check out how to use this in new and existing projects. + +[Learn more about the Flow of Data](../guides/flow-of-data) + +## Continued Support for LangChain and Multiple Frameworks + +Langflow 1.0 continues to support LangChain while also introducing support for multiple frameworks. This is another important boon that adding the paradigm of data flow brings to the table. Find out how to leverage the power of different frameworks in your projects. + +[Learn more about Supported Frameworks](../guides/supported-frameworks) + +## Sidebar Redesign and Customizable Interaction Panel + +We've expanded on the chat experience by creating a customizable interaction panel that allows you to design a panel that fits your needs and interact with it. The sidebar has also been redesigned to provide a more intuitive and user-friendly experience. Explore the new sidebar and interaction panel features to enhance your workflow. + +[Learn more about some of the UI updates](../guides/sidebar-and-interaction-panel) + +## New Native Categories and Components + +Langflow 1.0 introduces many new native categories, including Inputs, Outputs, Helpers, Experimental, Models, and more. Discover the new components available, such as Chat Input, Prompt, Files, API Request, and others. + +[Learn more about New Categories and Components](../guides/new-categories-and-components) + +## New Way of Using Langflow: Text and Record (and more to come) + +With the introduction of Text and Record types connections between Components are more intuitive and easier to understand. This is the first step in a series of improvements to the way you interact with Langflow. Learn how to use Text, and Record and how they help you build better flows. + +[Learn more about Text and Record](../guides/text-and-record) + +## CustomComponent for All Components + +Almost all components in Langflow 1.0 are now CustomComponents, allowing you to check and modify the code of each component. Discover how to leverage this feature to customize your components to your specific needs. 
+ +[Learn more about CustomComponent](../guides/custom-component) + +## Compatibility with Previous Versions + +To use flows built in previous versions of Langflow, you can utilize the experimental component Runnable Executor along with an Input and Output. **We'd love your feedback on this**. Learn how to adapt your existing flows to work seamlessly in the new version of Langflow. + +[Learn more about Compatibility with Previous Versions](../guides/compatibility) + +## Multiple Flows in the Canvas + +Langflow 1.0 allows you to have more than one flow in the canvas and run them separately. Discover how to create and manage multiple flows within a single project. + +[Learn more about Multiple Flows](../guides/multiple-flows) + +## Improved Component Status + +Each component now displays its status more clearly, allowing you to quickly identify any issues or errors. Explore how to use the new component status feature to troubleshoot and optimize your flows. + +[Learn more about Component Status](../guides/component-status-and-data-passing) + +## Connecting Output Components + +You can now connect Output components to any other component (that has a Text output), providing a better understanding of the data flow. Explore the possibilities of connecting Output components and how it enhances your flow's functionality. + +[Learn more about Connecting Output Components](../guides/connecting-output-components) + +## Renaming and Editing Component Descriptions + +Langflow 1.0 allows you to rename and edit the description of each component, making it easier to understand and interact with the flow. Learn how to customize your component names and descriptions for improved clarity. + +[Learn more about Renaming and Editing Components](../guides/renaming-and-editing-components) + +## Passing Tweaks and Inputs in the API + +Things got a whole lot easier. You can now pass tweaks and inputs in the API by referencing the Display Name of the component. Discover how to leverage this feature to dynamically control your flow's behavior. + +[Learn more about Passing Tweaks and Inputs](../guides/passing-tweaks-and-inputs) + +## Global Variables for Text Fields + +Global Variables can be used in any Text Field across your projects. Learn how to define and utilize Global Variables to streamline your workflow. + +[Learn more about Global Variables](../guides/global-variables) + +## Experimental Components + +Explore the experimental components available in Langflow 1.0, such as SubFlow, which allows you to load a flow as a component dynamically, and Flow as Tool, which enables you to use a flow as a tool for an Agent. + +[Learn more about Experimental Components](../guides/experimental-components) + +## Experimental State Management System + +We are experimenting with a State Management system for flows that allows components to trigger other components and pass messages between them using the Notify and Listen components. Discover how to leverage this system to create more dynamic and interactive flows. + +[Learn more about State Management](../guides/state-management) + +We hope this guide helps you navigate the changes and improvements in Langflow 1.0. If you have any questions or need further assistance, please don't hesitate to reach out to us in our [Discord](https://discord.gg/wZSWQaukgJ). 
\ No newline at end of file diff --git a/docs/docs/whats-new/simplification-standardization.mdx b/docs/docs/whats-new/simplification-standardization.mdx new file mode 100644 index 0000000000..f7e3115bcd --- /dev/null +++ b/docs/docs/whats-new/simplification-standardization.mdx @@ -0,0 +1 @@ +# Simplification Through Standardization \ No newline at end of file diff --git a/docs/package-lock.json b/docs/package-lock.json index 6742b89e71..f00d7c2836 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -10,10 +10,10 @@ "dependencies": { "@babel/preset-react": "^7.22.3", "@code-hike/mdx": "^0.9.0", - "@docusaurus/core": "3.0.1", - "@docusaurus/plugin-ideal-image": "^3.0.1", - "@docusaurus/preset-classic": "3.0.1", - "@docusaurus/theme-classic": "^3.0.1", + "@docusaurus/core": "^3.1.1", + "@docusaurus/plugin-ideal-image": "^3.1.1", + "@docusaurus/preset-classic": "^3.1.1", + "@docusaurus/theme-classic": "^3.1.1", "@docusaurus/theme-search-algolia": "^3.0.1", "@mdx-js/react": "^2.3.0", "@mendable/search": "^0.0.154", @@ -2069,9 +2069,9 @@ } }, "node_modules/@docusaurus/core": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.0.1.tgz", - "integrity": "sha512-CXrLpOnW+dJdSv8M5FAJ3JBwXtL6mhUWxFA8aS0ozK6jBG/wgxERk5uvH28fCeFxOGbAT9v1e9dOMo1X2IEVhQ==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.1.1.tgz", + "integrity": "sha512-2nQfKFcf+MLEM7JXsXwQxPOmQAR6ytKMZVSx7tVi9HEm9WtfwBH1fp6bn8Gj4zLUhjWKCLoysQ9/Wm+EZCQ4yQ==", "dependencies": { "@babel/core": "^7.23.3", "@babel/generator": "^7.23.3", @@ -2083,13 +2083,13 @@ "@babel/runtime": "^7.22.6", "@babel/runtime-corejs3": "^7.22.6", "@babel/traverse": "^7.22.8", - "@docusaurus/cssnano-preset": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", + "@docusaurus/cssnano-preset": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.5.1", "autoprefixer": "^10.4.14", @@ -2249,9 +2249,9 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/@docusaurus/cssnano-preset": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.0.1.tgz", - "integrity": "sha512-wjuXzkHMW+ig4BD6Ya1Yevx9UJadO4smNZCEljqBoQfIQrQskTswBs7lZ8InHP7mCt273a/y/rm36EZhqJhknQ==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.1.1.tgz", + "integrity": "sha512-LnoIDjJWbirdbVZDMq+4hwmrTl2yHDnBf9MLG9qyExeAE3ac35s4yUhJI8yyTCdixzNfKit4cbXblzzqMu4+8g==", "dependencies": { "cssnano-preset-advanced": "^5.3.10", "postcss": "^8.4.26", @@ -2263,9 +2263,9 @@ } }, "node_modules/@docusaurus/logger": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.0.1.tgz", - "integrity": "sha512-I5L6Nk8OJzkVA91O2uftmo71LBSxe1vmOn9AMR6JRCzYeEBrqneWMH02AqMvjJ2NpMiviO+t0CyPjyYV7nxCWQ==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.1.1.tgz", + "integrity": 
"sha512-BjkNDpQzewcTnST8trx4idSoAla6zZ3w22NqM/UMcFtvYJgmoE4layuTzlfql3VFPNuivvj7BOExa/+21y4X2Q==", "dependencies": { "chalk": "^4.1.2", "tslib": "^2.6.0" @@ -2339,11 +2339,11 @@ } }, "node_modules/@docusaurus/lqip-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/lqip-loader/-/lqip-loader-3.0.1.tgz", - "integrity": "sha512-hFSu8ltYo0ZnWBWmjMhSprOr6nNKG01YdMDxH/hahBfyaNDCkZU4o7mQNgUW845lvYdp6bhjyW31WJwBjOnLqw==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/lqip-loader/-/lqip-loader-3.1.1.tgz", + "integrity": "sha512-s06lySAX5ghCiQe0+/GaMWcVvgkBQ6U8p182fW+JbdjxABS8ecx2in2AQJbvrwKNgiMjOhsXiaE6BmbQAmT6nw==", "dependencies": { - "@docusaurus/logger": "3.0.1", + "@docusaurus/logger": "3.1.1", "file-loader": "^6.2.0", "lodash": "^4.17.21", "sharp": "^0.32.3", @@ -2354,15 +2354,15 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.0.1.tgz", - "integrity": "sha512-ldnTmvnvlrONUq45oKESrpy+lXtbnTcTsFkOTIDswe5xx5iWJjt6eSa0f99ZaWlnm24mlojcIGoUWNCS53qVlQ==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.1.1.tgz", + "integrity": "sha512-xN2IccH9+sv7TmxwsDJNS97BHdmlqWwho+kIVY4tcCXkp+k4QuzvWBeunIMzeayY4Fu13A6sAjHGv5qm72KyGA==", "dependencies": { "@babel/parser": "^7.22.7", "@babel/traverse": "^7.22.8", - "@docusaurus/logger": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@mdx-js/mdx": "^3.0.0", "@slorber/remark-comment": "^1.0.0", "escape-html": "^1.0.3", @@ -2434,17 +2434,17 @@ } }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.0.1.tgz", - "integrity": "sha512-cLOvtvAyaMQFLI8vm4j26svg3ktxMPSXpuUJ7EERKoGbfpJSsgtowNHcRsaBVmfuCsRSk1HZ/yHBsUkTmHFEsg==", - "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.1.1.tgz", + "integrity": "sha512-ew/3VtVoG3emoAKmoZl7oKe1zdFOsI0NbcHS26kIxt2Z8vcXKCUgK9jJJrz0TbOipyETPhqwq4nbitrY3baibg==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^11.1.1", @@ -2465,17 +2465,17 @@ } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.0.1.tgz", - "integrity": "sha512-dRfAOA5Ivo+sdzzJGXEu33yAtvGg8dlZkvt/NEJ7nwi1F2j4LEdsxtfX2GKeETB2fP6XoGNSQnFXqa2NYGrHFg==", - "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/module-type-aliases": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.1.1.tgz", + "integrity": "sha512-lhFq4E874zw0UOH7ujzxnCayOyAt0f9YPVYSb9ohxrdCM8B4szxitUw9rIX4V9JLLHVoqIJb6k+lJJ1jrcGJ0A==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/module-type-aliases": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@types/react-router-config": "^5.0.7", "combine-promises": "^1.1.0", "fs-extra": "^11.1.1", @@ -2494,12 +2494,12 @@ } }, "node_modules/@docusaurus/plugin-content-docs/node_modules/@docusaurus/module-type-aliases": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.0.1.tgz", - "integrity": "sha512-DEHpeqUDsLynl3AhQQiO7AbC7/z/lBra34jTcdYuvp9eGm01pfH1wTVq8YqWZq6Jyx0BgcVl/VJqtE9StRd9Ag==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.1.1.tgz", + "integrity": "sha512-xBJyx0TMfAfVZ9ZeIOb1awdXgR4YJMocIEzTps91rq+hJDFJgJaylDtmoRhUxkwuYmNK1GJpW95b7DLztSBJ3A==", "dependencies": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "3.0.1", + "@docusaurus/types": "3.1.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2513,15 +2513,15 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.0.1.tgz", - "integrity": "sha512-oP7PoYizKAXyEttcvVzfX3OoBIXEmXTMzCdfmC4oSwjG4SPcJsRge3mmI6O8jcZBgUPjIzXD21bVGWEE1iu8gg==", - "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.1.1.tgz", + "integrity": "sha512-NQHncNRAJbyLtgTim9GlEnNYsFhuCxaCNkMwikuxLTiGIPH7r/jpb7O3f3jUMYMebZZZrDq5S7om9a6rvB/YCA==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "fs-extra": "^11.1.1", "tslib": "^2.6.0", "webpack": "^5.88.1" @@ -2535,13 +2535,13 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.0.1.tgz", - "integrity": "sha512-09dxZMdATky4qdsZGzhzlUvvC+ilQ2hKbYF+wez+cM2mGo4qHbv8+qKXqxq0CQZyimwlAOWQLoSozIXU0g0i7g==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.1.1.tgz", + "integrity": "sha512-xWeMkueM9wE/8LVvl4+Qf1WqwXmreMjI5Kgr7GYCDoJ8zu4kD+KaMhrh7py7MNM38IFvU1RfrGKacCEe2DRRfQ==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", + "@docusaurus/core": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", "fs-extra": "^11.1.1", "react-json-view-lite": "^1.2.0", "tslib": "^2.6.0" @@ -2555,13 +2555,13 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.0.1.tgz", - "integrity": 
"sha512-jwseSz1E+g9rXQwDdr0ZdYNjn8leZBnKPjjQhMBEiwDoenL3JYFcNW0+p0sWoVF/f2z5t7HkKA+cYObrUh18gg==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.1.1.tgz", + "integrity": "sha512-+q2UpWTqVi8GdlLoSlD5bS/YpxW+QMoBwrPrUH/NpvpuOi0Of7MTotsQf9JWd3hymZxl2uu1o3PIrbpxfeDFDQ==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "tslib": "^2.6.0" }, "engines": { @@ -2573,13 +2573,13 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.0.1.tgz", - "integrity": "sha512-UFTDvXniAWrajsulKUJ1DB6qplui1BlKLQZjX4F7qS/qfJ+qkKqSkhJ/F4VuGQ2JYeZstYb+KaUzUzvaPK1aRQ==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.1.1.tgz", + "integrity": "sha512-0mMPiBBlQ5LFHTtjxuvt/6yzh8v7OxLi3CbeEsxXZpUzcKO/GC7UA1VOWUoBeQzQL508J12HTAlR3IBU9OofSw==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@types/gtag.js": "^0.0.12", "tslib": "^2.6.0" }, @@ -2592,13 +2592,13 @@ } }, "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.0.1.tgz", - "integrity": "sha512-IPFvuz83aFuheZcWpTlAdiiX1RqWIHM+OH8wS66JgwAKOiQMR3+nLywGjkLV4bp52x7nCnwhNk1rE85Cpy/CIw==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.1.1.tgz", + "integrity": "sha512-d07bsrMLdDIryDtY17DgqYUbjkswZQr8cLWl4tzXrt5OR/T/zxC1SYKajzB3fd87zTu5W5klV5GmUwcNSMXQXA==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "tslib": "^2.6.0" }, "engines": { @@ -2610,16 +2610,16 @@ } }, "node_modules/@docusaurus/plugin-ideal-image": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-ideal-image/-/plugin-ideal-image-3.0.1.tgz", - "integrity": "sha512-IvAUpEIz6v1/fVz6UTdQY12pYIE5geNFtsuKpsULpMaotwYf3Gs7acXjQog4qquKkc65yV5zuvMj8BZMHEwLyQ==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-ideal-image/-/plugin-ideal-image-3.1.1.tgz", + "integrity": "sha512-FYce5eV5/fWO4qIG8sKYdK3MTwusdxQML/M62IiltUNM/cqFkDrty1d+H+/I2PYX1s7AOoL3YomdJNP4vra/Tg==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/lqip-loader": "3.0.1", + "@docusaurus/core": "3.1.1", + "@docusaurus/lqip-loader": "3.1.1", "@docusaurus/responsive-loader": "^1.7.0", - "@docusaurus/theme-translations": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/theme-translations": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@slorber/react-ideal-image": "^0.0.12", "react-waypoint": "^10.3.0", "sharp": "^0.32.3", @@ -2641,16 +2641,16 @@ } }, "node_modules/@docusaurus/plugin-sitemap": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.0.1.tgz", - "integrity": "sha512-xARiWnjtVvoEniZudlCq5T9ifnhCu/GAZ5nA7XgyLfPcNpHQa241HZdsTlLtVcecEVVdllevBKOp7qknBBaMGw==", - "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.1.1.tgz", + "integrity": "sha512-iJ4hCaMmDaUqRv131XJdt/C/jJQx8UreDWTRqZKtNydvZVh/o4yXGRRFOplea1D9b/zpwL1Y+ZDwX7xMhIOTmg==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "fs-extra": "^11.1.1", "sitemap": "^7.1.1", "tslib": "^2.6.0" @@ -2664,23 +2664,23 @@ } }, "node_modules/@docusaurus/preset-classic": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.0.1.tgz", - "integrity": "sha512-il9m9xZKKjoXn6h0cRcdnt6wce0Pv1y5t4xk2Wx7zBGhKG1idu4IFHtikHlD0QPuZ9fizpXspXcTzjL5FXc1Gw==", - "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/plugin-content-blog": "3.0.1", - "@docusaurus/plugin-content-docs": "3.0.1", - "@docusaurus/plugin-content-pages": "3.0.1", - "@docusaurus/plugin-debug": "3.0.1", - "@docusaurus/plugin-google-analytics": "3.0.1", - "@docusaurus/plugin-google-gtag": "3.0.1", - "@docusaurus/plugin-google-tag-manager": "3.0.1", - "@docusaurus/plugin-sitemap": "3.0.1", - "@docusaurus/theme-classic": "3.0.1", - "@docusaurus/theme-common": "3.0.1", - "@docusaurus/theme-search-algolia": "3.0.1", - "@docusaurus/types": "3.0.1" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.1.1.tgz", + "integrity": "sha512-jG4ys/hWYf69iaN/xOmF+3kjs4Nnz1Ay3CjFLDtYa8KdxbmUhArA9HmP26ru5N0wbVWhY+6kmpYhTJpez5wTyg==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/plugin-content-blog": "3.1.1", + "@docusaurus/plugin-content-docs": "3.1.1", + "@docusaurus/plugin-content-pages": "3.1.1", + "@docusaurus/plugin-debug": "3.1.1", + "@docusaurus/plugin-google-analytics": "3.1.1", + "@docusaurus/plugin-google-gtag": "3.1.1", + "@docusaurus/plugin-google-tag-manager": "3.1.1", + "@docusaurus/plugin-sitemap": "3.1.1", + "@docusaurus/theme-classic": "3.1.1", + "@docusaurus/theme-common": "3.1.1", + "@docusaurus/theme-search-algolia": "3.1.1", + "@docusaurus/types": "3.1.1" }, "engines": { "node": ">=18.0" @@ -2726,22 +2726,22 @@ } }, "node_modules/@docusaurus/theme-classic": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.0.1.tgz", - "integrity": "sha512-XD1FRXaJiDlmYaiHHdm27PNhhPboUah9rqIH0lMpBt5kYtsGjJzhqa27KuZvHLzOP2OEpqd2+GZ5b6YPq7Q05Q==", - "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/module-type-aliases": "3.0.1", - "@docusaurus/plugin-content-blog": "3.0.1", - "@docusaurus/plugin-content-docs": "3.0.1", - "@docusaurus/plugin-content-pages": "3.0.1", - "@docusaurus/theme-common": "3.0.1", - "@docusaurus/theme-translations": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.1.1.tgz", + "integrity": "sha512-GiPE/jbWM8Qv1A14lk6s9fhc0LhPEQ00eIczRO4QL2nAQJZXkjPG6zaVx+1cZxPFWbAsqSjKe2lqkwF3fGkQ7Q==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/module-type-aliases": "3.1.1", + "@docusaurus/plugin-content-blog": "3.1.1", + "@docusaurus/plugin-content-docs": "3.1.1", + "@docusaurus/plugin-content-pages": "3.1.1", + "@docusaurus/theme-common": "3.1.1", + "@docusaurus/theme-translations": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "copy-text-to-clipboard": "^3.2.0", @@ -2765,12 +2765,12 @@ } }, "node_modules/@docusaurus/theme-classic/node_modules/@docusaurus/module-type-aliases": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.0.1.tgz", - "integrity": "sha512-DEHpeqUDsLynl3AhQQiO7AbC7/z/lBra34jTcdYuvp9eGm01pfH1wTVq8YqWZq6Jyx0BgcVl/VJqtE9StRd9Ag==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.1.1.tgz", + "integrity": "sha512-xBJyx0TMfAfVZ9ZeIOb1awdXgR4YJMocIEzTps91rq+hJDFJgJaylDtmoRhUxkwuYmNK1GJpW95b7DLztSBJ3A==", "dependencies": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "3.0.1", + "@docusaurus/types": "3.1.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2784,9 +2784,9 @@ } }, "node_modules/@docusaurus/theme-classic/node_modules/@mdx-js/react": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.0.tgz", - "integrity": "sha512-nDctevR9KyYFyV+m+/+S4cpzCWHqj+iHDHq3QrsWezcC+B17uZdIWgCguESUkwFhM3n/56KxWVE3V6EokrmONQ==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.1.tgz", + "integrity": "sha512-9ZrPIU4MGf6et1m1ov3zKf+q9+deetI51zprKB1D/z3NOb+rUxxtEl3mCjW5wTGh6VhRdwPueh1oRzi6ezkA8A==", "dependencies": { "@types/mdx": "^2.0.0" }, @@ -2800,9 +2800,9 @@ } }, "node_modules/@docusaurus/theme-classic/node_modules/clsx": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", - "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.0.tgz", + "integrity": "sha512-m3iNNWpd9rl3jvvcBnu70ylMdrXt8Vlq4HYadnU5fwcOtvkSQWPmj7amUcDT2qYI7risszBjI5AUIUox9D16pg==", "engines": { "node": ">=6" } @@ -2820,17 +2820,17 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.0.1.tgz", - "integrity": "sha512-cr9TOWXuIOL0PUfuXv6L5lPlTgaphKP+22NdVBOYah5jSq5XAAulJTjfe+IfLsEG4L7lJttLbhW7LXDFSAI7Ag==", - "dependencies": { - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/module-type-aliases": "3.0.1", - "@docusaurus/plugin-content-blog": "3.0.1", - "@docusaurus/plugin-content-docs": "3.0.1", - "@docusaurus/plugin-content-pages": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.1.1.tgz", + "integrity": "sha512-38urZfeMhN70YaXkwIGXmcUcv2CEYK/2l4b05GkJPrbEbgpsIZM3Xc+Js2ehBGGZmfZq8GjjQ5RNQYG+MYzCYg==", + "dependencies": { + 
"@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/module-type-aliases": "3.1.1", + "@docusaurus/plugin-content-blog": "3.1.1", + "@docusaurus/plugin-content-docs": "3.1.1", + "@docusaurus/plugin-content-pages": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2849,12 +2849,12 @@ } }, "node_modules/@docusaurus/theme-common/node_modules/@docusaurus/module-type-aliases": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.0.1.tgz", - "integrity": "sha512-DEHpeqUDsLynl3AhQQiO7AbC7/z/lBra34jTcdYuvp9eGm01pfH1wTVq8YqWZq6Jyx0BgcVl/VJqtE9StRd9Ag==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.1.1.tgz", + "integrity": "sha512-xBJyx0TMfAfVZ9ZeIOb1awdXgR4YJMocIEzTps91rq+hJDFJgJaylDtmoRhUxkwuYmNK1GJpW95b7DLztSBJ3A==", "dependencies": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "3.0.1", + "@docusaurus/types": "3.1.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2868,9 +2868,9 @@ } }, "node_modules/@docusaurus/theme-common/node_modules/clsx": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", - "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.0.tgz", + "integrity": "sha512-m3iNNWpd9rl3jvvcBnu70ylMdrXt8Vlq4HYadnU5fwcOtvkSQWPmj7amUcDT2qYI7risszBjI5AUIUox9D16pg==", "engines": { "node": ">=6" } @@ -2888,18 +2888,18 @@ } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.0.1.tgz", - "integrity": "sha512-DDiPc0/xmKSEdwFkXNf1/vH1SzJPzuJBar8kMcBbDAZk/SAmo/4lf6GU2drou4Ae60lN2waix+jYWTWcJRahSA==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.1.1.tgz", + "integrity": "sha512-tBH9VY5EpRctVdaAhT+b1BY8y5dyHVZGFXyCHgTrvcXQy5CV4q7serEX7U3SveNT9zksmchPyct6i1sFDC4Z5g==", "dependencies": { "@docsearch/react": "^3.5.2", - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/plugin-content-docs": "3.0.1", - "@docusaurus/theme-common": "3.0.1", - "@docusaurus/theme-translations": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/plugin-content-docs": "3.1.1", + "@docusaurus/theme-common": "3.1.1", + "@docusaurus/theme-translations": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "algoliasearch": "^4.18.0", "algoliasearch-helper": "^3.13.3", "clsx": "^2.0.0", @@ -2926,9 +2926,9 @@ } }, "node_modules/@docusaurus/theme-translations": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.0.1.tgz", - "integrity": "sha512-6UrbpzCTN6NIJnAtZ6Ne9492vmPVX+7Fsz4kmp+yor3KQwA1+MCzQP7ItDNkP38UmVLnvB/cYk/IvehCUqS3dg==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.1.1.tgz", + "integrity": "sha512-xvWQFwjxHphpJq5fgk37FXCDdAa2o+r7FX8IpMg+bGZBNXyWBu3MjZ+G4+eUVNpDhVinTc+j6ucL0Ain5KCGrg==", "dependencies": { "fs-extra": "^11.1.1", 
"tslib": "^2.6.0" @@ -2938,10 +2938,11 @@ } }, "node_modules/@docusaurus/types": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.0.1.tgz", - "integrity": "sha512-plyX2iU1tcUsF46uQ01pAd4JhexR7n0iiQ5MSnBFX6M6NSJgDYdru/i1/YNPKOnQHBoXGLHv0dNT6OAlDWNjrg==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.1.1.tgz", + "integrity": "sha512-grBqOLnubUecgKFXN9q3uit2HFbCxTWX4Fam3ZFbMN0sWX9wOcDoA7lwdX/8AmeL20Oc4kQvWVgNrsT8bKRvzg==", "dependencies": { + "@mdx-js/mdx": "^3.0.0", "@types/history": "^4.7.11", "@types/react": "*", "commander": "^5.1.0", @@ -2957,11 +2958,11 @@ } }, "node_modules/@docusaurus/utils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.0.1.tgz", - "integrity": "sha512-TwZ33Am0q4IIbvjhUOs+zpjtD/mXNmLmEgeTGuRq01QzulLHuPhaBTTAC/DHu6kFx3wDgmgpAlaRuCHfTcXv8g==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.1.1.tgz", + "integrity": "sha512-ZJfJa5cJQtRYtqijsPEnAZoduW6sjAQ7ZCWSZavLcV10Fw0Z3gSaPKA/B4micvj2afRZ4gZxT7KfYqe5H8Cetg==", "dependencies": { - "@docusaurus/logger": "3.0.1", + "@docusaurus/logger": "3.1.1", "@svgr/webpack": "^6.5.1", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -2992,9 +2993,9 @@ } }, "node_modules/@docusaurus/utils-common": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.0.1.tgz", - "integrity": "sha512-W0AxD6w6T8g6bNro8nBRWf7PeZ/nn7geEWM335qHU2DDDjHuV4UZjgUGP1AQsdcSikPrlIqTJJbKzer1lRSlIg==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.1.1.tgz", + "integrity": "sha512-eGne3olsIoNfPug5ixjepZAIxeYFzHHnor55Wb2P57jNbtVaFvij/T+MS8U0dtZRFi50QU+UPmRrXdVUM8uyMg==", "dependencies": { "tslib": "^2.6.0" }, @@ -3011,12 +3012,12 @@ } }, "node_modules/@docusaurus/utils-validation": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.0.1.tgz", - "integrity": "sha512-ujTnqSfyGQ7/4iZdB4RRuHKY/Nwm58IIb+41s5tCXOv/MBU2wGAjOHq3U+AEyJ8aKQcHbxvTKJaRchNHYUVUQg==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.1.1.tgz", + "integrity": "sha512-KlY4P9YVDnwL+nExvlIpu79abfEv6ZCHuOX4ZQ+gtip+Wxj0daccdReIWWtqxM/Fb5Cz1nQvUCc7VEtT8IBUAA==", "dependencies": { - "@docusaurus/logger": "3.0.1", - "@docusaurus/utils": "3.0.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/utils": "3.1.1", "joi": "^17.9.2", "js-yaml": "^4.1.0", "tslib": "^2.6.0" @@ -4928,12 +4929,12 @@ "dev": true }, "node_modules/body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", @@ -4941,7 +4942,7 @@ "iconv-lite": "0.4.24", "on-finished": "2.4.1", "qs": "6.11.0", - "raw-body": "2.5.1", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -6124,9 +6125,9 @@ "integrity": 
"sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" }, "node_modules/cookie": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", "engines": { "node": ">= 0.6" } @@ -9620,16 +9621,16 @@ } }, "node_modules/express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.1", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", @@ -10005,9 +10006,9 @@ } }, "node_modules/follow-redirects": { - "version": "1.15.3", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz", - "integrity": "sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==", + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", "funding": [ { "type": "individual", @@ -11688,9 +11689,9 @@ } }, "node_modules/ip": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ip/-/ip-2.0.0.tgz", - "integrity": "sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ip/-/ip-2.0.1.tgz", + "integrity": "sha512-lJUL9imLTNi1ZfXT+DU6rBBdbiKGBuay9B6xGSPVjUeQwaH1RIGqef8RZkUtHioLmSNpPR5M4HVKJGm1j8FWVQ==", "dev": true }, "node_modules/ipaddr.js": { @@ -17476,9 +17477,9 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -22018,9 +22019,9 @@ } }, "node_modules/webpack-dev-middleware": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", - "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", "dependencies": { "colorette": "^2.0.10", "memfs": "^3.4.3", diff --git a/docs/package.json 
b/docs/package.json index 35f38de592..d03e8f4a7d 100644 --- a/docs/package.json +++ b/docs/package.json @@ -16,10 +16,10 @@ "dependencies": { "@babel/preset-react": "^7.22.3", "@code-hike/mdx": "^0.9.0", - "@docusaurus/core": "3.0.1", - "@docusaurus/plugin-ideal-image": "^3.0.1", - "@docusaurus/preset-classic": "3.0.1", - "@docusaurus/theme-classic": "^3.0.1", + "@docusaurus/core": "^3.1.1", + "@docusaurus/plugin-ideal-image": "^3.1.1", + "@docusaurus/preset-classic": "^3.1.1", + "@docusaurus/theme-classic": "^3.1.1", "@docusaurus/theme-search-algolia": "^3.0.1", "@mdx-js/react": "^2.3.0", "@mendable/search": "^0.0.154", @@ -69,4 +69,4 @@ "engines": { "node": ">=16.14" } -} \ No newline at end of file +} diff --git a/docs/sidebars.js b/docs/sidebars.js index edcb276710..92fed62ccf 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -11,6 +11,17 @@ module.exports = { "getting-started/creating-flows", ], }, + { + type: "category", + label: "What's New", + collapsed: false, + items: [ + "whats-new/migrating-to-one-point-zero", + "whats-new/customization-control", + "whats-new/debugging-reimagined", + "whats-new/simplification-standardization", + ], + }, { type: "category", label: "Guidelines", @@ -66,18 +77,25 @@ module.exports = { "guides/loading_document", "guides/chatprompttemplate_guide", "guides/langfuse_integration", + "guides/inputs-and-outputs", + "guides/flow-of-data", + "guides/supported-frameworks", + "guides/sidebar-and-interaction-panel", + "guides/new-categories-and-components", + "guides/text-and-record", + "guides/custom-component", + "guides/compatibility", + "guides/multiple-flows", + "guides/component-status-and-data-passing", + "guides/connecting-output-components", + "guides/renaming-and-editing-components", + "guides/passing-tweaks-and-inputs", + "guides/global-variables", + "guides/experimental-components", + "guides/state-management", + "guides/run-flow", ], }, - // { - // type: 'category', - // label: 'Components', - // collapsed: false, - // items: [ - // 'components/agents', 'components/chains', 'components/loaders', 'components/embeddings', 'components/llms', - // 'components/memories', 'components/prompts','components/text-splitters', 'components/toolkits', 'components/tools', - // 'components/utilities', 'components/vector-stores', 'components/wrappers', - // ], - // }, { type: "category", label: "Examples", diff --git a/src/backend/base/langflow/components/experimental/GetNotified.py b/src/backend/base/langflow/components/experimental/Listen.py similarity index 75% rename from src/backend/base/langflow/components/experimental/GetNotified.py rename to src/backend/base/langflow/components/experimental/Listen.py index d2ee6ede94..cab979f708 100644 --- a/src/backend/base/langflow/components/experimental/GetNotified.py +++ b/src/backend/base/langflow/components/experimental/Listen.py @@ -2,9 +2,9 @@ from langflow.schema import Record -class GetNotifiedComponent(CustomComponent): - display_name = "Get Notified" - description = "A component to get notified by Notify component." +class ListenComponent(CustomComponent): + display_name = "Listen" + description = "A component to listen for a notification." 
beta: bool = True def build_config(self): diff --git a/src/backend/base/langflow/components/experimental/__init__.py b/src/backend/base/langflow/components/experimental/__init__.py index 412e9075af..d23a731063 100644 --- a/src/backend/base/langflow/components/experimental/__init__.py +++ b/src/backend/base/langflow/components/experimental/__init__.py @@ -1,6 +1,6 @@ from .ClearMessageHistory import ClearMessageHistoryComponent from .ExtractDataFromRecord import ExtractKeyFromRecordComponent -from .GetNotified import GetNotifiedComponent +from .Listen import GetNotifiedComponent from .ListFlows import ListFlowsComponent from .MergeRecords import MergeRecordsComponent from .Notify import NotifyComponent diff --git a/src/backend/base/langflow/services/manager.py b/src/backend/base/langflow/services/manager.py index 6902b4a892..20186f7da7 100644 --- a/src/backend/base/langflow/services/manager.py +++ b/src/backend/base/langflow/services/manager.py @@ -106,7 +106,9 @@ def initialize_session_service(): Initialize the session manager. """ from langflow.services.cache import factory as cache_factory - from langflow.services.session import factory as session_service_factory # type: ignore + from langflow.services.session import ( + factory as session_service_factory, + ) # type: ignore initialize_settings_service() diff --git a/src/backend/langflow/base/agents/__init__.py b/src/backend/langflow/base/agents/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/backend/langflow/base/agents/agent.py b/src/backend/langflow/base/agents/agent.py new file mode 100644 index 0000000000..8dac73e61c --- /dev/null +++ b/src/backend/langflow/base/agents/agent.py @@ -0,0 +1,70 @@ +from typing import List, Union + +from langchain.agents import AgentExecutor, BaseMultiActionAgent, BaseSingleActionAgent + +from langflow import CustomComponent +from langflow.field_typing import BaseMemory, Text, Tool + + +class LCAgentComponent(CustomComponent): + def build_config(self): + return { + "lc": { + "display_name": "LangChain", + "info": "The LangChain to interact with.", + }, + "handle_parsing_errors": { + "display_name": "Handle Parsing Errors", + "info": "If True, the agent will handle parsing errors. 
If False, the agent will raise an error.", + "advanced": True, + }, + "output_key": { + "display_name": "Output Key", + "info": "The key to use to get the output from the agent.", + "advanced": True, + }, + "memory": { + "display_name": "Memory", + "info": "Memory to use for the agent.", + }, + "tools": { + "display_name": "Tools", + "info": "Tools the agent can use.", + }, + "input_value": { + "display_name": "Input", + "info": "Input text to pass to the agent.", + }, + } + + async def run_agent( + self, + agent: Union[BaseSingleActionAgent, BaseMultiActionAgent, AgentExecutor], + inputs: str, + input_variables: list[str], + tools: List[Tool], + memory: BaseMemory = None, + handle_parsing_errors: bool = True, + output_key: str = "output", + ) -> Text: + if isinstance(agent, AgentExecutor): + runnable = agent + else: + runnable = AgentExecutor.from_agent_and_tools( + agent=agent, tools=tools, verbose=True, memory=memory, handle_parsing_errors=handle_parsing_errors + ) + input_dict = {"input": inputs} + for var in input_variables: + if var not in ["agent_scratchpad", "input"]: + input_dict[var] = "" + result = await runnable.ainvoke(input_dict) + self.status = result + if output_key in result: + return result.get(output_key) + elif "output" not in result: + if output_key != "output": + raise ValueError(f"Output key not found in result. Tried '{output_key}' and 'output'.") + else: + raise ValueError("Output key not found in result. Tried 'output'.") + + return result.get("output") diff --git a/src/backend/langflow/base/models/__init__.py b/src/backend/langflow/base/models/__init__.py new file mode 100644 index 0000000000..921f10336a --- /dev/null +++ b/src/backend/langflow/base/models/__init__.py @@ -0,0 +1,3 @@ +from .model import LCModelComponent + +__all__ = ["LCModelComponent"] diff --git a/src/backend/langflow/base/models/model.py b/src/backend/langflow/base/models/model.py new file mode 100644 index 0000000000..e2ab4b6cf8 --- /dev/null +++ b/src/backend/langflow/base/models/model.py @@ -0,0 +1,48 @@ +from typing import Optional + +from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.language_models.llms import LLM +from langchain_core.messages import HumanMessage, SystemMessage + +from langflow import CustomComponent + + +class LCModelComponent(CustomComponent): + display_name: str = "Model Name" + description: str = "Model Description" + + def get_result(self, runnable: LLM, stream: bool, input_value: str): + """ + Retrieves the result from the output of a Runnable object. + + Args: + output (Runnable): The output object to retrieve the result from. + stream (bool): Indicates whether to use streaming or invocation mode. + input_value (str): The input value to pass to the output object. + + Returns: + The result obtained from the output object. 
+ """ + if stream: + result = runnable.stream(input_value) + else: + message = runnable.invoke(input_value) + result = message.content if hasattr(message, "content") else message + self.status = result + return result + + def get_chat_result( + self, runnable: BaseChatModel, stream: bool, input_value: str, system_message: Optional[str] = None + ): + messages = [] + if input_value: + messages.append(HumanMessage(input_value)) + if system_message: + messages.append(SystemMessage(system_message)) + if stream: + result = runnable.stream(messages) + else: + message = runnable.invoke(messages) + result = message.content + self.status = result + return result diff --git a/src/backend/langflow/components/experimental/FlowTool.py b/src/backend/langflow/components/experimental/FlowTool.py new file mode 100644 index 0000000000..5f504ff027 --- /dev/null +++ b/src/backend/langflow/components/experimental/FlowTool.py @@ -0,0 +1,85 @@ +from typing import Any, List, Optional, Text + +from langchain_core.tools import StructuredTool +from loguru import logger + +from langflow import CustomComponent +from langflow.field_typing import Tool +from langflow.graph.graph.base import Graph +from langflow.helpers.flow import build_function_and_schema +from langflow.schema.dotdict import dotdict + + +class FlowToolComponent(CustomComponent): + display_name = "Flow as Tool" + description = "Construct a Tool from a function that runs the loaded Flow." + field_order = ["flow_name", "name", "description", "return_direct"] + + def get_flow_names(self) -> List[str]: + flow_records = self.list_flows() + return [flow_record.data["name"] for flow_record in flow_records] + + def get_flow(self, flow_name: str) -> Optional[Text]: + """ + Retrieves a flow by its name. + + Args: + flow_name (str): The name of the flow to retrieve. + + Returns: + Optional[Text]: The flow record if found, None otherwise. 
+ """ + flow_records = self.list_flows() + for flow_record in flow_records: + if flow_record.data["name"] == flow_name: + return flow_record + return None + + def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None): + logger.debug(f"Updating build config with field value {field_value} and field name {field_name}") + if field_name == "flow_name": + build_config["flow_name"]["options"] = self.get_flow_names() + + return build_config + + def build_config(self): + return { + "flow_name": { + "display_name": "Flow Name", + "info": "The name of the flow to run.", + "options": [], + "real_time_refresh": True, + "refresh_button": True, + }, + "name": { + "display_name": "Name", + "description": "The name of the tool.", + }, + "description": { + "display_name": "Description", + "description": "The description of the tool.", + }, + "return_direct": { + "display_name": "Return Direct", + "description": "Return the result directly from the Tool.", + "advanced": True, + }, + } + + async def build(self, flow_name: str, name: str, description: str, return_direct: bool = False) -> Tool: + flow_record = self.get_flow(flow_name) + if not flow_record: + raise ValueError("Flow not found.") + graph = Graph.from_payload(flow_record.data["data"]) + dynamic_flow_function, schema = build_function_and_schema(flow_record, graph) + tool = StructuredTool.from_function( + coroutine=dynamic_flow_function, + name=name, + description=description, + return_direct=return_direct, + args_schema=schema, + ) + description_repr = repr(tool.description).strip("'") + args_str = "\n".join([f"- {arg_name}: {arg_data['description']}" for arg_name, arg_data in tool.args.items()]) + self.status = f"{description_repr}\nArguments:\n{args_str}" + return tool diff --git a/src/backend/langflow/components/tools/SearchAPITool.py b/src/backend/langflow/components/tools/SearchAPITool.py new file mode 100644 index 0000000000..76c82ad9c5 --- /dev/null +++ b/src/backend/langflow/components/tools/SearchAPITool.py @@ -0,0 +1,37 @@ +from langchain_community.tools.searchapi import SearchAPIRun +from langchain_community.utilities.searchapi import SearchApiAPIWrapper + +from langflow import CustomComponent +from langflow.field_typing import Tool + + +class SearchApiToolComponent(CustomComponent): + display_name: str = "SearchApi Tool" + description: str = "Real-time search engine results API." + documentation: str = "https://www.searchapi.io/docs/google" + field_config = { + "engine": { + "display_name": "Engine", + "field_type": "str", + "info": "The search engine to use.", + }, + "api_key": { + "display_name": "API Key", + "field_type": "str", + "required": True, + "password": True, + "info": "The API key to use SearchApi.", + }, + } + + def build( + self, + engine: str, + api_key: str, + ) -> Tool: + search_api_wrapper = SearchApiAPIWrapper(engine=engine, searchapi_api_key=api_key) + + tool = SearchAPIRun(api_wrapper=search_api_wrapper) + + self.status = tool + return tool
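For context on how the new SearchApi tool component is used: its build() simply wires together the two LangChain classes it imports, so an equivalent standalone sketch looks roughly like this (the API key is a placeholder, and running the tool requires a valid SearchApi account):

```python
from langchain_community.tools.searchapi import SearchAPIRun
from langchain_community.utilities.searchapi import SearchApiAPIWrapper

# Mirror what SearchApiToolComponent.build() does.
wrapper = SearchApiAPIWrapper(engine="google", searchapi_api_key="YOUR_SEARCHAPI_KEY")
tool = SearchAPIRun(api_wrapper=wrapper)

# The resulting Tool can be handed to an agent or invoked directly.
print(tool.run("Langflow 1.0 release notes"))
```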