Integrations

Langchain with Python SDK

In this guide, we show you how to integrate your products with Langchain using Orquesta Python SDK.

Step 1 - Install the SDK

pip install orquesta-sdk
pip install langchain

Step 2 - Execute prompt

You can find your API Key in your workspace https://my.orquesta.dev/<workspace-name>/settings/developers

import os
import time

from orquesta_sdk import OrquestaClient, OrquestaClientOptions
from orquesta_sdk.prompts import OrquestaPromptMetrics, OrquestaPromptMetricsEconomics
from orquesta_sdk.helpers import orquesta_openai_parameters_mapper

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback

# Initialize the Orquesta client. Prefer the ORQUESTA_API_KEY environment
# variable so the key is never hard-coded; the literal placeholder is kept
# as a fallback for the tutorial. `ttl` is the client-side cache lifetime
# (seconds) for prompt configurations.
api_key = os.environ.get("ORQUESTA_API_KEY", "ORQUESTA-API-Key")
options = OrquestaClientOptions(api_key=api_key, ttl=3600)
client = OrquestaClient(options)

# Fetch the prompt configuration for the "test" environment. `variables`
# fills the prompt's template placeholders; `metadata` is attached to the
# resulting log entry in Orquesta.
prompt = client.prompts.query(
    key="customer-support-chat",
    context={"environments": ["test"]},
    variables={"customer_name": ""},
    metadata={"chain-id": "js2938js2ja"},
)

# Start time of the completion request, used for latency reporting below.
start_time = time.time()
print(f"Start time: {start_time}")

# Map each Orquesta message role onto the matching Langchain message class.
# Messages with an unrecognized role are skipped, matching the original
# if/elif chain's behavior.
_ROLE_TO_MESSAGE = {
    "system": SystemMessage,
    "user": HumanMessage,
    "assistant": AIMessage,
}

messages = []
for message in prompt.value.get("messages", []):
    message_cls = _ROLE_TO_MESSAGE.get(message.get("role"))
    if message_cls is not None:
        messages.append(message_cls(content=message.get("content")))

# Translate Orquesta's stored prompt parameters into OpenAI-compatible kwargs.
parameters = orquesta_openai_parameters_mapper(prompt.value)

# NOTE: the OpenAI key is distinct from the Orquesta key. The original
# snippet passed the literal string "api_key" here, which can never
# authenticate — read the real key from the environment instead.
chat = ChatOpenAI(
    temperature=parameters.get("temperature"),
    max_tokens=parameters.get("max_tokens"),
    openai_api_key=os.environ.get("OPENAI_API_KEY"),
)

with get_openai_callback() as cb:
    result = chat(messages)

    # End time of the completion request.
    end_time = time.time()
    print(f"End time: {end_time}")

    print(result.content)

    # Latency of the completion round-trip, in milliseconds.
    latency = (end_time - start_time) * 1000
    print(f"Latency is: {latency}")

    # Token usage as reported by the Langchain OpenAI callback.
    economics = OrquestaPromptMetricsEconomics(
        total_tokens=cb.total_tokens,
        completion_tokens=cb.completion_tokens,
        prompt_tokens=cb.prompt_tokens,
    )

    # Report the response, latency and token economics back to Orquesta.
    metrics = OrquestaPromptMetrics(
        economics=economics,
        llm_response=result.content,
        latency=latency,
    )

    prompt.add_metrics(metrics=metrics)

Langchain with Python SDK

In this guide, we show you how to integrate your products with Langchain using Orquesta Python SDK.

Step 1 - Install the SDK

pip install orquesta-sdk
pip install langchain

Step 2 - Execute prompt

You can find your API Key in your workspace https://my.orquesta.dev/<workspace-name>/settings/developers

import os
import time

from orquesta_sdk import OrquestaClient, OrquestaClientOptions
from orquesta_sdk.prompts import OrquestaPromptMetrics, OrquestaPromptMetricsEconomics
from orquesta_sdk.helpers import orquesta_openai_parameters_mapper

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback

# Initialize the Orquesta client. Prefer the ORQUESTA_API_KEY environment
# variable so the key is never hard-coded; the literal placeholder is kept
# as a fallback for the tutorial. `ttl` is the client-side cache lifetime
# (seconds) for prompt configurations.
api_key = os.environ.get("ORQUESTA_API_KEY", "ORQUESTA-API-Key")
options = OrquestaClientOptions(api_key=api_key, ttl=3600)
client = OrquestaClient(options)

# Fetch the prompt configuration for the "test" environment. `variables`
# fills the prompt's template placeholders; `metadata` is attached to the
# resulting log entry in Orquesta.
prompt = client.prompts.query(
    key="customer-support-chat",
    context={"environments": ["test"]},
    variables={"customer_name": ""},
    metadata={"chain-id": "js2938js2ja"},
)

# Start time of the completion request, used for latency reporting below.
start_time = time.time()
print(f"Start time: {start_time}")

# Map each Orquesta message role onto the matching Langchain message class.
# Messages with an unrecognized role are skipped, matching the original
# if/elif chain's behavior.
_ROLE_TO_MESSAGE = {
    "system": SystemMessage,
    "user": HumanMessage,
    "assistant": AIMessage,
}

messages = []
for message in prompt.value.get("messages", []):
    message_cls = _ROLE_TO_MESSAGE.get(message.get("role"))
    if message_cls is not None:
        messages.append(message_cls(content=message.get("content")))

# Translate Orquesta's stored prompt parameters into OpenAI-compatible kwargs.
parameters = orquesta_openai_parameters_mapper(prompt.value)

# NOTE: the OpenAI key is distinct from the Orquesta key. The original
# snippet passed the literal string "api_key" here, which can never
# authenticate — read the real key from the environment instead.
chat = ChatOpenAI(
    temperature=parameters.get("temperature"),
    max_tokens=parameters.get("max_tokens"),
    openai_api_key=os.environ.get("OPENAI_API_KEY"),
)

with get_openai_callback() as cb:
    result = chat(messages)

    # End time of the completion request.
    end_time = time.time()
    print(f"End time: {end_time}")

    print(result.content)

    # Latency of the completion round-trip, in milliseconds.
    latency = (end_time - start_time) * 1000
    print(f"Latency is: {latency}")

    # Token usage as reported by the Langchain OpenAI callback.
    economics = OrquestaPromptMetricsEconomics(
        total_tokens=cb.total_tokens,
        completion_tokens=cb.completion_tokens,
        prompt_tokens=cb.prompt_tokens,
    )

    # Report the response, latency and token economics back to Orquesta.
    metrics = OrquestaPromptMetrics(
        economics=economics,
        llm_response=result.content,
        latency=latency,
    )

    prompt.add_metrics(metrics=metrics)

Langchain with Python SDK

In this guide, we show you how to integrate your products with Langchain using Orquesta Python SDK.

Step 1 - Install the SDK

pip install orquesta-sdk
pip install langchain

Step 2 - Execute prompt

You can find your API Key in your workspace https://my.orquesta.dev/<workspace-name>/settings/developers

import os
import time

from orquesta_sdk import OrquestaClient, OrquestaClientOptions
from orquesta_sdk.prompts import OrquestaPromptMetrics, OrquestaPromptMetricsEconomics
from orquesta_sdk.helpers import orquesta_openai_parameters_mapper

from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback

# Initialize the Orquesta client. Prefer the ORQUESTA_API_KEY environment
# variable so the key is never hard-coded; the literal placeholder is kept
# as a fallback for the tutorial. `ttl` is the client-side cache lifetime
# (seconds) for prompt configurations.
api_key = os.environ.get("ORQUESTA_API_KEY", "ORQUESTA-API-Key")
options = OrquestaClientOptions(api_key=api_key, ttl=3600)
client = OrquestaClient(options)

# Fetch the prompt configuration for the "test" environment. `variables`
# fills the prompt's template placeholders; `metadata` is attached to the
# resulting log entry in Orquesta.
prompt = client.prompts.query(
    key="customer-support-chat",
    context={"environments": ["test"]},
    variables={"customer_name": ""},
    metadata={"chain-id": "js2938js2ja"},
)

# Start time of the completion request, used for latency reporting below.
start_time = time.time()
print(f"Start time: {start_time}")

# Map each Orquesta message role onto the matching Langchain message class.
# Messages with an unrecognized role are skipped, matching the original
# if/elif chain's behavior.
_ROLE_TO_MESSAGE = {
    "system": SystemMessage,
    "user": HumanMessage,
    "assistant": AIMessage,
}

messages = []
for message in prompt.value.get("messages", []):
    message_cls = _ROLE_TO_MESSAGE.get(message.get("role"))
    if message_cls is not None:
        messages.append(message_cls(content=message.get("content")))

# Translate Orquesta's stored prompt parameters into OpenAI-compatible kwargs.
parameters = orquesta_openai_parameters_mapper(prompt.value)

# NOTE: the OpenAI key is distinct from the Orquesta key. The original
# snippet passed the literal string "api_key" here, which can never
# authenticate — read the real key from the environment instead.
chat = ChatOpenAI(
    temperature=parameters.get("temperature"),
    max_tokens=parameters.get("max_tokens"),
    openai_api_key=os.environ.get("OPENAI_API_KEY"),
)

with get_openai_callback() as cb:
    result = chat(messages)

    # End time of the completion request.
    end_time = time.time()
    print(f"End time: {end_time}")

    print(result.content)

    # Latency of the completion round-trip, in milliseconds.
    latency = (end_time - start_time) * 1000
    print(f"Latency is: {latency}")

    # Token usage as reported by the Langchain OpenAI callback.
    economics = OrquestaPromptMetricsEconomics(
        total_tokens=cb.total_tokens,
        completion_tokens=cb.completion_tokens,
        prompt_tokens=cb.prompt_tokens,
    )

    # Report the response, latency and token economics back to Orquesta.
    metrics = OrquestaPromptMetrics(
        economics=economics,
        llm_response=result.content,
        latency=latency,
    )

    prompt.add_metrics(metrics=metrics)

Start powering your SaaS with LLMs