Integrations

OpenAI with Python SDK

In this guide, we show you how to integrate your products with OpenAI using Orquesta Python SDK.

For the longer blog article, see: Integrate Orquesta with OpenAI using Python SDK

Step 1 - Install the SDK

pip install orquesta-sdk

Step 2 - Execute prompt

You can find your Orquesta API key in your workspace: https://my.orquesta.dev/<workspace-name>/settings/developers

import os
import time
import openai
from orquesta_sdk import OrquestaClient, OrquestaClientOptions
from orquesta_sdk.helpers import orquesta_openai_parameters_mapper
from orquesta_sdk.prompts import OrquestaPromptMetrics

# Read secrets from the environment instead of hard-coding them in source;
# the placeholder is kept as a fallback so the snippet still runs as-is.
openai.api_key = os.environ.get("OPENAI_API_KEY", "<OPENAI_API_KEY>")

# Initialize Orquesta client.
# ttl: how long (seconds) the SDK may cache fetched prompts locally.
api_key = os.environ.get("ORQUESTA_API_KEY", "<ORQUESTA_API_KEY>")
options = OrquestaClientOptions(
    api_key=api_key,
    ttl=3600,
)

client = OrquestaClient(options)

# Query the prompt variant from Orquesta that matches this evaluation context.
prompt = client.prompts.query(
    key="customer-support-chat",
    context={
        "environments": ["test"],
        "country": ["BEL", "NLD"],
        "locale": ["en"],
        "user-segment": ["b2c"],
    },
    variables={"customer_name": "John"},
    metadata={"user_id": 45515},
)

# Abort early: without a prompt there is nothing valid to send to OpenAI,
# and continuing would fail on prompt.value below anyway.
if prompt.has_error:
    raise SystemExit("There was an error while fetching the prompt")

# Use a monotonic clock for latency measurement; time.time() is wall-clock
# time and can jump backwards/forwards (NTP adjustments), skewing the result.
start_time = time.perf_counter()

completion = openai.ChatCompletion.create(
    **orquesta_openai_parameters_mapper(prompt.value),
    model=prompt.value.get("model"),
    messages=prompt.value.get("messages"),
)

# Latency of the completion request in milliseconds.
latency = (time.perf_counter() - start_time) * 1000
print(f'Latency is: {latency}')

Step 3 - Report analytics back to Orquesta

After each query, Orquesta generates a log with a Trace ID. Using the add_metrics() method, you can attach additional information to that log, such as the LLM response, metadata, latency, and economics.

# Send token usage, the model's response, latency, and the finish reason
# back to Orquesta so they appear on the query's Trace ID log.
usage = completion.usage
first_choice = completion.choices[0]

metrics = OrquestaPromptMetrics(
    economics={
        "total_tokens": usage.get("total_tokens"),
        "completion_tokens": usage.get("completion_tokens"),
        "prompt_tokens": usage.get("prompt_tokens"),
    },
    llm_response=first_choice.message.content,
    latency=latency,
    metadata={"finish_reason": first_choice.finish_reason},
)

prompt.add_metrics(metrics=metrics)

OpenAI with Python SDK

In this guide, we show you how to integrate your products with OpenAI using Orquesta Python SDK.

For the longer blog article, see: Integrate Orquesta with OpenAI using Python SDK

Step 1 - Install the SDK

pip install orquesta-sdk

Step 2 - Execute prompt

You can find your Orquesta API key in your workspace: https://my.orquesta.dev/<workspace-name>/settings/developers

import os
import time
import openai
from orquesta_sdk import OrquestaClient, OrquestaClientOptions
from orquesta_sdk.helpers import orquesta_openai_parameters_mapper
from orquesta_sdk.prompts import OrquestaPromptMetrics

# Read secrets from the environment instead of hard-coding them in source;
# the placeholder is kept as a fallback so the snippet still runs as-is.
openai.api_key = os.environ.get("OPENAI_API_KEY", "<OPENAI_API_KEY>")

# Initialize Orquesta client.
# ttl: how long (seconds) the SDK may cache fetched prompts locally.
api_key = os.environ.get("ORQUESTA_API_KEY", "<ORQUESTA_API_KEY>")
options = OrquestaClientOptions(
    api_key=api_key,
    ttl=3600,
)

client = OrquestaClient(options)

# Query the prompt variant from Orquesta that matches this evaluation context.
prompt = client.prompts.query(
    key="customer-support-chat",
    context={
        "environments": ["test"],
        "country": ["BEL", "NLD"],
        "locale": ["en"],
        "user-segment": ["b2c"],
    },
    variables={"customer_name": "John"},
    metadata={"user_id": 45515},
)

# Abort early: without a prompt there is nothing valid to send to OpenAI,
# and continuing would fail on prompt.value below anyway.
if prompt.has_error:
    raise SystemExit("There was an error while fetching the prompt")

# Use a monotonic clock for latency measurement; time.time() is wall-clock
# time and can jump backwards/forwards (NTP adjustments), skewing the result.
start_time = time.perf_counter()

completion = openai.ChatCompletion.create(
    **orquesta_openai_parameters_mapper(prompt.value),
    model=prompt.value.get("model"),
    messages=prompt.value.get("messages"),
)

# Latency of the completion request in milliseconds.
latency = (time.perf_counter() - start_time) * 1000
print(f'Latency is: {latency}')

Step 3 - Report analytics back to Orquesta

After each query, Orquesta generates a log with a Trace ID. Using the add_metrics() method, you can attach additional information to that log, such as the LLM response, metadata, latency, and economics.

# Send token usage, the model's response, latency, and the finish reason
# back to Orquesta so they appear on the query's Trace ID log.
usage = completion.usage
first_choice = completion.choices[0]

metrics = OrquestaPromptMetrics(
    economics={
        "total_tokens": usage.get("total_tokens"),
        "completion_tokens": usage.get("completion_tokens"),
        "prompt_tokens": usage.get("prompt_tokens"),
    },
    llm_response=first_choice.message.content,
    latency=latency,
    metadata={"finish_reason": first_choice.finish_reason},
)

prompt.add_metrics(metrics=metrics)

OpenAI with Python SDK

In this guide, we show you how to integrate your products with OpenAI using Orquesta Python SDK.

For the longer blog article, see: Integrate Orquesta with OpenAI using Python SDK

Step 1 - Install the SDK

pip install orquesta-sdk

Step 2 - Execute prompt

You can find your Orquesta API key in your workspace: https://my.orquesta.dev/<workspace-name>/settings/developers

import os
import time
import openai
from orquesta_sdk import OrquestaClient, OrquestaClientOptions
from orquesta_sdk.helpers import orquesta_openai_parameters_mapper
from orquesta_sdk.prompts import OrquestaPromptMetrics

# Read secrets from the environment instead of hard-coding them in source;
# the placeholder is kept as a fallback so the snippet still runs as-is.
openai.api_key = os.environ.get("OPENAI_API_KEY", "<OPENAI_API_KEY>")

# Initialize Orquesta client.
# ttl: how long (seconds) the SDK may cache fetched prompts locally.
api_key = os.environ.get("ORQUESTA_API_KEY", "<ORQUESTA_API_KEY>")
options = OrquestaClientOptions(
    api_key=api_key,
    ttl=3600,
)

client = OrquestaClient(options)

# Query the prompt variant from Orquesta that matches this evaluation context.
prompt = client.prompts.query(
    key="customer-support-chat",
    context={
        "environments": ["test"],
        "country": ["BEL", "NLD"],
        "locale": ["en"],
        "user-segment": ["b2c"],
    },
    variables={"customer_name": "John"},
    metadata={"user_id": 45515},
)

# Abort early: without a prompt there is nothing valid to send to OpenAI,
# and continuing would fail on prompt.value below anyway.
if prompt.has_error:
    raise SystemExit("There was an error while fetching the prompt")

# Use a monotonic clock for latency measurement; time.time() is wall-clock
# time and can jump backwards/forwards (NTP adjustments), skewing the result.
start_time = time.perf_counter()

completion = openai.ChatCompletion.create(
    **orquesta_openai_parameters_mapper(prompt.value),
    model=prompt.value.get("model"),
    messages=prompt.value.get("messages"),
)

# Latency of the completion request in milliseconds.
latency = (time.perf_counter() - start_time) * 1000
print(f'Latency is: {latency}')

Step 3 - Report analytics back to Orquesta

After each query, Orquesta generates a log with a Trace ID. Using the add_metrics() method, you can attach additional information to that log, such as the LLM response, metadata, latency, and economics.

# Send token usage, the model's response, latency, and the finish reason
# back to Orquesta so they appear on the query's Trace ID log.
usage = completion.usage
first_choice = completion.choices[0]

metrics = OrquestaPromptMetrics(
    economics={
        "total_tokens": usage.get("total_tokens"),
        "completion_tokens": usage.get("completion_tokens"),
        "prompt_tokens": usage.get("prompt_tokens"),
    },
    llm_response=first_choice.message.content,
    latency=latency,
    metadata={"finish_reason": first_choice.finish_reason},
)

prompt.add_metrics(metrics=metrics)

Start powering your SaaS with LLMs