Loading...
Loading...
Optimize LLM prompts, tools, and agents in Opik using standardized optimizer workflows (prompt optimization, tool optimization, and parameter tuning), dataset/metric wiring, and result interpretation.
npx skill4agent add vincentkoc/dotskills opik-optimizer

Key APIs: ChatPrompt; optimizers MetaPromptOptimizer, FewShotBayesianOptimizer, HRPO; entry points optimize_prompt(...) / optimize_parameter(...); common parameters n_threads, n_samples, max_trials; results come back as OptimizationResult (fields include score and initial_score).
Further reading: references/algorithms.md, references/prompt_agent_workflow.md, references/example_patterns.md

Install: pip install opik-optimizer

from opik_optimizer import ChatPrompt, MetaPromptOptimizer, HRPO, FewShotBayesianOptimizer
from opik_optimizer import datasets
from opik.evaluation.metrics import LevenshteinRatio
# Seed prompt to optimize: terse system persona, with {question}
# substituted from each dataset item at evaluation time.
prompt = ChatPrompt(
    user="{question}",
    system="You are a concise answerer.",
)
def metric(dataset_item: dict, output: str) -> float:
    """Score a model output against the dataset item's reference answer.

    Args:
        dataset_item: One dataset record; must contain an "answer" key
            holding the reference text.
        output: The LLM's produced answer.

    Returns:
        The Levenshtein ratio between `output` and the reference answer
        (higher is closer; `.value` unwraps the ScoreResult to a float).
    """
    return LevenshteinRatio().score(
        reference=dataset_item["answer"],
        output=output,
    ).value


# The source extraction had fused this statement onto the line above,
# producing a syntax error; it is a separate top-level statement.
# 30-item sample of the HotpotQA dataset bundled with opik_optimizer.
dataset = datasets.hotpot(count=30)
# Optimize the prompt with the MetaPrompt strategy, evaluating on a
# 20-sample subset and stopping after at most 10 candidate trials.
optimizer = MetaPromptOptimizer(model="openai/gpt-5-nano")
result = optimizer.optimize_prompt(
    prompt=prompt,
    dataset=dataset,
    metric=metric,
    n_samples=20,
    max_trials=10,
)
# Print the optimization summary (the extraction had fused unrelated
# index text onto this line, making it a syntax error; only the call
# below is part of the example script).
result.display()