# Grafana Cloud Application Observability, Frontend Observability (Faro), and AI Observability
Grafana Cloud Application Observability (APM), Frontend Observability (RUM/Faro), and AI Observability. Covers RED metrics (Rate/Error/Duration), service maps, span metrics from traces, Faro JavaScript/React SDK for browser instrumentation, session replay, AI/LLM model monitoring, and integration with traces/logs/profiles for full-stack correlation. Use when setting up APM, configuring frontend monitoring, analyzing service performance, or monitoring AI/LLM applications.
npx skill4agent add grafana/skills app-observability

Span metrics generated from traces: `traces_spanmetrics_calls_total` and `traces_spanmetrics_duration_seconds` (or `traces_span_metrics_calls_total` and `traces_span_metrics_duration_seconds`, depending on the metrics generator configuration).

| Attribute | Grafana Label | Purpose |
|---|---|---|
| `service.name` | `job` (as `service.namespace/service.name`) | Identifies the service |
| `service.namespace` | part of `job` | Groups services |
| `deployment.environment` | `deployment_environment` | Env filter (prod/dev/staging) |
Other useful resource attributes: `service.version`, `k8s.cluster.name`, `k8s.namespace.name`, `cloud.region`.

export OTEL_SERVICE_NAME="my-api"
export OTEL_RESOURCE_ATTRIBUTES="service.namespace=myteam,deployment.environment=production,service.version=1.2.3"
export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317"
export OTEL_EXPORTER_OTLP_PROTOCOL="grpc"

// Receive traces, metrics, logs from instrumented apps
otelcol.receiver.otlp "default" {
grpc {
endpoint = "0.0.0.0:4317"
}
http {
endpoint = "0.0.0.0:4318"
}
output {
metrics = [otelcol.processor.resourcedetection.default.input]
logs = [otelcol.processor.resourcedetection.default.input]
traces = [otelcol.processor.resourcedetection.default.input]
}
}
// Auto-detect host/cloud metadata
otelcol.processor.resourcedetection "default" {
detectors = ["env", "system", "gcp", "aws", "azure"]
output {
metrics = [otelcol.processor.batch.default.input]
logs = [otelcol.processor.batch.default.input]
traces = [otelcol.processor.batch.default.input]
}
}
// Batch for efficiency
otelcol.processor.batch "default" {
output {
metrics = [otelcol.exporter.otlphttp.grafana_cloud.input]
logs = [otelcol.exporter.otlphttp.grafana_cloud.input]
traces = [otelcol.exporter.otlphttp.grafana_cloud.input]
}
}
// Auth
otelcol.auth.basic "grafana_cloud" {
username = env("GRAFANA_CLOUD_INSTANCE_ID")
password = env("GRAFANA_CLOUD_API_KEY")
}
// Export to Grafana Cloud OTLP endpoint
otelcol.exporter.otlphttp "grafana_cloud" {
client {
endpoint = env("GRAFANA_CLOUD_OTLP_ENDPOINT")
auth = otelcol.auth.basic.grafana_cloud.handler
}
}

GRAFANA_CLOUD_OTLP_ENDPOINT=https://otlp-gateway-<region>.grafana.net/otlp
GRAFANA_CLOUD_INSTANCE_ID=<your-instance-id>
GRAFANA_CLOUD_API_KEY=<your-api-key>

Service graphs are derived from `span.kind` and emit the metrics `traces_service_graph_request_total` and `traces_service_graph_request_failed_total`, labeled by `service.name`.

@grafana/faro-core        # Core SDK - signals, transports, API
@grafana/faro-web-sdk # Web instrumentations + transports
@grafana/faro-web-tracing # OpenTelemetry-JS distributed tracing
@grafana/faro-react       # React-specific integrations (error boundary, router)

npm install @grafana/faro-web-sdk
# or
yarn add @grafana/faro-web-sdk

import {
initializeFaro,
getWebInstrumentations,
} from '@grafana/faro-web-sdk';
const faro = initializeFaro({
url: 'https://faro-collector-prod-<region>.grafana.net/collect/<app-key>',
app: {
name: 'my-frontend-app',
version: '1.0.0',
environment: 'production',
},
instrumentations: [
...getWebInstrumentations({
captureConsole: true,
}),
],
});
// Manual API usage
faro.api.pushLog(['User clicked checkout button']);
faro.api.pushError(new Error('Payment failed'));
faro.api.pushEvent('button_click', { button: 'checkout' });

<script src="https://unpkg.com/@grafana/faro-web-sdk@latest/dist/library/faro-web-sdk.iife.js"></script>
<script>
const { initializeFaro, getWebInstrumentations } = GrafanaFaroWebSdk;
initializeFaro({
url: 'https://faro-collector-prod-<region>.grafana.net/collect/<app-key>',
app: { name: 'my-app', version: '1.0.0' },
instrumentations: [...getWebInstrumentations()],
});
</script>

npm install @grafana/faro-react @grafana/faro-web-tracing

import { initializeFaro, getWebInstrumentations } from '@grafana/faro-web-sdk';
import { TracingInstrumentation } from '@grafana/faro-web-tracing';
import {
createReactRouterV6DataOptions,
ReactIntegration,
withFaroRouterInstrumentation,
} from '@grafana/faro-react';
import { createBrowserRouter, RouterProvider } from 'react-router-dom';
const faro = initializeFaro({
url: 'https://faro-collector-prod-<region>.grafana.net/collect/<app-key>',
app: {
name: 'my-react-app',
version: '1.0.0',
environment: 'production',
},
instrumentations: [
...getWebInstrumentations({ captureConsole: true }),
new TracingInstrumentation(),
new ReactIntegration({
router: createReactRouterV6DataOptions({}),
}),
],
});
const router = withFaroRouterInstrumentation(
createBrowserRouter([
{ path: '/', element: <Home /> },
{ path: '/about', element: <About /> },
])
);
function App() {
return <RouterProvider router={router} />;
}

initializeFaro({
url: '...',
app: { name: 'my-app' },
sessionTracking: {
enabled: true,
persistent: true,
maxSessionPersistenceTime: 4 * 60 * 60 * 1000, // 4 hours in ms
samplingRate: 1, // 1 = 100%, 0.5 = 50% of sessions
onSessionChange: (oldSession, newSession) => {
console.log('Session changed', newSession.id);
},
},
instrumentations: [...getWebInstrumentations()],
});

Key configuration: `url` is the only required option (`initializeFaro({ url: '...' })`). `getWebInstrumentations()` enables the default web instrumentations; pass `captureConsole: true` to also capture console logs. `TracingInstrumentation` propagates the `traceparent` and `tracestate` headers for distributed tracing.

| Metric | Description |
|---|---|
| `gen_ai_usage_input_tokens` | Total input/prompt tokens consumed |
| `gen_ai_usage_output_tokens` | Total output/completion tokens consumed |
| `gen_ai_usage_cost_USD` | Total cost in USD |
| `gen_ai_client_operation_duration_seconds` | Latency per LLM call (histogram) |
| `gen_ai_client_token_usage` | Token usage histogram |
Spans and metrics carry attributes such as `gen_ai.request.model` and `gen_ai.system` (e.g. `openai`, `anthropic`).

pip install openlit openai anthropic cohere

import openlit
import openai
# One-line initialization - auto-instruments all supported LLM libraries
openlit.init()
# Optional parameters
openlit.init(
application_name="my-ai-app",
environment="production",
)
# Your existing code works unchanged - OpenLIT intercepts all LLM calls
client = openai.OpenAI()
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello!"}]
)

export OTEL_SERVICE_NAME="my-ai-app"
export OTEL_DEPLOYMENT_ENVIRONMENT="production"
export OTEL_EXPORTER_OTLP_ENDPOINT="https://otlp-gateway-<region>.grafana.net/otlp"
# Base64 encode "instanceID:apiToken"
export OTEL_EXPORTER_OTLP_HEADERS="Authorization=Basic <base64-encoded-instanceid:apitoken>"

# Hallucination detection
evals = openlit.evals.Hallucination(
provider="openai",
api_key=os.getenv("OPENAI_API_KEY")
)
result = evals.measure(
prompt=user_message,
contexts=["Your knowledge base content here"],
text=llm_answer
)
# Content safety guard
guard = openlit.guard.All(
provider="openai",
api_key=os.getenv("OPENAI_API_KEY")
)
guard.detect(text=user_message)

Minimal setup: `pip install openlit`, then call `openlit.init()`.

| Signal | Product | Storage | Query Language |
|---|---|---|---|
| Metrics (RED) | App Observability | Mimir | PromQL |
| Traces | Tempo | Tempo | TraceQL |
| Logs | Loki | Loki | LogQL |
| Profiles | Pyroscope | Pyroscope | - |
| Browser RUM | Faro/Frontend Obs | Loki + Tempo | - |
| LLM metrics | AI Observability | Mimir | PromQL |
Full-stack correlation keys: `service.name` (exposed as the `service_name` metrics label), `traceID` links logs and profiles to traces, `profileID` links spans to profiles, and the `traceparent` header propagated by `TracingInstrumentation` connects browser sessions to backend traces. Total LLM spend is queryable via `gen_ai_usage_cost_USD_sum`.