OpenTelemetry pipeline learning
Python test samples
Requirements
txt
opentelemetry-exporter-otlp-proto-grpc
opentelemetry-instrumentation-logging
opentelemetry-sdk
Trace
py
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
# Service name is required for most backends
resource = Resource.create(attributes={SERVICE_NAME: "testwebserver"})
tracerProvider = TracerProvider(resource=resource)
processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="192.168.1.16:4317", insecure=True))
tracerProvider.add_span_processor(processor)
trace.set_tracer_provider(tracerProvider)
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("rootSpan"):
with tracer.start_as_current_span("childSpan"):
print("Hello world!")Logs
py
import logging
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry._logs import set_logger_provider
from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter
from opentelemetry.sdk._logs import LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
logger_provider = LoggerProvider(
    resource=Resource.create(
        {
            "service.name": "otel-example",
            "service.instance.id": "otel-example-instance",
        }
    ),
)
set_logger_provider(logger_provider)
exporter = OTLPLogExporter(endpoint="http://192.168.1.16:4317", insecure=True)
logger_provider.add_log_record_processor(BatchLogRecordProcessor(exporter))
handler = LoggingHandler(level=logging.DEBUG, logger_provider=logger_provider)
logging.getLogger().addHandler(handler)
logger = logging.getLogger("otel-example")
logger.info("This is an info message")
logger.error("This is an error message")Trace and logs linked
py
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
import logging
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry._logs import set_logger_provider
from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter
from opentelemetry.sdk._logs import LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.instrumentation.logging import LoggingInstrumentor
# share same resource
resource = Resource.create(attributes={SERVICE_NAME: "testwebserver"})
# traces
tracerProvider = TracerProvider(resource=resource)
processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="192.168.1.16:4317", insecure=True))
tracerProvider.add_span_processor(processor)
trace.set_tracer_provider(tracerProvider)
# logs
logger_provider = LoggerProvider(resource=resource)
set_logger_provider(logger_provider)
log_processor = BatchLogRecordProcessor(OTLPLogExporter(endpoint="http://192.168.1.16:4317", insecure=True))
logger_provider.add_log_record_processor(log_processor)
# inject trace_id/span_id to link trace and logs
LoggingInstrumentor().instrument(set_logging_format=True)
handler = LoggingHandler(level=logging.DEBUG, logger_provider=logger_provider)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().addHandler(handler)
logger = logging.getLogger("otel-example")
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("rootSpan"):
with tracer.start_as_current_span("childSpan"):
logger.info("This is an info message")
logger.warning("This is an warning message")
logger.error("This is an error message")Deployment
Docker compose
yaml
name: otel_stack
services:
  pg_ats:
    image: docker.io/postgres:16
    volumes:
      - ats_data:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: devops
    ports:
      - "5433:5432"
  tempo:
    image: docker.io/grafana/tempo:2.8.2
    container_name: tempo
    ports:
      - "3200:3200"
    command: -config.file=/etc/tempo-config.yaml
    volumes:
      - ./tempo-config.yaml:/etc/tempo-config.yaml
  loki:
    image: docker.io/grafana/loki:latest
    container_name: loki
    ports:
      - "3100:3100"
    command: -config.file=/etc/loki/local-config.yaml
    volumes:
      - ./loki-config.yaml:/etc/loki/local-config.yaml
  pg-grafana:
    image: docker.io/postgres:16
    container_name: pg-grafana
    # set shared memory limit when using docker compose
    shm_size: 128mb
    environment:
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: ${POSTGRES_GRAFANA_DB}
    ports:
      - "${POSTGRES_GRAFANA_PORT}:5432"
    volumes:
      - postgres_grafana_data:/var/lib/postgresql/data
  grafana:
    image: docker.io/grafana/grafana
    container_name: grafana
    ports:
      - "${GRAFANA_PORT}:3000"
    environment:
      GF_DATABASE_TYPE: postgres
      GF_DATABASE_HOST: pg-grafana:5432
      GF_DATABASE_NAME: ${POSTGRES_GRAFANA_DB}
      # must match the credentials pg-grafana is created with
      GF_DATABASE_USER: ${POSTGRES_USER}
      GF_DATABASE_PASSWORD: ${POSTGRES_PASSWORD}
    depends_on:
      - pg-grafana
      - loki
      - tempo
    volumes:
      - ./datasources:/etc/grafana/provisioning/datasources
  otel-collector:
    image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:latest
    container_name: otel-collector
    ports:
      - "4318:4318"
      - "4317:4317"
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    environment:
      OTEL_LOG_LEVEL: debug
    command:
      - "--config=/etc/otel-collector-config.yaml"
    depends_on:
      - loki
volumes:
  # named volumes must be declared here, including ats_data used by pg_ats
  ats_data:
  postgres_grafana_data:
docker-compose.env.txt
txt
POSTGRES_USER=admin
POSTGRES_PASSWORD=admin
POSTGRES_DB=app
POSTGRES_PORT=15432
POSTGRES_GRAFANA_DB=grafana
POSTGRES_GRAFANA_PORT=15433
GRAFANA_PORT=15300
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=admin
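Since the stack's ports are environment-driven, the Python samples can read the collector address from the environment too, instead of hard-coding 192.168.1.16. A sketch using the standard OTEL_EXPORTER_OTLP_ENDPOINT variable, with a localhost fallback that assumes the compose stack above runs on the same machine:
py
import os
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
# the OTLP exporters also read this variable on their own when no endpoint argument is given
endpoint = os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4317")
exporter = OTLPSpanExporter(endpoint=endpoint, insecure=True)
./datasources/01-loki.yaml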
yaml
apiVersion: 1
datasources:
  - name: Loki
    type: loki
    uid: loki
    access: proxy
    orgId: 1
    url: http://loki:3100
    basicAuth: false
    version: 1
    editable: true
./datasources/02-tempo.yaml
yaml
apiVersion: 1
datasources:
  - name: Tempo
    type: tempo
    access: proxy
    url: http://tempo:3200
    basicAuth: false
    isDefault: true
    version: 1
    editable: true
    jsonData:
      tracesToLogs:
        datasourceUid: loki
        spanStartTimeShift: -1m
        spanEndTimeShift: 1m
        tags: ["service.name"]
        mappedTags:
          - key: service.name
            value: service_name
        mapTagNamesEnabled: true
        filterByTraceID: true
        filterBySpanID: true
loki-config.yaml
yaml
auth_enabled: false
server:
  http_listen_port: 3100
  http_server_write_timeout: 310s
  http_server_read_timeout: 310s
storage_config:
  filesystem:
    directory: /loki/chunks
common:
  path_prefix: /loki
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory
ingester:
  chunk_encoding: snappy
  chunk_idle_period: 2h
  chunk_target_size: 1536000
  max_chunk_age: 2h
querier:
  max_concurrent: 8
schema_config:
  configs:
    - from: "2024-04-01"
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h
compactor:
  working_directory: /loki/compactor
limits_config:
  max_query_parallelism: 24
  split_queries_by_interval: 15m
  ingestion_rate_mb: 20
  ingestion_burst_size_mb: 30
  per_stream_rate_limit: "3MB"
  per_stream_rate_limit_burst: "10MB"
  query_timeout: 300s
  allow_structured_metadata: true
ruler:
  storage:
    type: local
    local:
      directory: /loki/rules
otel-collector-config.yaml
yaml
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
processors:
  batch:
    # Batch telemetry to reduce the number of export calls
    send_batch_size: 512
    timeout: 5s
  tail_sampling:
    # Keep all error traces and sample 10% of successful ones
    # This reduces storage costs while preserving important data
    decision_wait: 10s
    policies:
      - name: errors-policy
        type: status_code
        status_code:
          status_codes: [ERROR]
      - name: probabilistic-policy
        type: probabilistic
        probabilistic:
          sampling_percentage: 10
  resource:
    attributes:
      # Add a deployment environment identifier to all traces
      - key: deployment.environment
        value: staging
        action: upsert
exporters:
  otlp/tempo:
    endpoint: tempo:4317
    tls:
      insecure: true
  otlphttp:
    endpoint: http://loki:3100/otlp
    tls:
      insecure: true
service:
  pipelines:
    traces:
      receivers: [otlp]
      # tail_sampling and resource only take effect when listed in a pipeline
      processors: [tail_sampling, resource, batch]
      exporters: [otlp/tempo]
    logs:
      receivers: [otlp]
      processors: [batch]
      exporters: [otlphttp]
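With tail_sampling wired into the traces pipeline, a trace is only guaranteed to be kept when some span carries an ERROR status. A quick way to produce one from the Python samples above, by setting the status explicitly:
py
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("failing-operation") as span:
    # an ERROR status matches the errors-policy, so this trace is always kept
    span.set_status(Status(StatusCode.ERROR))
tempo-config.yaml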
yaml
# tempo-config.yaml
stream_over_http_enabled: true
server:
  http_listen_port: 3200
  grpc_listen_port: 9096
distributor:
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317
        http:
          endpoint: 0.0.0.0:4318
ingester:
  # How long traces stay in the ingester before being flushed to storage
  lifecycler:
    ring:
      kvstore:
        store: inmemory
  max_block_duration: 5m
  trace_idle_period: 10s
  flush_check_period: 10s
storage:
  trace:
    backend: local
    local:
      path: /var/tempo/traces
metrics_generator:
  registry:
    external_labels:
      source: tempo
      cluster: infra
  storage:
    path: /tmp/tempo/generator/wal
  traces_storage:
    path: /var/tempo/wal
overrides:
  defaults:
    metrics_generator:
      processors: [service-graphs, span-metrics, local-blocks]
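To verify that spans actually landed in Tempo, its HTTP API on the port 3200 published above can be queried by trace id. A sketch; the id is a placeholder to be replaced with one printed by a client:
py
import json
import urllib.request
trace_id = "0123456789abcdef0123456789abcdef"  # placeholder trace id
with urllib.request.urlopen(f"http://localhost:3200/api/traces/{trace_id}") as resp:
    print(json.load(resp))
C++ client implementation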
Tested using opentelemetry-cpp 1.18.0
cpp
// OTEL
#include "opentelemetry/logs/provider.h"
#include "opentelemetry/exporters/otlp/otlp_grpc_exporter_factory.h"
#include "opentelemetry/sdk/logs/logger_provider_factory.h"
#include "opentelemetry/sdk/trace/simple_processor_factory.h"
#include "opentelemetry/sdk/trace/tracer_provider_factory.h"
#include "opentelemetry/trace/provider.h"
#include "opentelemetry/exporters/otlp/otlp_grpc_log_record_exporter_factory.h"
#include "opentelemetry/sdk/logs/simple_log_record_processor_factory.h"
namespace trace = opentelemetry::trace;
namespace otlp = opentelemetry::exporter::otlp;
namespace logs_sdk = opentelemetry::sdk::logs;
namespace trace_sdk = opentelemetry::sdk::trace;
opentelemetry::exporter::otlp::OtlpGrpcExporterOptions opts;
opts.endpoint = "otel-collector:4317";
opts.use_ssl_credentials = false;
opentelemetry::exporter::otlp::OtlpGrpcLogRecordExporterOptions log_opts;
log_opts.endpoint = "otel-collector:4317";
log_opts.use_ssl_credentials = false;
std::shared_ptr<opentelemetry::sdk::trace::TracerProvider> tracer_provider;
std::shared_ptr<opentelemetry::sdk::logs::LoggerProvider> logger_provider;
// init tracer
// Create OTLP exporter instance
auto tracerexporter = otlp::OtlpGrpcExporterFactory::Create (opts);
auto tracerprocessor = trace_sdk::SimpleSpanProcessorFactory::Create (std::move (tracerexporter));
tracer_provider = trace_sdk::TracerProviderFactory::Create (std::move (tracerprocessor));
// Set the global trace provider
std::shared_ptr<opentelemetry::trace::TracerProvider> api_tracer_provider = tracer_provider;
trace::Provider::SetTracerProvider (api_tracer_provider);
// init logger
auto loggerexporter = otlp::OtlpGrpcLogRecordExporterFactory::Create (log_opts);
auto loggerprocessor = logs_sdk::SimpleLogRecordProcessorFactory::Create (std::move (loggerexporter));
logger_provider = logs_sdk::LoggerProviderFactory::Create (std::move (loggerprocessor));
// Set the global logger provider
std::shared_ptr<opentelemetry::logs::LoggerProvider> api_logger_provider = logger_provider;
opentelemetry::logs::Provider::SetLoggerProvider (api_logger_provider);
auto tracer = tracer_provider->GetTracer ("test-tracer");
auto logger = logger_provider->GetLogger ("foo_library_logger", "foo_library");
auto span = tracer->StartSpan ("span 1");
auto scoped_span = trace::Scope (tracer->StartSpan ("foo_library"));
auto ctx = span->GetContext ();
// Example of attribute see : https://opentelemetry.io/blog/2025/how-to-name-your-span-attributes/
// span->SetAttribute ("http.request.method", "GET");
// span->SetAttribute ("http.response.status_code", "200");
logger->Debug ("body", ctx.trace_id (), ctx.span_id (), ctx.trace_flags ());
// Example for a simple logger, where _loggerProvider is a class member (see Basic logger below)
auto member_logger = _loggerProvider->GetLogger ("foo_library_logger", "foo_library");
Basic logger
main
cpp
// void log (xx severity, const std::string & message, const std::string & category, std::source_location location = std::source_location::current ())
void aaa::log (xx severity, const std::string & message, const std::string & category, std::source_location location)
{
  // === init logger ===
  // note: building exporter/processor/provider on every call is for demonstration only;
  // in real code the provider would be created once and reused
  opentelemetry::exporter::otlp::OtlpGrpcLogRecordExporterOptions opts;
  opts.endpoint = "otel-collector:4317";
  opts.use_ssl_credentials = false;
  auto exporter = otlp::OtlpGrpcLogRecordExporterFactory::Create (opts);
  auto processor = logs_sdk::SimpleLogRecordProcessorFactory::Create (std::move (exporter));
  // Resource
  opentelemetry::sdk::resource::ResourceAttributes resourceAttributes = {{"service.name", category}};
  auto resource = opentelemetry::sdk::resource::Resource::Create (resourceAttributes);
  _loggerProvider = logs_sdk::LoggerProviderFactory::Create (std::move (processor), resource);
  // Set the global logger provider
  std::shared_ptr<opentelemetry::logs::LoggerProvider> api_logger_provider = _loggerProvider;
  opentelemetry::logs::Provider::SetLoggerProvider (api_logger_provider);
  // ===
  auto logger = _loggerProvider->GetLogger ("foo_library_logger", "foo_library");
  auto log_record = logger->CreateLogRecord ();
  log_record->SetBody (message);
  log_record->SetAttribute ("category", category);
  log_record->SetAttribute ("code.function", location.function_name ());
  log_record->SetAttribute ("code.line.number", static_cast<int64_t> (location.line ()));
  log_record->SetAttribute ("code.file.path", location.file_name ());
  // map the severity parameter to one OTel severity, e.g. kDebug / kWarn / kError / kFatal
  log_record->SetSeverity (opentelemetry::logs::Severity::kDebug);
  logger->EmitLogRecord (std::move (log_record));
  // cleanup
  std::shared_ptr<opentelemetry::logs::LoggerProvider> noop;
  opentelemetry::logs::Provider::SetLoggerProvider (noop);
}
References
opentelemetry-cpp examples
Following this article
Also this article for the k3s deployment
Logs and logging exporter for Loki