# Setting up TimescaleDB hypertables
Use this skill when creating database schemas or tables for Timescale, TimescaleDB, TigerData, or Tiger Cloud, especially for time-series, IoT, metrics, events, or log data. Use this to improve the performance of any insert-heavy table.

**Trigger when user asks to:**
- Create or design SQL schemas/tables AND Timescale/TimescaleDB/TigerData/Tiger Cloud is available
- Set up hypertables, compression, retention policies, or continuous aggregates
- Configure partition columns, segment_by, order_by, or chunk intervals
- Optimize time-series database performance or storage
- Create tables for sensors, metrics, telemetry, events, or transaction logs

**Keywords:** CREATE TABLE, hypertable, Timescale, TimescaleDB, time-series, IoT, metrics, sensor data, compression policy, continuous aggregates, columnstore, retention policy, chunk interval, segment_by, order_by

Step-by-step instructions for hypertable creation, column selection, compression policies, retention, continuous aggregates, and indexes.
-- Install the skill:
--   npx skill4agent add timescale/pg-aiguide setup-timescaledb-hypertables

-- 1. Create the hypertable (template — rename table/columns for your domain).
CREATE TABLE your_table_name (
    timestamp TIMESTAMPTZ NOT NULL,
    entity_id TEXT NOT NULL,      -- device_id, user_id, symbol, etc.
    category  TEXT,               -- sensor_type, event_type, asset_class, etc.
    value_1   DOUBLE PRECISION,   -- price, temperature, latency, etc.
    value_2   DOUBLE PRECISION,   -- volume, humidity, throughput, etc.
    value_3   INTEGER,            -- count, status, level, etc.
    metadata  JSONB               -- flexible additional data
) WITH (
    tsdb.hypertable,
    tsdb.partition_column = 'timestamp',
    tsdb.enable_columnstore = true,   -- disable if the table has vector columns
    tsdb.segmentby = 'entity_id',     -- see selection guide below
    tsdb.orderby = 'timestamp DESC',  -- see selection guide below
    tsdb.sparse_index = 'minmax(value_1),minmax(value_2),minmax(value_3)'  -- see guide below
);

-- Selection guide (reconstructed from garbled extraction — TODO verify against
-- the TimescaleDB hypertable docs):
--   partition_column: the time-like column queries filter on — e.g. timestamp,
--     event_time, measured_at, created_at, logged_at, transaction_time,
--     processed_at, inserted_at, ingested_at — or a monotonically increasing
--     id / sequence_number for non-time-series tables. Avoid mutable columns
--     such as updated_at.
--   segment_by: the column most queries filter/group on — e.g. device_id,
--     symbol, service_name, metric_name + metric_type, user_id, session_id,
--     product_id, category_id. For a pattern like
--       SELECT * FROM t WHERE entity_id = 'X' AND timestamp > ...
--     choose segment_by = 'entity_id' (only worthwhile with >100 rows per
--     segment per chunk).
--   order_by: usually 'timestamp DESC'; use compound forms such as
--     'metric_name, timestamp DESC', 'user_id, timestamp DESC',
--     'sequence_id DESC', or 'timestamp DESC, event_order DESC' when queries
--     sort on (segment_by_column, order_by_column) together.

-- 2. Adjust sparse indexes later if query patterns change:
ALTER TABLE your_table_name SET (
    timescaledb.sparse_index = 'minmax(value_1),minmax(value_2)'
);

-- 3. Override the default chunk interval if needed:
SELECT set_chunk_time_interval('your_table_name', INTERVAL '1 day');

-- 4. Secondary index matching the dominant query pattern:
CREATE INDEX idx_entity_timestamp ON your_table_name (entity_id, timestamp DESC);

-- 5. Optional primary key — must include the partition column:
ALTER TABLE your_table_name ADD PRIMARY KEY (entity_id, timestamp);

-- Non-time partitioning alternative (insert-heavy, id-ordered tables):
--   CREATE TABLE ... (id BIGINT PRIMARY KEY, ...) WITH (tsdb.partition_column = 'id');

-- In TimescaleDB 2.23 and later, tsdb.enable_columnstore = true auto-creates a
-- columnstore policy with after => INTERVAL '7 days'. Only override it if you
-- need a different delay:
-- CALL remove_columnstore_policy('your_table_name');
-- CALL add_columnstore_policy('your_table_name', after => INTERVAL '1 day');
-- Retention on the raw table (example — replace with requirements or comment out):
SELECT add_retention_policy('your_table_name', INTERVAL '365 days');

-- Hourly continuous aggregate:
CREATE MATERIALIZED VIEW your_table_hourly
WITH (timescaledb.continuous) AS
SELECT
    time_bucket(INTERVAL '1 hour', timestamp) AS bucket,
    entity_id,
    category,
    COUNT(*) AS record_count,
    AVG(value_1) AS avg_value_1,
    MIN(value_1) AS min_value_1,
    MAX(value_1) AS max_value_1,
    SUM(value_2) AS sum_value_2
FROM your_table_name
GROUP BY bucket, entity_id, category;

-- Daily continuous aggregate (adds median / p95):
CREATE MATERIALIZED VIEW your_table_daily
WITH (timescaledb.continuous) AS
SELECT
    time_bucket(INTERVAL '1 day', timestamp) AS bucket,
    entity_id,
    category,
    COUNT(*) AS record_count,
    AVG(value_1) AS avg_value_1,
    MIN(value_1) AS min_value_1,
    MAX(value_1) AS max_value_1,
    PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY value_1) AS median_value_1,
    PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY value_1) AS p95_value_1,
    SUM(value_2) AS sum_value_2
FROM your_table_name
GROUP BY bucket, entity_id, category;

-- Refresh policies:
SELECT add_continuous_aggregate_policy('your_table_hourly',
    start_offset => NULL,                  -- NULL = refresh all data
    end_offset => INTERVAL '15 minutes',
    schedule_interval => INTERVAL '15 minutes');

SELECT add_continuous_aggregate_policy('your_table_daily',
    start_offset => NULL,                  -- NULL = refresh all data; set to the
                                           -- raw-table retention period if a
                                           -- retention policy is enabled there
    -- start_offset => INTERVAL '<retention period here>',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '1 hour');

-- Bounded-window variant: the aggregate can be stale for data older than 7 days.
-- SELECT add_continuous_aggregate_policy('aggregate_for_last_7_days',
--     start_offset => INTERVAL '7 days',   -- only refresh last 7 days (NULL = refresh all)
--     end_offset => INTERVAL '15 minutes',
--     schedule_interval => INTERVAL '15 minutes');

-- Real-time aggregation: include not-yet-materialized raw rows in query results.
ALTER MATERIALIZED VIEW your_table_hourly SET (timescaledb.materialized_only = false);
-- Or serve only materialized data (faster, possibly stale):
-- ALTER MATERIALIZED VIEW your_table_hourly SET (timescaledb.materialized_only = true);
-- Columnstore settings + policy for the hourly aggregate:
ALTER MATERIALIZED VIEW your_table_hourly SET (
    timescaledb.enable_columnstore,
    timescaledb.segmentby = 'entity_id, category',
    timescaledb.orderby = 'bucket DESC'
);
CALL add_columnstore_policy('your_table_hourly', after => INTERVAL '3 days');

-- Columnstore settings + policy for the daily aggregate:
ALTER MATERIALIZED VIEW your_table_daily SET (
    timescaledb.enable_columnstore,
    timescaledb.segmentby = 'entity_id, category',
    timescaledb.orderby = 'bucket DESC'
);
CALL add_columnstore_policy('your_table_daily', after => INTERVAL '7 days');

-- Retention on the aggregates (example — replace or comment out):
SELECT add_retention_policy('your_table_hourly', INTERVAL '2 years');
SELECT add_retention_policy('your_table_daily', INTERVAL '5 years');

-- Aggregate indexes: the shape (filter_column, bucket DESC) serves
--   WHERE filter_column = X AND bucket >= Y ORDER BY bucket DESC
CREATE INDEX idx_hourly_entity_bucket ON your_table_hourly (entity_id, bucket DESC);
CREATE INDEX idx_hourly_category_bucket ON your_table_hourly (category, bucket DESC);

-- Combined filter (WHERE entity_id = X AND category = Y):
CREATE INDEX idx_hourly_entity_category_bucket
    ON your_table_hourly (entity_id, category, bucket DESC);
-- Check hypertable
SELECT * FROM timescaledb_information.hypertables
WHERE hypertable_name = 'your_table_name';

-- Check compression settings
SELECT * FROM hypertable_compression_stats('your_table_name');

-- Check aggregates
SELECT * FROM timescaledb_information.continuous_aggregates;

-- Check policies
SELECT * FROM timescaledb_information.jobs ORDER BY job_id;

-- Monitor chunk information
SELECT
    chunk_name,
    range_start,
    range_end,
    is_compressed
FROM timescaledb_information.chunks
WHERE hypertable_name = 'your_table_name'
ORDER BY range_start DESC;

-- Best practices (reconstructed from garbled extraction — TODO verify against
-- the TimescaleDB documentation):
--   * Run timescaledb-tune on self-hosted instances.
--   * Use TIMESTAMPTZ for instants; filter with half-open ranges (>= / <), not BETWEEN.
--   * Prefer TEXT over char(n)/varchar(n); snake_case over CamelCase identifiers.
--   * Prefer BIGINT GENERATED ALWAYS AS IDENTITY over SERIAL; choose integer
--     (BIGINT/INTEGER/SMALLINT) and float (DOUBLE PRECISION/REAL) widths
--     deliberately; avoid FLOAT/NUMERIC/MONEY for high-volume metric columns.
--   * Prefer NOT EXISTS over NOT IN; time_bucket() over date_trunc(); avoid timestamp(0).
--   * Renamed compression APIs (old -> new):
--       timescaledb.compress           -> timescaledb.enable_columnstore
--       timescaledb.compress_segmentby -> timescaledb.segmentby
--       timescaledb.compress_orderby   -> timescaledb.orderby
--       add_compression_policy()       -> add_columnstore_policy()    (SELECT -> CALL)
--       remove_compression_policy()    -> remove_columnstore_policy() (SELECT -> CALL)
--       compress_chunk()               -> convert_to_columnstore()    (SELECT -> CALL)
--       decompress_chunk()             -> convert_to_rowstore()       (SELECT -> CALL)
--   * Stats: hypertable_compression_stats('table_name'),
--     chunk_compression_stats('_timescaledb_internal._hyper_X_Y_chunk'),
--     and the timescaledb_information columnstore_settings view.

-- Compress a specific chunk
-- Manually convert one chunk to columnstore (chunk names come from
-- timescaledb_information.chunks; the _hyper_X_Y pattern below is an example).
CALL convert_to_columnstore('_timescaledb_internal._hyper_7_1_chunk');
-- Check compression statistics
-- NULLIF guards the division when nothing has been compressed yet
-- (before_compression_total_bytes = 0 or NULL -> compression_pct is NULL).
SELECT
number_compressed_chunks,
pg_size_pretty(before_compression_total_bytes) as before_compression,
pg_size_pretty(after_compression_total_bytes) as after_compression,
ROUND(100.0 * (1 - after_compression_total_bytes::numeric / NULLIF(before_compression_total_bytes, 0)), 1) as compression_pct
FROM hypertable_compression_stats('your_table_name');