Rust AppView - highly experimental!

fix: more aggressive conversion to columnstore

+73
+19
migrations/2025-12-08-235657_adjust_compression_policy_schedule/down.sql
-- Revert compression policy to original schedule
--
-- This reverts to the original settings:
--   - Run daily (every 1 day)
--   - Compress chunks older than 2 days
--
-- 2 days = 2 * 86400 seconds = 172,800 seconds
-- In TID format: 172,800 * 1,000,000 microseconds << 10 = 176,947,200,000,000 ticks

-- Single alter_job call updates schedule and compress_after atomically;
-- the previous version issued two separate calls, so a failure in the
-- second left the job half-reverted.
--
-- NOTE(review): job_id 1000 is hard-coded. TimescaleDB assigns job ids
-- sequentially and they are not guaranteed stable across installs —
-- verify this matches the compression policy job on the target database.
SELECT alter_job(
    1000,
    schedule_interval => INTERVAL '1 day',
    config => jsonb_set(
        (SELECT config FROM timescaledb_information.jobs WHERE job_id = 1000),
        '{compress_after}',
        -- Store as a JSON number: the compression policy reads compress_after
        -- as an integer for integer-time (TID) hypertables. The previous
        -- ::text cast produced a JSON string instead.
        to_jsonb(2::bigint * 86400 * 1000000 << 10)
    )
);
+24
migrations/2025-12-08-235657_adjust_compression_policy_schedule/up.sql
-- Adjust compression policy to run more frequently with shorter compress_after
--
-- CHANGE: Update existing compression policy (job 1000) to:
--   - Run every 10 minutes (instead of daily)
--   - Compress chunks older than 4 hours (instead of 2 days)
--
-- RATIONALE: With larger chunk sizes, we want to compress
-- chunks that are no longer receiving active writes. Since chunks span multiple
-- days, setting compress_after to 4 hours ensures we compress all but the latest
-- active chunk, while running every 10 minutes ensures timely compression.
--
-- 4 hours = 4 * 3600 seconds = 14,400 seconds
-- In TID format: 14,400 * 1,000,000 microseconds << 10 = 14,745,600,000,000 ticks

-- Single alter_job call updates schedule and compress_after atomically;
-- the previous version issued two separate calls, so a failure in the
-- second left the job half-updated.
--
-- NOTE(review): job_id 1000 is hard-coded. TimescaleDB assigns job ids
-- sequentially and they are not guaranteed stable across installs —
-- verify this matches the compression policy job on the target database.
SELECT alter_job(
    1000,
    schedule_interval => INTERVAL '10 minutes',
    config => jsonb_set(
        (SELECT config FROM timescaledb_information.jobs WHERE job_id = 1000),
        '{compress_after}',
        -- Store as a JSON number: the compression policy reads compress_after
        -- as an integer for integer-time (TID) hypertables. The previous
        -- ::text cast produced a JSON string instead.
        to_jsonb(4::bigint * 3600 * 1000000 << 10)
    )
);
+8
migrations/2025-12-09-000126_fix_timescaledb_compression_policy/down.sql
-- Reverse the integer_now function setup
-- This will cause compression policy to fail again, but allows rollback if needed
-- (intentionally restores the pre-migration broken state).

-- Remove the integer_now function from the posts hypertable
-- NOTE(review): TimescaleDB does not document passing NULL to
-- set_integer_now_func as a way to unregister a function — confirm this
-- call succeeds (or is a silent no-op) rather than aborting the rollback.
SELECT set_integer_now_func('posts', NULL);

-- Drop the tid_now function
-- Runs after the unregister attempt above; IF EXISTS keeps the rollback
-- re-runnable even if the function was already removed.
DROP FUNCTION IF EXISTS public.tid_now();
+22
migrations/2025-12-09-000126_fix_timescaledb_compression_policy/up.sql
-- Fix TimescaleDB compression policy by setting integer_now function
--
-- PROBLEM: The compression policy fails with "integer_now function not set"
-- because the posts table uses integer-based time (TID/rkey), and TimescaleDB needs
-- to know how to calculate "now" in TID units to determine which chunks to compress.
--
-- SOLUTION: Create and register a function that converts current time to TID format.

-- Create function that returns current timestamp as TID
-- TID format: (microseconds_since_epoch << 10)
-- This matches the tid_now() function already used elsewhere in the codebase.
-- STABLE (not IMMUTABLE) because NOW() varies between transactions.
CREATE OR REPLACE FUNCTION public.tid_now() RETURNS BIGINT AS $$
    SELECT (EXTRACT(EPOCH FROM NOW()) * 1000000)::BIGINT << 10;
$$ LANGUAGE SQL STABLE;

-- Register the function with the posts hypertable so integer-time
-- compression policies can compute "now".
--   - Schema-qualified ('public.tid_now') so registration does not depend
--     on the session search_path (the bare 'tid_now' used previously did).
--   - replace_if_exists => TRUE makes the migration idempotent on re-run,
--     matching the CREATE OR REPLACE above.
SELECT set_integer_now_func('posts', 'public.tid_now', replace_if_exists => TRUE);

-- Note: The existing compression policy (job_id 1000) will now start working.
-- It compresses chunks based on the compress_after configuration.
-- The policy runs on a scheduled interval and converts old chunks from rowstore to columnstore format.