SELECT version(); -- Check PostgreSQL version
SELECT current_date, current_time; -- Display current date and time
sudo apt update && sudo apt install postgresql postgresql-contrib
psql -U postgres
CREATE DATABASE testdb;
CREATE USER testuser WITH PASSWORD 'password';
GRANT ALL PRIVILEGES ON DATABASE testdb TO testuser;
psql -d testdb -U testuser
SELECT 'Hello, PostgreSQL!';
\q
CREATE TABLE test (id SERIAL, amount NUMERIC);
CREATE TABLE names (first_name VARCHAR(50));
CREATE TABLE events (event_date DATE, event_time TIME);
CREATE TABLE flags (is_active BOOLEAN);
CREATE TABLE sensors (readings INTEGER[]);
CREATE TABLE documents (data JSON);
CREATE TABLE items (id UUID DEFAULT gen_random_uuid());
SELECT 10 + 5, 20 > 10, true AND false;
SELECT 'Hello' || ' World', 'Postgres' LIKE 'Post%';
SELECT '123'::INTEGER;
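-- A hedged sketch combining the types above into one table (names are illustrative; gen_random_uuid() needs PostgreSQL 13+ or pgcrypto):
CREATE TABLE demo_items (
    id UUID DEFAULT gen_random_uuid(),
    label VARCHAR(50),
    amount NUMERIC,
    happened_on DATE,
    is_active BOOLEAN,
    readings INTEGER[],
    payload JSON
);
INSERT INTO demo_items (label, amount, happened_on, is_active, readings, payload)
VALUES ('sample', 19.99, current_date, true, ARRAY[1, 2, 3], '{"ok": true}');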
CREATE DATABASE mydb;
ALTER DATABASE mydb RENAME TO newdb;
DROP DATABASE newdb;
CREATE TABLE employees (id SERIAL, name VARCHAR, age INT);
ALTER TABLE employees ADD email VARCHAR(100);
DROP TABLE employees;
\dt
\d employees
CREATE TABLE new_employees AS SELECT * FROM employees;
TRUNCATE TABLE employees;
INSERT INTO employees (name, age) VALUES ('Alice', 30);
INSERT INTO employees (name, age) VALUES ('Bob', 25), ('Carol', 28);
SELECT * FROM employees;
SELECT * FROM employees WHERE age > 25;
UPDATE employees SET age = 35 WHERE name = 'Alice';
DELETE FROM employees WHERE name = 'Bob';
INSERT INTO employees (name, age) VALUES ('Eve', 22) RETURNING id;
SELECT * FROM employees WHERE email IS NULL;
SELECT * FROM employees ORDER BY age DESC;
SELECT * FROM employees LIMIT 5;
CREATE TABLE users (id SERIAL, name VARCHAR NOT NULL);
CREATE TABLE users (email VARCHAR UNIQUE);
CREATE TABLE users (id SERIAL PRIMARY KEY);
CREATE TABLE orders (id SERIAL, user_id INT REFERENCES users(id));
CREATE TABLE accounts (balance NUMERIC CHECK (balance >= 0));
CREATE TABLE settings (theme VARCHAR DEFAULT 'light');
CREATE TABLE enrollment (student_id INT, course_id INT, PRIMARY KEY (student_id, course_id));
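-- The users definitions above are independent one-constraint examples; combined into a single hedged sketch (column names illustrative):
CREATE TABLE app_users (
    id SERIAL PRIMARY KEY,
    name VARCHAR NOT NULL,
    email VARCHAR UNIQUE,
    theme VARCHAR DEFAULT 'light',
    balance NUMERIC CHECK (balance >= 0)
);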
CREATE INDEX idx_name ON users(name);
CREATE UNIQUE INDEX idx_email ON users(email);
DROP INDEX idx_name;
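-- The join and trigger examples below assume a departments table and extra employees columns (dept_id, salary, manager_id) that this sheet does not define; a minimal sketch:
CREATE TABLE departments (id SERIAL PRIMARY KEY, name VARCHAR NOT NULL);
ALTER TABLE employees ADD COLUMN dept_id INT REFERENCES departments(id), ADD COLUMN salary NUMERIC, ADD COLUMN manager_id INT;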
SELECT * FROM employees INNER JOIN departments ON employees.dept_id = departments.id;
SELECT * FROM employees LEFT JOIN departments ON employees.dept_id = departments.id;
SELECT * FROM employees RIGHT JOIN departments ON employees.dept_id = departments.id;
SELECT * FROM employees FULL OUTER JOIN departments ON employees.dept_id = departments.id;
SELECT * FROM products CROSS JOIN categories;
SELECT a.name, b.name FROM employees a JOIN employees b ON a.manager_id = b.id;
SELECT * FROM orders JOIN customers USING(customer_id);
SELECT * FROM orders NATURAL JOIN customers;
SELECT * FROM a JOIN b ON a.id = b.a_id JOIN c ON b.id = c.b_id;
SELECT d.name, COUNT(e.id) FROM departments d JOIN employees e ON d.id = e.dept_id GROUP BY d.name;
CREATE VIEW emp_view AS SELECT name, age FROM employees;
SELECT * FROM emp_view;
CREATE OR REPLACE VIEW emp_view AS SELECT name FROM employees;
DROP VIEW emp_view;
CREATE MATERIALIZED VIEW emp_mview AS SELECT * FROM employees;
REFRESH MATERIALIZED VIEW emp_mview;
DROP MATERIALIZED VIEW emp_mview;
SELECT * FROM emp_view JOIN departments ON emp_view.dept_id = departments.id;
CREATE INDEX ON emp_mview (name);
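-- Assumption worth noting: REFRESH MATERIALIZED VIEW CONCURRENTLY (non-blocking) requires at least one unique index on the materialized view, e.g.:
CREATE UNIQUE INDEX idx_emp_mview_id ON emp_mview (id);
REFRESH MATERIALIZED VIEW CONCURRENTLY emp_mview;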
CREATE FUNCTION add(a INT, b INT) RETURNS INT AS $$ BEGIN RETURN a + b; END; $$ LANGUAGE plpgsql;
SELECT add(2, 3);
DROP FUNCTION add(INT, INT);
CREATE FUNCTION is_adult(age INT) RETURNS BOOLEAN AS $$ BEGIN RETURN age >= 18; END; $$ LANGUAGE plpgsql;
CREATE PROCEDURE log_activity(msg TEXT) LANGUAGE plpgsql AS $$ BEGIN INSERT INTO logs(message) VALUES (msg); END; $$;
CALL log_activity('User login');
DROP PROCEDURE log_activity(TEXT);
CREATE FUNCTION get_users() RETURNS TABLE(id INT, name TEXT) AS $$ BEGIN RETURN QUERY SELECT u.id, u.name::TEXT FROM users u; END; $$ LANGUAGE plpgsql; -- qualify columns so they don't clash with the OUT parameters
CREATE FUNCTION factorial(n INT) RETURNS INT AS $$ BEGIN IF n = 0 THEN RETURN 1; ELSE RETURN n * factorial(n - 1); END IF; END; $$ LANGUAGE plpgsql;
CREATE FUNCTION range_series(n INT) RETURNS SETOF INT AS $$ BEGIN FOR i IN 1..n LOOP RETURN NEXT i; END LOOP; RETURN; END; $$ LANGUAGE plpgsql;
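-- Calling the functions defined above (assuming they were created in this database):
SELECT is_adult(21); -- true
SELECT factorial(5); -- 120
SELECT * FROM get_users();
SELECT * FROM range_series(3); -- 1, 2, 3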
CREATE FUNCTION log_changes() RETURNS TRIGGER AS $$ BEGIN INSERT INTO logs(action) VALUES ('Data changed'); RETURN NEW; END; $$ LANGUAGE plpgsql;
CREATE TRIGGER before_insert_trigger BEFORE INSERT ON employees FOR EACH ROW EXECUTE FUNCTION log_changes();
CREATE TRIGGER after_insert_trigger AFTER INSERT ON employees FOR EACH ROW EXECUTE FUNCTION log_changes();
CREATE TRIGGER before_update_trigger BEFORE UPDATE ON employees FOR EACH ROW EXECUTE FUNCTION log_changes();
CREATE TRIGGER after_update_trigger AFTER UPDATE ON employees FOR EACH ROW EXECUTE FUNCTION log_changes();
CREATE TRIGGER before_delete_trigger BEFORE DELETE ON employees FOR EACH ROW EXECUTE FUNCTION log_changes();
CREATE TRIGGER after_delete_trigger AFTER DELETE ON employees FOR EACH ROW EXECUTE FUNCTION log_changes();
CREATE TRIGGER update_salary BEFORE UPDATE ON employees FOR EACH ROW WHEN (OLD.salary <> NEW.salary) EXECUTE FUNCTION log_changes();
ALTER TABLE employees DISABLE TRIGGER after_insert_trigger;
DROP TRIGGER after_insert_trigger ON employees;
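-- The procedure and trigger functions above insert into a logs table that this sheet never defines; a hedged sketch plus a quick test:
CREATE TABLE logs (id SERIAL PRIMARY KEY, action TEXT, message TEXT, logged_at TIMESTAMPTZ DEFAULT now());
INSERT INTO employees (name, age) VALUES ('Trigger Test', 30); -- fires the insert triggers defined above
SELECT * FROM logs;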
BEGIN;
COMMIT;
ROLLBACK;
SAVEPOINT sp1;
ROLLBACK TO SAVEPOINT sp1;
RELEASE SAVEPOINT sp1;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET TRANSACTION READ ONLY;
BEGIN; UPDATE accounts SET balance = balance - 100 WHERE id = 1; -- In another session, try updating same row
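-- A hedged end-to-end sketch of a guarded transfer (accounts as defined earlier, plus an assumed id column):
BEGIN;
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
SAVEPOINT transfer_out;
UPDATE accounts SET balance = balance + 100 WHERE id = 2;
ROLLBACK TO SAVEPOINT transfer_out; -- undo only the second update if it was wrong
COMMIT;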
pg_dump testdb > testdb.sql
pg_dump -Fc testdb > testdb.dump
psql testdb < testdb.sql
pg_restore -d testdb testdb.dump
pg_basebackup -D /backup -Fp -Xs -P -U postgres
CREATE ROLE backup WITH LOGIN PASSWORD 'backup123';
GRANT CONNECT ON DATABASE testdb TO backup;
pg_dump -t employees testdb > employees.sql
psql testdb < employees.sql
0 2 * * * /usr/bin/pg_dump testdb > /backups/testdb.sql
EXPLAIN SELECT * FROM employees WHERE age > 30;
EXPLAIN ANALYZE SELECT * FROM employees;
VACUUM employees;
VACUUM FULL employees;
ANALYZE employees;
REINDEX TABLE employees;
CLUSTER employees USING idx_name;
SELECT * FROM pg_stat_statements ORDER BY total_exec_time DESC LIMIT 5; -- column is total_time on PostgreSQL 12 and older
SET work_mem = '64MB';
SET max_parallel_workers_per_gather = 4;
CREATE ROLE readonly;
GRANT readonly TO testuser;
GRANT SELECT ON employees TO readonly;
REVOKE SELECT ON employees FROM readonly;
ALTER ROLE testuser WITH PASSWORD 'securepass';
ALTER ROLE testuser CONNECTION LIMIT 5;
CREATE ROLE dev INHERIT;
ALTER ROLE testuser NOLOGIN;
ALTER ROLE testuser CREATEROLE CREATEDB;
host all all 192.168.1.0/24 md5
SELECT * FROM pg_extension;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
DROP EXTENSION "uuid-ossp";
CREATE EXTENSION postgis;
CREATE EXTENSION citext;
CREATE EXTENSION hstore;
CREATE EXTENSION pgcrypto;
SELECT gen_random_uuid();
COMMENT ON EXTENSION "uuid-ossp" IS 'UUID functions';
SELECT objid::regprocedure FROM pg_depend WHERE classid = 'pg_proc'::regclass AND refclassid = 'pg_extension'::regclass AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'uuid-ossp') AND deptype = 'e'; -- list functions installed by the extension
CREATE TABLE products (data JSON);
CREATE TABLE products_b (data JSONB);
INSERT INTO products VALUES ('{"name": "Pen", "price": 1.2}');
SELECT data->'name' FROM products;
SELECT * FROM products WHERE data->>'name' = 'Pen';
SELECT data @> '{"price":1.2}' FROM products_b;
UPDATE products_b SET data = jsonb_set(data, '{price}', '2.5');
UPDATE products_b SET data = data - 'price';
SELECT jsonb_array_elements('[1,2,3]'::jsonb);
CREATE INDEX idx_jsonb_price ON products_b USING gin(data);
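-- A small sketch tying the JSONB pieces together (products_b as above; the tags key is illustrative):
INSERT INTO products_b (data) VALUES ('{"name": "Pen", "price": 1.2, "tags": ["office"]}');
SELECT data->>'name' AS name FROM products_b WHERE data @> '{"tags": ["office"]}';
CREATE INDEX idx_jsonb_path ON products_b USING gin (data jsonb_path_ops); -- smaller GIN variant that supports @> but not the ? operators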
SELECT now(), pg_postmaster_start_time();
SELECT * FROM pg_stat_activity;
SELECT * FROM pg_locks;
SELECT pg_size_pretty(pg_database_size('testdb'));
SELECT * FROM pg_stat_user_tables ORDER BY seq_scan DESC;
logging_collector = on
log_min_duration_statement = 500
log_destination = 'stderr'
tail -f /var/log/postgresql/postgresql.log
SELECT * FROM pg_stat_bgwriter;
pip install psycopg2  # or psycopg2-binary to avoid needing libpq build headers
import psycopg2
conn = psycopg2.connect("dbname=testdb user=postgres password=secret")
cur = conn.cursor()
cur.execute("SELECT * FROM employees")
rows = cur.fetchall()
for row in rows:
    print(row)
cur.execute("INSERT INTO employees(name) VALUES (%s)", ("John",))
cur.execute("UPDATE employees SET age = %s WHERE id = %s", (30, 1))
cur.execute("DELETE FROM employees WHERE id = %s", (1,))
conn.commit()
cur.close()
conn.close()
try:
    # connection logic
    conn = psycopg2.connect("dbname=testdb user=postgres password=secret")
except psycopg2.Error as e:
    print("Error:", e)
$conn = pg_connect("host=localhost dbname=testdb user=postgres password=secret");
if(!$conn) { echo "Connection failed."; } else { echo "Connected successfully."; }
$result = pg_query($conn, "SELECT * FROM employees"); while ($row = pg_fetch_assoc($result)) { echo $row['name']; }
pg_query($conn, "INSERT INTO employees(name) VALUES('Alice')");
pg_query($conn, "UPDATE employees SET age = 28 WHERE name = 'Alice'");
pg_query($conn, "DELETE FROM employees WHERE name = 'Alice'");
pg_prepare($conn, "my_query", 'SELECT * FROM employees WHERE age > $1'); pg_execute($conn, "my_query", array(25));
$row = pg_fetch_row($result); echo $row[0];
pg_close($conn);
echo pg_last_error($conn);
npm install pg
const { Client } = require('pg');
const client = new Client({ user: 'postgres', host: 'localhost', database: 'testdb', password: 'secret', port: 5432, }); client.connect();
client.query('SELECT * FROM employees', (err, res) => { console.log(res.rows); client.end(); });
await client.query("INSERT INTO employees(name) VALUES($1)", ['Eve']);
await client.query("UPDATE employees SET age = $1 WHERE name = $2", [32, 'Eve']);
await client.query("DELETE FROM employees WHERE name = $1", ['Eve']);
try { await client.query(...); } catch (err) { console.error(err); }
client.end();
await client.query('SELECT * FROM employees WHERE age > $1', [30]);
<dependency> <groupId>org.postgresql</groupId> <artifactId>postgresql</artifactId> <version>42.2.5</version> </dependency>
import java.sql.*;
Connection conn = DriverManager.getConnection( "jdbc:postgresql://localhost:5432/testdb", "postgres", "secret" );
Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery("SELECT * FROM employees");
stmt.executeUpdate("INSERT INTO employees(name) VALUES('Jane')");
stmt.executeUpdate("UPDATE employees SET age = 26 WHERE name = 'Jane'");
stmt.executeUpdate("DELETE FROM employees WHERE name = 'Jane'");
PreparedStatement ps = conn.prepareStatement("SELECT * FROM employees WHERE age > ?"); ps.setInt(1, 30); ResultSet rs = ps.executeQuery();
while(rs.next()) { System.out.println(rs.getString("name")); }
rs.close(); stmt.close(); conn.close();
Install-Package Npgsql
using Npgsql;
var conn = new NpgsqlConnection("Host=localhost;Username=postgres;Password=secret;Database=testdb"); conn.Open();
var cmd = new NpgsqlCommand("SELECT * FROM employees", conn); var reader = cmd.ExecuteReader();
cmd = new NpgsqlCommand("INSERT INTO employees(name) VALUES('Liam')", conn); cmd.ExecuteNonQuery();
cmd = new NpgsqlCommand("UPDATE employees SET age = 35 WHERE name = 'Liam'", conn); cmd.ExecuteNonQuery();
cmd = new NpgsqlCommand("DELETE FROM employees WHERE name = 'Liam'", conn); cmd.ExecuteNonQuery();
while (reader.Read()) { Console.WriteLine(reader["name"]); }
cmd = new NpgsqlCommand("SELECT * FROM employees WHERE age > @age", conn); cmd.Parameters.AddWithValue("@age", 30); reader = cmd.ExecuteReader();
reader.Close(); conn.Close();
gem install pg
require 'pg'
conn = PG.connect(dbname: 'testdb', user: 'postgres', password: 'secret')
res = conn.exec("SELECT * FROM employees")
res.each { |row| puts row["name"] }
conn.exec("INSERT INTO employees(name) VALUES('RubyUser')")
conn.exec("UPDATE employees SET age = 40 WHERE name = 'RubyUser'")
conn.exec("DELETE FROM employees WHERE name = 'RubyUser'")
conn.exec_params("SELECT * FROM employees WHERE age > $1", [30])
begin
  conn.exec("INVALID SQL")
rescue PG::Error => e
  puts e.message
end
conn.close
go get github.com/lib/pq
import ( "database/sql" _ "github.com/lib/pq" )
db, err := sql.Open("postgres", "user=postgres password=secret dbname=testdb sslmode=disable")
rows, err := db.Query("SELECT name FROM employees")
for rows.Next() {
    var name string
    rows.Scan(&name)
    fmt.Println(name)
}
_, err := db.Exec("INSERT INTO employees(name) VALUES($1)", "GoUser")
_, err := db.Exec("UPDATE employees SET age = $1 WHERE name = $2", 30, "GoUser")
_, err := db.Exec("DELETE FROM employees WHERE name = $1", "GoUser")
stmt, err := db.Prepare("SELECT name FROM employees WHERE age > $1")
rows, err := stmt.Query(30)
if err != nil { log.Fatal(err) }
defer db.Close()
psql -U postgres -d testdb
#!/bin/bash
psql -U postgres -d testdb -c "SELECT * FROM employees"
output=$(psql -U postgres -d testdb -t -c "SELECT COUNT(*) FROM employees")
echo "$output"
psql -U postgres -d testdb -c "INSERT INTO employees(name) VALUES('ShellUser')"
name="ShellUser" psql -U postgres -d testdb -c "DELETE FROM employees WHERE name = '$name'"
psql -U postgres -d testdb -f script.sql
pg_dump -U postgres -d testdb -f backup.sql
psql -U postgres -d testdb -f backup.sql
for i in {1..5}; do psql -U postgres -d testdb -c "SELECT $i"; done
pg_isready -h localhost -p 5432 -U postgres
sudo apt update && sudo apt install postgresql
sudo systemctl enable postgresql
sudo systemctl start postgresql
sudo -u postgres createuser myuser --interactive
sudo -u postgres createdb mydb
# Providers: Heroku, AWS RDS, DigitalOcean, Supabase
heroku create
heroku addons:create heroku-postgresql:hobby-dev
heroku pg:psql
psql "postgres://user:password@host:port/dbname"
pg_dump -U postgres -d testdb > dump.sql
psql -U postgres -d newdb < dump.sql
EXPLAIN ANALYZE SELECT * FROM employees WHERE age > 30;
CREATE INDEX idx_age ON employees(age);
VACUUM ANALYZE employees;
EXPLAIN SELECT * FROM employees WHERE age > 30;
SET work_mem = '64MB';
ALTER TABLE employees SET (autovacuum_enabled = true);
SELECT * FROM pg_locks;
SELECT * FROM pg_stat_statements ORDER BY total_exec_time DESC LIMIT 5; -- column is total_time on PostgreSQL 12 and older
SET max_parallel_workers_per_gather = 4;
ANALYZE employees;
CREATE ROLE readonly_user WITH LOGIN PASSWORD 'password';
GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly_user;
REVOKE DELETE ON employees FROM readonly_user;
ALTER SYSTEM SET ssl = on;
hostssl all all 0.0.0.0/0 md5
ALTER USER postgres WITH PASSWORD 'newpassword';
shared_preload_libraries = 'pgaudit'
ALTER ROLE readonly_user CONNECTION LIMIT 5;
DROP ROLE IF EXISTS old_user;
ALTER SYSTEM SET password_encryption = 'scram-sha-256';
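-- Assumption worth noting: GRANT ... ON ALL TABLES only covers tables that already exist; for future tables, set default privileges as well:
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO readonly_user;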
pg_dump -U postgres -F c -b -v -f backup.dump testdb
pg_restore -U postgres -d newdb -v backup.dump
pg_dump -U postgres -t employees testdb > employees.sql
psql -U postgres -d newdb -f employees.sql
pg_dumpall -U postgres -g > globals.sql
psql -U postgres -f globals.sql
wal_level = replica
restore_command = 'cp /archive/%f %p'
# Schedule regular dumps with a cron job running pg_dump (see the crontab entry above)
pg_restore -l backup.dump
wal_level = replica
archive_mode = on
archive_command = 'cp %p /var/lib/postgresql/archive/%f'
host replication replica 192.168.1.0/24 md5
pg_basebackup -h primary -D /var/lib/postgresql/standby -P -U replica
pg_ctl start -D /var/lib/postgresql/standby
primary_conninfo = 'host=primary port=5432 user=replica password=secret'
SELECT * FROM pg_stat_replication;
pg_ctl promote -D /var/lib/postgresql/standby
# For automated failover, use repmgr or Patroni
SELECT * FROM pg_replication_slots;
SELECT * FROM pg_extension;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
SELECT uuid_generate_v4();
CREATE EXTENSION postgis;
SELECT ST_Distance(ST_Point(0,0), ST_Point(3,4));
DROP EXTENSION postgis;
SET search_path TO public, extensions;
-- You can also create your own extension in C
ALTER EXTENSION postgis UPDATE TO '3.1.0';
SELECT pg_extension_config_dump('my_config_table', ''); -- called from an extension script to include a config table in pg_dump; table name illustrative
WITH older AS (SELECT * FROM employees WHERE age > 40) SELECT * FROM older;
SELECT name, salary, RANK() OVER (ORDER BY salary DESC) FROM employees;
SELECT e.name, d.dept FROM employees e, LATERAL (SELECT * FROM dept WHERE dept_id = e.dept_id) d;
SELECT data->'name' FROM json_table WHERE id = 1;
SELECT * FROM table1 FULL OUTER JOIN table2 ON table1.id = table2.id;
SELECT name FROM employees WHERE EXISTS (SELECT 1 FROM dept WHERE dept.id = employees.dept_id);
SELECT name, CASE WHEN age > 50 THEN 'Senior' ELSE 'Junior' END AS level FROM employees;
WITH RECURSIVE t(n) AS ( SELECT 1 UNION ALL SELECT n+1 FROM t WHERE n < 5 ) SELECT * FROM t;
SELECT name, COALESCE(email, 'not_provided') FROM users;
SELECT dept, COUNT(*) FILTER (WHERE gender = 'M') AS males FROM employees GROUP BY dept;
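-- A common LATERAL pattern (top earner per department; schema as assumed in the join section):
SELECT d.name, top.name AS top_employee, top.salary
FROM departments d,
LATERAL (SELECT e.name, e.salary FROM employees e WHERE e.dept_id = d.id ORDER BY e.salary DESC LIMIT 1) top;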
CREATE FUNCTION log_update() RETURNS trigger AS $$ BEGIN INSERT INTO log_table(action, updated_at) VALUES('update', now()); RETURN NEW; END; $$ LANGUAGE plpgsql;
CREATE TRIGGER trg_update AFTER UPDATE ON employees FOR EACH ROW EXECUTE FUNCTION log_update();
CREATE TRIGGER trg_before_insert BEFORE INSERT ON employees FOR EACH ROW EXECUTE FUNCTION check_insert();
CREATE TRIGGER trg_delete AFTER DELETE ON employees FOR EACH ROW EXECUTE FUNCTION log_delete();
CREATE TRIGGER trg_instead INSTEAD OF INSERT ON my_view FOR EACH ROW EXECUTE FUNCTION handle_view_insert();
ALTER TABLE employees ENABLE TRIGGER trg_update;
DROP TRIGGER trg_update ON employees;
CREATE RULE replace_delete AS ON DELETE TO employees DO INSTEAD DELETE FROM archive WHERE id = OLD.id;
SELECT tgname FROM pg_trigger WHERE tgrelid = 'employees'::regclass;
CREATE TRIGGER audit_trigger AFTER INSERT ON employees FOR EACH ROW EXECUTE FUNCTION audit_log();
CREATE PUBLICATION mypub FOR TABLE employees;
CREATE SUBSCRIPTION mysub CONNECTION 'host=localhost dbname=src user=postgres' PUBLICATION mypub;
SELECT * FROM pg_publication;
SELECT * FROM pg_subscription;
ALTER PUBLICATION mypub ADD TABLE departments;
ALTER PUBLICATION mypub DROP TABLE departments;
DROP SUBSCRIPTION mysub;
DROP PUBLICATION mypub;
ALTER SUBSCRIPTION mysub REFRESH PUBLICATION;
CREATE SUBSCRIPTION mysub CONNECTION '...' PUBLICATION mypub WITH (slot_name = 'myslot');
ALTER TABLE documents ADD COLUMN content_vector tsvector;
UPDATE documents SET content_vector = to_tsvector('english', content);
SELECT * FROM documents WHERE to_tsvector('english', content) @@ to_tsquery('database');
CREATE INDEX idx_fts_content ON documents USING GIN(content_vector);
SELECT * FROM documents WHERE content_vector @@ plainto_tsquery('data structure');
SELECT ts_headline('english', content, to_tsquery('search')) FROM documents;
SELECT * FROM documents WHERE content_vector @@ phraseto_tsquery('big data');
SELECT title, ts_rank(content_vector, plainto_tsquery('database')) AS rank FROM documents ORDER BY rank DESC;
SELECT * FROM documents WHERE content_vector @@ websearch_to_tsquery('english', 'data AND index');
ALTER TEXT SEARCH CONFIGURATION pg_catalog.english ALTER MAPPING FOR asciiword WITH english_stem;
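-- A self-contained sketch of the full-text workflow (table and column names illustrative; generated columns need PostgreSQL 12+):
CREATE TABLE docs_demo (id SERIAL PRIMARY KEY, title TEXT, content TEXT, content_tsv tsvector GENERATED ALWAYS AS (to_tsvector('english', content)) STORED);
INSERT INTO docs_demo (title, content) VALUES ('Intro', 'PostgreSQL full text search keeps the tsvector column in sync automatically');
CREATE INDEX idx_docs_demo_fts ON docs_demo USING GIN (content_tsv);
SELECT title, ts_rank(content_tsv, plainto_tsquery('english', 'full text search')) AS rank FROM docs_demo WHERE content_tsv @@ plainto_tsquery('english', 'full text search') ORDER BY rank DESC;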
CREATE EXTENSION postgres_fdw;
CREATE SERVER foreign_srv FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'remote_host', dbname 'foreign_db');
CREATE USER MAPPING FOR local_user SERVER foreign_srv OPTIONS (user 'remote_user', password 'secret');
IMPORT FOREIGN SCHEMA public FROM SERVER foreign_srv INTO local_schema;
CREATE FOREIGN TABLE foreign_emps (id INT, name TEXT) SERVER foreign_srv OPTIONS (table_name 'employees');
SELECT * FROM foreign_emps;
SELECT l.name, f.salary FROM local_emps l JOIN foreign_emps f ON l.id = f.id;
DROP FOREIGN TABLE foreign_emps;
DROP SERVER foreign_srv CASCADE;
-- Use SSL and restrict permissions to avoid exposing sensitive data
SELECT * FROM pg_extension;
CREATE EXTENSION hstore;
CREATE EXTENSION "uuid-ossp";
SELECT uuid_generate_v4();
SELECT 'a=>1,b=>2'::hstore - 'a';
CREATE EXTENSION citext;
CREATE TABLE users (email CITEXT);
DROP EXTENSION hstore;
CREATE EXTENSION intarray;
CREATE TEXT SEARCH CONFIGURATION my_fts ( COPY = english );
SHOW all;
ALTER SYSTEM SET work_mem = '64MB';
SELECT pg_reload_conf();
SET work_mem TO '32MB';
SHOW log_min_duration_statement;
SET search_path TO myschema, public;
SHOW autovacuum;
SHOW checkpoint_timeout;
SHOW max_connections;
SHOW wal_level;
SELECT version();
# OS level step: install postgresql-15
initdb -D /var/lib/postgresql/15/main
pg_upgrade -b old_bin -B new_bin -d old_data -D new_data
pg_upgrade --check
vacuumdb --all --analyze-in-stages  # replaces analyze_new_cluster.sh, which newer pg_upgrade releases no longer generate
./delete_old_cluster.sh
pg_dump dbname | psql -d newdb
SELECT * FROM pg_stat_user_tables;
cat pg_upgrade_internal.log
wal_level = replica
archive_mode = on
primary_conninfo = 'host=master user=replicator password=secret'
SELECT * FROM pg_create_physical_replication_slot('replica1');
pg_basebackup -h master -D /var/lib/postgresql/14/main -U replicator -Fp -Xs -P
restore_command = 'cp /var/lib/postgresql/wal/%f %p'
SELECT * FROM pg_stat_replication;
pg_ctl promote
SELECT * FROM pg_stat_replication WHERE state = 'streaming';
SELECT * FROM pg_control_checkpoint();
# Patroni setup for HA
# repmgr setup for standby management
# Lightweight connection pooling (e.g., PgBouncer)
# Virtual IP management for failover (e.g., keepalived)
# Load balancing PostgreSQL nodes (e.g., HAProxy or Pgpool-II)
# Manage PostgreSQL via systemctl
# Cron-based failover check
# Automated backup and restore
# Network-based heartbeat setup
# Summary of HA tools: Patroni vs. repmgr vs. others