PostgreSQL's psql client is as scriptable as MySQL's, but with different conventions. The .pgpass file, PGPASSWORD env var, and connection URI format give you multiple authentication approaches. The -c and -f flags, combined with output format options, make psql a powerful automation tool.
1
Authentication and connection options
BASH
# ── ~/.pgpass (recommended) ───────────────────────────────
# Format: hostname:port:database:username:password
cat ~/.pgpass
# prod-db-01:5432:myapp:app_user:s3cr3t
# *:5432:*:backup_user:backup_pass
# psql refuses to use ~/.pgpass unless its mode is 0600 or stricter.
chmod 600 ~/.pgpass
# ── PGPASSWORD env var ────────────────────────────────────
# Password is passed via the environment (not argv); note that child
# processes of psql inherit it.
PGPASSWORD="${DB_PASS}" psql -h prod-db-01 -U app_user myapp
# ── Connection URI ────────────────────────────────────────
# NOTE(review): the URI — password included — is an argv entry of psql,
# so it is visible in `ps` output; prefer .pgpass on shared hosts.
PG_URI="postgresql://app_user:${DB_PASS}@prod-db-01:5432/myapp"
psql "${PG_URI}" -c "SELECT 1"
# ── Wrapper function ───────────────────────────────────────
# Scripting-friendly psql: connection settings from the environment,
# output as header-free, tab-separated rows.
db() {
  local -a conn=(
    -h "${DB_HOST}"
    -p "${DB_PORT:-5432}"
    -U "${DB_USER}"
    -d "${DB_NAME}"
  )
  PGPASSWORD="${DB_PASS}" psql "${conn[@]}" -t -A -F$'\t' "$@"
}
# -t tuple-only (no headers, no row count)
# -A unaligned output
# -F field separator (default: |, set to tab for AWK)
db -c "SELECT COUNT(*) FROM users"
2
Non-interactive queries and result handling
BASH
# ── Key flags for scripting ───────────────────────────────
# -c 'SQL'   run SQL command and exit (psql does NOT interpolate
#            :variables inside -c strings — feed SQL via stdin or -f)
# -f file    run SQL from file
# -t         tuple-only output (no headers, no row count)
# -A         unaligned output (columns separated by -F separator)
# -F $'\t'   tab as field separator (use $'\t' so the shell passes a
#            real tab character, not the two characters backslash-t)
# -q         quiet (suppress startup messages)
# -v VAR=val set psql variable (use as :VAR, or :'VAR' for a quoted literal)
# --csv      CSV output (psql 12+)
# ── Fetch a single value ──────────────────────────────────
# db's -t -A flags make the result a bare value, safe to capture.
COUNT=$(db -c "SELECT COUNT(*) FROM users")
echo "Users: ${COUNT}"
# ── Process each row ─────────────────────────────────────
# Process substitution keeps the loop in the current shell; a pipeline
# would run the while-body in a subshell, discarding any variables it sets.
while IFS=$'\t' read -r id name email; do
  echo "Processing: ${name} <${email}>"
done < <(db -c "SELECT id,name,email FROM users WHERE active=true")
# ── Run SQL from heredoc ──────────────────────────────────
# Quote the delimiter (<<'SQL') so the shell leaves the SQL text alone:
# with an unquoted delimiter any $var or $(cmd) inside the query would
# be expanded by the shell before psql ever sees it.
db <<'SQL'
SELECT
u.name,
COUNT(o.id) AS orders,
SUM(o.total) AS revenue
FROM users u
JOIN orders o ON o.user_id = u.id
WHERE o.created_at >= NOW() - INTERVAL '30 days'
GROUP BY u.id
ORDER BY revenue DESC
LIMIT 10;
SQL
# ── Use psql variables to prevent SQL injection ────────────
# Two fixes vs the naive `-c "... id = :uid"` form:
#   1. psql does NOT interpolate :variables inside -c strings — the SQL
#      must arrive via stdin (or -f) for -v variables to be substituted.
#   2. Bare :uid is raw text substitution and is still injectable
#      (uid="1 OR 1=1"); :'uid' splices the value in as a properly
#      quoted SQL literal.
USER_ID=42
db -v uid="${USER_ID}" <<'SQL'
SELECT * FROM users WHERE id = :'uid';
SQL
# ── CSV export ────────────────────────────────────────────
# Do NOT tack on 2>/dev/null here: a connection or auth failure would
# be silently swallowed, leaving an empty (or truncated) CSV behind.
psql --csv -c "SELECT * FROM users WHERE active=true" \
  -d myapp > active_users.csv
3
pg_dump backup and pg_restore
BASH
#!/usr/bin/env bash
# pg_backup.sh — Production PostgreSQL backup
#
# Required env vars: DB_HOST, DB_USER, DB_PASS
set -euo pipefail

# Fail fast with a clear message instead of a bare set -u error.
: "${DB_HOST:?DB_HOST must be set}"
: "${DB_USER:?DB_USER must be set}"
: "${DB_PASS:?DB_PASS must be set}"

BACKUP_DIR="/backups/postgresql"
DB_NAME="myapp"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="${BACKUP_DIR}/${DB_NAME}_${TIMESTAMP}.dump"
mkdir -p "${BACKUP_DIR}"

# ── pg_dump custom format (compressed, parallel restore) ──
# BUG FIX: the original placed '# comments' after the line-continuation
# backslashes; everything after '\ ' is handed to pg_dump as extra
# arguments, breaking the command. Comments now live on their own lines:
#   --format=custom  compressed custom format
#   --no-acl         skip privilege grants
#   --no-owner       skip ownership (useful for restore to diff user)
PGPASSWORD="${DB_PASS}" pg_dump \
  -h "${DB_HOST}" -U "${DB_USER}" \
  -d "${DB_NAME}" \
  --format=custom \
  --no-acl \
  --no-owner \
  -f "${BACKUP_FILE}"
echo " ✔ Backup: ${BACKUP_FILE} ($(du -sh "${BACKUP_FILE}" | cut -f1))"

# ── Restore from custom format ────────────────────────────
# --jobs=4: parallel restore with 4 workers (custom format only).
PGPASSWORD="${DB_PASS}" pg_restore \
  -h "${DB_HOST}" -U "${DB_USER}" \
  -d "${DB_NAME}_restore" \
  --jobs=4 \
  --no-acl --no-owner \
  "${BACKUP_FILE}"

# ── SQL format dump (human-readable, pipeable) ────────────
PGPASSWORD="${DB_PASS}" pg_dump \
  -h "${DB_HOST}" -U "${DB_USER}" \
  --format=plain "${DB_NAME}" | gzip > "${DB_NAME}.sql.gz"

# Restore from SQL format
zcat "${DB_NAME}.sql.gz" | \
  PGPASSWORD="${DB_PASS}" psql -h "${DB_HOST}" -U "${DB_USER}" -d "${DB_NAME}"
4
PostgreSQL health checks
BASH
#!/usr/bin/env bash
# pg_health.sh — PostgreSQL health checks
#
# Required env vars: DB_HOST, DB_USER, DB_NAME, DB_PASS
# Strict mode for consistency with pg_backup.sh: any failed check after
# the explicit connectivity probe aborts the script.
set -euo pipefail
: "${DB_HOST:?}" "${DB_USER:?}" "${DB_NAME:?}" "${DB_PASS:?}"

# Tab-separated, header-free psql wrapper (-t -A -F$'\t').
db() { PGPASSWORD="${DB_PASS}" psql -h "${DB_HOST}" -U "${DB_USER}" -d "${DB_NAME}" -tAF$'\t' "$@"; }

# Connectivity — the one check allowed to fail; report and bail out.
db -c "SELECT 1" &>/dev/null || { echo "CRITICAL: DB unreachable"; exit 1; }

# Active connections vs max_connections.
# BUG FIX: the original selected bare `count`, which is not a column of
# pg_stat_activity — it must be the aggregate count(*).
db -c "SELECT count(*),max_conn,round(count(*)::numeric/max_conn*100,1) AS pct
FROM pg_stat_activity, (SELECT setting::int max_conn FROM pg_settings WHERE name='max_connections') m
GROUP BY max_conn"

# Long-running queries (>30s)
db -c "SELECT pid,usename,state,
EXTRACT(epoch FROM now()-query_start)::int AS seconds,
LEFT(query,80) AS query
FROM pg_stat_activity
WHERE state!='idle' AND query_start < now()-interval '30 seconds'
ORDER BY seconds DESC"

# Table bloat (tables needing VACUUM) — NULLIF guards the divide-by-zero
# when a table has neither live nor dead tuples.
db -c "SELECT relname,n_dead_tup,n_live_tup,
ROUND(n_dead_tup::numeric/(NULLIF(n_live_tup+n_dead_tup,0))*100,1) AS dead_pct
FROM pg_stat_user_tables
WHERE n_dead_tup > 10000
ORDER BY dead_pct DESC LIMIT 10"

# Database sizes (skip the immutable template databases)
db -c "SELECT datname,pg_size_pretty(pg_database_size(datname)) AS size
FROM pg_database WHERE datname NOT IN ('template0','template1')
ORDER BY pg_database_size(datname) DESC"
vriddh@prod-01:~/scripts$ ./pg_health.sh
Connections: 42/200 (21.0%)
Long query (87s): SELECT * FROM large_reports WHERE...
DB myapp: 42 GB
DB analytics: 18 GB
█
✔ PostgreSQL scripting rules — Always use
-t -A -F$'\t' together for clean tab-separated output suitable for AWK/while-read parsing. Use pg_dump --format=custom for production backups — it's compressed and supports parallel restore with --jobs=N. Use psql variables (-v with the quoted form :'var', feeding the SQL via stdin or -f since -c strings are not interpolated) instead of shell string interpolation to prevent SQL injection. Check pg_stat_activity for connection and query monitoring.