Shell Scripting — Database-Centric: SQLite in Shell Scripts (Advanced) — May 2026

Shell Scripting Database Centric: SQLite in Shell Scripts

Use SQLite as a local database engine inside shell scripts — creating and querying databases without a server, importing CSV files, exporting to multiple formats, using SQLite as a job queue or state tracker, and batch data processing.

SQLite is the perfect embedded database for shell scripts that need more structure than a flat file but don't want the overhead of a server. It's available on every Linux system, requires no authentication, and the sqlite3 CLI supports nearly the same scripting patterns as MySQL and PostgreSQL.

BASH
# ── Key invocation patterns ───────────────────────────────
# The database file is created on first use; the four forms below are
# equivalent ways to hand SQL to the sqlite3 CLI.
sqlite3 mydb.db "SELECT * FROM users"     # inline query
sqlite3 mydb.db < schema.sql              # execute SQL file
sqlite3 mydb.db <<SQL                    # heredoc SQL (unquoted delimiter: shell expands $vars in the body)
  SELECT name, email FROM users WHERE active=1;
SQL
echo "SELECT COUNT(*) FROM users" | sqlite3 mydb.db

# ── Output formatting ─────────────────────────────────────
# These flags set the OUTPUT mode for query results only.
sqlite3 -separator $'\t' mydb.db "SELECT * FROM users"  # TSV ($'\t' is an ANSI-C quoted tab)
sqlite3 -csv mydb.db "SELECT * FROM users"            # CSV
sqlite3 -json mydb.db "SELECT * FROM users"           # JSON (newer sqlite3 releases)
sqlite3 -markdown mydb.db "SELECT * FROM users"       # Markdown table (newer sqlite3 releases)
sqlite3 -line mydb.db "SELECT * FROM users"          # one field per line

# ── Suppress headers and row counts ──────────────────────
# -noheader drops the column-name row so output is safe to pipe.
sqlite3 -noheader mydb.db "SELECT name FROM users"

# ── Wrapper function ──────────────────────────────────────
# Centralise the DB path and flags once; db_q additionally suppresses
# headers for clean, machine-readable query output.
DB="/var/lib/myapp/state.db"
db() { sqlite3 -separator $'\t' "${DB}" "$@"; }
db_q() { sqlite3 -separator $'\t' -noheader "${DB}" "$@"; }

db_q "SELECT COUNT(*) FROM jobs WHERE status='pending'"
BASH
#!/usr/bin/env bash
# job_queue.sh — SQLite-backed job queue for shell scripts
set -euo pipefail

DB="/var/lib/myapp/jobs.db"

# db — run SQL against the queue DB with tab-separated output.
# .timeout makes writers wait up to 5s for a lock instead of failing
# immediately with SQLITE_BUSY when another process is writing.
db() { sqlite3 -separator $'\t' -cmd '.timeout 5000' "${DB}" "$@"; }

# ── Initialise schema ─────────────────────────────────────
# Quoted heredoc delimiter: the SQL passes through verbatim, with no
# shell expansion.
db <<'SQL'
CREATE TABLE IF NOT EXISTS jobs (
  id         INTEGER PRIMARY KEY AUTOINCREMENT,
  type       TEXT    NOT NULL,
  payload    TEXT    NOT NULL DEFAULT '{}',
  status     TEXT    NOT NULL DEFAULT 'pending',
  attempts   INTEGER NOT NULL DEFAULT 0,
  created_at DATETIME NOT NULL DEFAULT (datetime('now')),
  started_at DATETIME,
  done_at    DATETIME,
  error      TEXT
);
CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
SQL

# ── Enqueue a job ─────────────────────────────────────────
# Usage: enqueue TYPE [PAYLOAD]   (payload defaults to '{}')
# Fixes two bugs in the naive version:
#  * "${2:-{}}" parses as default '{' plus a LITERAL '}', so any
#    explicit payload grew a stray trailing brace.
#  * values were interpolated into the SQL unescaped — a single quote
#    in the payload broke the statement (SQL injection). Doubling each
#    single quote is SQLite's string-literal escape.
enqueue() {
  local type=${1:?job type required}
  local payload=${2:-'{}'}
  # Escape for SQLite string literals: ' -> ''
  local q_type=${type//"'"/"''"}
  local q_payload=${payload//"'"/"''"}
  db "INSERT INTO jobs (type, payload) VALUES ('${q_type}', '${q_payload}')"
  echo "Enqueued: ${type}"
}

# ── Process next job ──────────────────────────────────────
# Claim the oldest pending job, dispatch it to handle_job, and record
# the outcome. NOTE(review): the SELECT-then-UPDATE claim is not
# atomic across processes — assumes a single consumer.
process_next() {
  local claimed
  claimed=$(db "SELECT id,type,payload FROM jobs
    WHERE status='pending' ORDER BY id LIMIT 1")
  if [[ -z "${claimed}" ]]; then
    echo "No pending jobs"
    return 0
  fi

  # Split the tab-separated row into its three fields.
  IFS=$'\t' read -r id type payload <<< "${claimed}"

  # Mark as running before dispatch so a crash leaves a visible trace.
  db "UPDATE jobs SET status='running', started_at=datetime('now'),
      attempts=attempts+1 WHERE id=${id}"

  echo "Processing job ${id}: ${type}"
  if ! handle_job "${type}" "${payload}"; then
    db "UPDATE jobs SET status='failed', error='handler failed' WHERE id=${id}"
    echo "  ✘ Failed"
  else
    db "UPDATE jobs SET status='done', done_at=datetime('now') WHERE id=${id}"
    echo "  ✔ Done"
  fi
}

# Dispatch a job to its type-specific handler.
# $1 = job type, $2 = payload. Returns non-zero for unknown types.
handle_job() {
  if [[ "${1}" == "send_email" ]]; then
    echo "Sending email: ${2}"
  elif [[ "${1}" == "export_csv" ]]; then
    echo "Exporting CSV"
  else
    echo "Unknown job type: ${1}"
    return 1
  fi
}
BASH
# ── Import CSV into SQLite ────────────────────────────────
# .mode csv makes .import parse the file as CSV. If my_table does not
# exist, .import creates it and uses the CSV's FIRST row as the column
# names. (.headers controls query OUTPUT only — it has no effect on
# .import, so it is omitted here.)
sqlite3 mydb.db <<SQL
.mode csv
.import /path/to/data.csv my_table
SQL

# ── Import with schema creation ───────────────────────────
# When the target table already exists, .import treats EVERY row as
# data — including a CSV header row. --skip 1 (sqlite3 >= 3.32) drops
# the header so it is not inserted as a bogus record.
sqlite3 report.db <<SQL
CREATE TABLE IF NOT EXISTS sales (
  date    TEXT,
  region  TEXT,
  product TEXT,
  amount  REAL
);
.mode csv
.import --skip 1 sales.csv sales
SQL

# ── Query imported data with aggregation ──────────────────
# Per-region rollup of the imported sales data; -csv emits the result
# in a form that pipes cleanly into further tooling.
sqlite3 -csv report.db <<SQL
SELECT
  region,
  SUM(amount)          AS total,
  COUNT(*)             AS transactions,
  ROUND(AVG(amount),2) AS avg_sale
FROM sales
GROUP BY region
ORDER BY total DESC;
SQL

# ── Export to multiple formats ────────────────────────────
# -header adds the column-name row to the CSV; the JSON and Markdown
# modes carry column names in their own structure, so no flag needed.
sqlite3 -csv -header mydb.db "SELECT * FROM users" > users.csv
sqlite3 -json        mydb.db "SELECT * FROM users" > users.json
sqlite3 -markdown    mydb.db "SELECT * FROM users" > users.md

# ── Use SQLite to deduplicate a CSV ───────────────────────
# The first row of input.csv becomes the auto-created table's column
# names, so only data rows are deduplicated — and output.csv is
# therefore written WITHOUT a header row.
sqlite3 :memory: <<SQL
.mode csv
.import input.csv raw
CREATE TABLE deduped AS SELECT DISTINCT * FROM raw;
.output output.csv
SELECT * FROM deduped;
SQL

# ── SQLite for fast local analytics ──────────────────────
# .import into a table that does not exist takes its column names from
# the FIRST row — useless for a headerless, space-separated log: the
# col1/col9 names referenced below would not exist. Pre-create the
# table and import with an explicit separator instead.
sqlite3 :memory: <<SQL
CREATE TABLE parsed (
  col1 TEXT, col2 TEXT, col3 TEXT, col4 TEXT, col5 TEXT,
  col6 TEXT, col7 TEXT, col8 TEXT, col9 INTEGER
);
.mode list
.separator ' '
.import access.log parsed
SELECT
  substr(col1,1,10) AS day,
  COUNT(*)          AS requests,
  COUNT(CASE WHEN col9 >= 500 THEN 1 END) AS errors
FROM parsed
GROUP BY day;
SQL
sqlite3 — in-memory analytics
vriddh@prod-01:~/scripts$ sqlite3 -csv report.db "SELECT region,ROUND(SUM(amount),2) AS total FROM sales GROUP BY region ORDER BY total DESC"
region,total
South,142850.40
North,98320.15
East,76541.00
vriddh@prod-01:~/scripts$ sqlite3 -csv :memory: ".import data.csv t" "SELECT COUNT(DISTINCT col1) FROM t"
8421
✔ SQLite in scripts — Use :memory: as the database path for ephemeral analysis — the database vanishes when sqlite3 exits, perfect for one-shot data processing. Use -separator $'\t' with a wrapper function for clean tab-separated output. The .import dot-command parses its input as CSV once .mode csv is set (it does not auto-detect the format). SQLite handles gigabyte CSVs efficiently with proper indexes — it's often faster than AWK for complex multi-table join analytics.