Untested scripts are technical debt waiting to explode. BATS gives bash the same test-driven workflow you'd expect in any programming language — named test cases, assertion helpers, setup/teardown hooks, and a clean pass/fail report. Once you've written tests for your core functions, refactoring becomes safe and deployments become predictable.
1
Install BATS
BASH
# ── Install BATS and helpers ──────────────────────────────
# From source — install.sh needs write access to the prefix,
# hence sudo for /usr/local.
git clone https://github.com/bats-core/bats-core.git
cd bats-core && sudo ./install.sh /usr/local
# Helper libraries — clone under tests/lib/ so the test files can
# `load 'lib/bats-support/load'` (load resolves relative to the
# directory of the .bats file, i.e. tests/).
git clone https://github.com/bats-core/bats-support.git tests/lib/bats-support
git clone https://github.com/bats-core/bats-assert.git tests/lib/bats-assert
git clone https://github.com/bats-core/bats-file.git tests/lib/bats-file
# Or via package manager
brew install bats-core # macOS
sudo apt install bats # Ubuntu 20.04+ (packages bats-core)
# Verify
bats --version # Bats 1.x.x
2
Your first BATS test file
BASH
#!/usr/bin/env bats
# tests/test_utils.bats — Tests for lib/utils.sh
# Load helpers
# NOTE: `load` resolves paths relative to this file's directory
# (BATS_TEST_DIRNAME), so the helper repos must live under tests/lib/.
load 'lib/bats-support/load'
load 'lib/bats-assert/load'
load 'lib/bats-file/load'
setup() {
source "${BATS_TEST_DIRNAME}/../lib/utils.sh"
export TMPDIR
TMPDIR=$(mktemp -d)
}
# Teardown runs after each test
teardown() {
rm -rf "${TMPDIR}"
}
# ── Tests ─────────────────────────────────────────────────
@test "validate_hostname accepts valid hostnames" {
run validate_hostname "prod-db-01"
assert_success
run validate_hostname "web.example.com"
assert_success
}
@test "validate_hostname rejects invalid hostnames" {
run validate_hostname "prod db" # space
assert_failure
run validate_hostname "prod;rm -rf /" # injection
assert_failure
run validate_hostname "" # empty
assert_failure
}
@test "validate_port accepts valid ports" {
run validate_port "3306"
assert_success
run validate_port "65535"
assert_success
}
@test "validate_port rejects invalid ports" {
run validate_port "0"
assert_failure
run validate_port "65536"
assert_failure
run validate_port "abc"
assert_failure
}
3
Testing output, exit codes, and file side-effects
BASH
#!/usr/bin/env bats
# tests/test_backup.bats
# Assertion helpers, loaded relative to this file's directory.
load 'lib/bats-support/load'
load 'lib/bats-assert/load'
load 'lib/bats-file/load'
setup() {
  # Isolated output directory per test.
  export BACKUP_DIR
  BACKUP_DIR=$(mktemp -d)
  # Connection settings the backup script reads from the environment.
  export DB_HOST="localhost"
  export DB_USER="test"
  export DB_PASS="test"
}
# Runs after every test, even on failure. Guard against an unset
# BACKUP_DIR (setup aborted before mktemp) so teardown itself
# never fails or runs `rm -rf ""`.
teardown() {
  if [[ -n "${BACKUP_DIR:-}" ]]; then
    rm -rf -- "${BACKUP_DIR}"
  fi
}
@test "backup creates output file" {
  run ./bin/backup -o "${BACKUP_DIR}" testdb
  assert_success
  # Expand the glob outside the quotes and capture the matches; passing
  # a glob straight to assert_file_exist silently feeds it the literal
  # pattern when nothing matched, or several paths when many did.
  local dumps=("${BACKUP_DIR}"/testdb_*.sql)
  assert_file_exist "${dumps[0]}"
}
@test "backup with -c creates gzipped file" {
  run ./bin/backup -c -o "${BACKUP_DIR}" testdb
  assert_success
  local dumps=("${BACKUP_DIR}"/testdb_*.sql.gz)
  assert_file_exist "${dumps[0]}"
}
@test "backup fails without database name" {
  run ./bin/backup -o "${BACKUP_DIR}"
  assert_failure
  assert_output --partial "database name required"
}
@test "backup prints version with -V" {
  run ./bin/backup -V
  assert_success
  assert_output --regexp "v[0-9]+\.[0-9]+\.[0-9]+"
}
@test "backup logs to stderr on error" {
  # Export rather than prefixing `run` (a shell function) with VAR=val —
  # ShellCheck flags that pattern (SC2097/SC2098). Each test runs in its
  # own process, so the override cannot leak into other tests.
  export DB_HOST="nonexistent"
  run ./bin/backup testdb
  assert_failure
  # `run` captures stdout AND stderr together into $output by default;
  # use `run --separate-stderr` (bats >= 1.5) for a distinct $stderr.
  assert_output --partial "ERROR"
}
4
Mocking commands in BATS
BASH
#!/usr/bin/env bats
# Mock external commands to test without real dependencies
setup() {
  source "${BATS_TEST_DIRNAME}/../lib/network.sh"
  # Prepend a scratch dir to PATH so mock executables shadow the
  # real commands for the duration of this test's process.
  MOCK_BIN=$(mktemp -d)
  export PATH="${MOCK_BIN}:${PATH}"
}
# Guard: MOCK_BIN is unset if setup failed before mktemp ran; don't
# let teardown fail on `rm -rf ""`.
teardown() {
  if [[ -n "${MOCK_BIN:-}" ]]; then
    rm -rf -- "${MOCK_BIN}"
  fi
}
# Write an executable stub named $1 into MOCK_BIN that prints $3 and
# exits with status $2 (default 0).
# The original interpolated ${output} raw into an unquoted here-doc:
# any quote, $, backtick or space in the mocked output produced a
# broken or misbehaving stub. %q-quote the payload first, and emit it
# with printf rather than echo (echo mangles -n and backslashes).
create_mock() {
  local cmd="${1}" exit_code="${2:-0}" output="${3:-}"
  local safe_output
  printf -v safe_output '%q' "${output}"
  cat > "${MOCK_BIN}/${cmd}" << MOCK
#!/usr/bin/env bash
printf '%s\\n' ${safe_output}
exit ${exit_code}
MOCK
  chmod +x "${MOCK_BIN}/${cmd}"
}
@test "check_http succeeds when curl returns 200" {
create_mock curl 0 "200" # mock curl to return "200"
run check_http "http://example.com/health"
assert_success
}
@test "check_http fails when curl returns 500" {
# curl itself succeeds (exit 0) but reports an HTTP 500 status body.
create_mock curl 0 "500"
run check_http "http://example.com/health"
assert_failure
}
@test "check_http fails when curl exits non-zero" {
create_mock curl 1 "" # curl fails (network error)
run check_http "http://example.com/health"
assert_failure
}
5
Running tests and CI integration
BASH
# ── Run all tests ─────────────────────────────────────────
bats tests/ # run all .bats files in dir
bats tests/test_utils.bats # run specific file
bats --tap tests/ # TAP format output
bats --formatter junit tests/ # JUnit XML for CI (there is no --junit flag)
bats -j 4 tests/ # parallel, 4 jobs (requires GNU parallel)
# ── GitHub Actions CI ─────────────────────────────────────
# .github/workflows/test.yml
# name: Test
# on: [push, pull_request]
# jobs:
#   test:
#     runs-on: ubuntu-latest
#     steps:
#       - uses: actions/checkout@v4
#       - run: sudo apt-get install -y bats
#       - run: bats tests/
# ── Makefile for convenience ──────────────────────────────
# test:
# 	bats tests/
# lint:
# 	shellcheck bin/* lib/*.sh
# check: lint test
Terminal output
Key
Test passed
Test failed
Skipped
vriddh@prod-01:~/myapp-scripts$ bats tests/
tests/test_utils.bats
✓ validate_hostname accepts valid hostnames
✓ validate_hostname rejects invalid hostnames
✓ validate_port accepts valid ports
✓ validate_port rejects invalid ports
tests/test_backup.bats
✓ backup creates output file
✓ backup with -c creates gzipped file
✓ backup fails without database name
✗ backup logs to stderr on error
(in test file tests/test_backup.bats, line 42)
8 tests, 1 failure
█
✔ Testing rules — Use
setup() for per-test initialisation and teardown() for cleanup. Always test both success and failure paths. Mock external commands by prepending a temp dir to $PATH. Use assert_file_exist, assert_output, and assert_success/failure for readable assertions. Run shellcheck and bats together in CI — they catch different classes of bugs.