Vagrant OEL 8 DevOps · MySQL · Replication

Vagrant MySQL Replication Lab

Complete MySQL Master + 2 Replicas lab with Vagrant and OEL 8. GTID-based replication and automated provisioning: a single `vagrant up` delivers a full replication stack.

Spin up a complete MySQL replication environment with one vagrant up. This lab creates a Master + 2 Replica setup with GTID-based replication, ready for ProxySQL integration, failover testing, and replication monitoring.

Master — 192.168.56.11 (server-id = 1) · Replica1 — 192.168.56.12 (server-id = 2) · Replica2 — 192.168.56.13 (server-id = 3)
Ruby — Replication Lab Vagrantfile
# Vagrantfile — three-node MySQL replication lab on a host-only network:
# one master plus two replicas, all provisioned by scripts/mysql_replication.sh.
Vagrant.configure("2") do |config|
  config.vm.box              = "generic/oracle8"
  config.vm.box_check_update = false

  # One entry per VM: hostname suffix, private IP, MySQL server-id,
  # host port forwarded to guest 3306, and the role passed to the provisioner.
  nodes = [
    { name: "master",   ip: "192.168.56.11", id: 1, port: 13306, role: "master"   },
    { name: "replica1", ip: "192.168.56.12", id: 2, port: 13307, role: "replica"  },
    { name: "replica2", ip: "192.168.56.13", id: 3, port: 13308, role: "replica"  },
  ]

  nodes.each do |spec|
    config.vm.define spec[:name] do |machine|
      machine.vm.hostname = "mysql-#{spec[:name]}"

      machine.vm.network "private_network", ip: spec[:ip]
      machine.vm.network "forwarded_port", guest: 3306, host: spec[:port]

      # Identical resource sizing under either hypervisor.
      machine.vm.provider "virtualbox" do |vb|
        vb.name   = "MySQL-#{spec[:name].capitalize}"
        vb.memory = 2048
        vb.cpus   = 2
      end

      machine.vm.provider "parallels" do |prl|
        prl.name               = "MySQL-#{spec[:name].capitalize}"
        prl.memory             = 2048
        prl.cpus               = 2
        prl.update_guest_tools = false
      end

      # Args: <role> <server-id> <own ip> <master ip>
      machine.vm.provision "shell",
        path: "scripts/mysql_replication.sh",
        args: [spec[:role], spec[:id].to_s, spec[:ip], "192.168.56.11"]
    end
  end
end
BASH — mysql_replication.sh
#!/bin/bash
# scripts/mysql_replication.sh
#
# Provision one node of a MySQL 8 GTID-based replication lab.
#
# Usage: mysql_replication.sh <role> <server_id> <my_ip> <master_ip>
#   role      - "master" or "replica"               (default: master)
#   server_id - unique numeric server-id            (default: 1)
#   my_ip     - this node's private-network IP      (default: 192.168.56.11)
#   master_ip - the master's private-network IP     (default: 192.168.56.11)
set -euo pipefail

ROLE=${1:-master}
SERVER_ID=${2:-1}
MY_IP=${3:-192.168.56.11}
MASTER_IP=${4:-192.168.56.11}
readonly ROOT_PASS="Root@123!"
readonly REPL_PASS="Repl@123!"

echo "=== Setting up MySQL ${ROLE} (Server ID: ${SERVER_ID}) ==="

# Install MySQL 8 from the official community repo (the OS 'mysql'
# module would otherwise shadow it on EL8).
rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
dnf install -y https://dev.mysql.com/get/mysql80-community-release-el8-9.noarch.rpm 2>/dev/null
dnf module disable -y mysql 2>/dev/null
dnf install -y mysql-community-server

systemctl enable --now mysqld

# BUG FIX: the awk program must be single-quoted. With double quotes the
# shell expanded $NF to an empty string, so awk printed the whole log line
# instead of the last field (the temporary password).
TEMP_PASS=$(grep "temporary password" /var/log/mysqld.log | tail -1 | awk '{print $NF}')
if [ -z "$TEMP_PASS" ]; then
    echo "ERROR: could not extract temporary root password from /var/log/mysqld.log" >&2
    exit 1
fi

# Replace the expired temporary root password with the lab password.
mysql --connect-expired-password -u root -p"$TEMP_PASS"   -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '${ROOT_PASS}';"

# BUG FIX: log_bin points into /var/log/mysql, which does not exist on a
# fresh box — mysqld would fail to restart. Create it owned by mysql first.
install -d -o mysql -g mysql -m 750 /var/log/mysql

# Replication settings shared by every node. GTID mode lets replicas use
# MASTER_AUTO_POSITION instead of tracking binlog file/offset coordinates.
cat > /etc/my.cnf.d/replication.cnf << EOF
[mysqld]
server-id              = ${SERVER_ID}
bind-address           = 0.0.0.0
gtid_mode              = ON
enforce_gtid_consistency = ON
log_bin                = /var/log/mysql/mysql-bin.log
binlog_format          = ROW
log_slave_updates      = ON
innodb_buffer_pool_size = 512M
max_connections        = 200
EOF

# Replicas are read-only; replication control statements (CHANGE MASTER,
# START SLAVE) are still permitted under super_read_only.
if [ "$ROLE" = "replica" ]; then
    echo "read_only = ON"       >> /etc/my.cnf.d/replication.cnf
    echo "super_read_only = ON" >> /etc/my.cnf.d/replication.cnf
fi

systemctl restart mysqld

# Remote root (lab convenience only) plus a monitoring account for
# ProxySQL / health checks.
mysql -u root -p"$ROOT_PASS" << EOF
CREATE USER IF NOT EXISTS 'root'@'%' IDENTIFIED BY '${ROOT_PASS}';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
CREATE USER IF NOT EXISTS 'monitor'@'%' IDENTIFIED BY 'Monitor@123!';
GRANT SELECT, REPLICATION CLIENT ON *.* TO 'monitor'@'%';
FLUSH PRIVILEGES;
EOF

if [ "$ROLE" = "master" ]; then
    # The account replicas authenticate with when pulling binlogs.
    mysql -u root -p"$ROOT_PASS" << EOF
CREATE USER IF NOT EXISTS 'replicator'@'%' IDENTIFIED BY '${REPL_PASS}';
GRANT REPLICATION SLAVE ON *.* TO 'replicator'@'%';
FLUSH PRIVILEGES;
EOF
    echo "=== Master ready. Server ID: ${SERVER_ID} ==="

elif [ "$ROLE" = "replica" ]; then
    # Block until the master answers remotely; with `vagrant up --no-parallel`
    # this normally succeeds on the first try.
    echo "Waiting for master at ${MASTER_IP}..."
    until mysql -u root -p"$ROOT_PASS" -h "$MASTER_IP" -e "SELECT 1" 2>/dev/null; do
        sleep 5
        echo "  Still waiting for master..."
    done

    # GTID auto-positioning: no binlog file/position coordinates needed.
    mysql -u root -p"$ROOT_PASS" << EOF
CHANGE MASTER TO
  MASTER_HOST='${MASTER_IP}',
  MASTER_USER='replicator',
  MASTER_PASSWORD='${REPL_PASS}',
  MASTER_AUTO_POSITION=1;
START SLAVE;
EOF
    # Give the IO/SQL threads a moment to connect, then show their state.
    sleep 3
    mysql -u root -p"$ROOT_PASS" -e "SHOW SLAVE STATUS\G" | grep -E "Slave_IO|Slave_SQL|Seconds_Behind"
    echo "=== Replica ready. Server ID: ${SERVER_ID} ==="
fi
BASH — Verify Replication
# Start lab (no-parallel so master starts first)
vagrant up --no-parallel

# Check replication status on replica1.
# Healthy output: Slave_IO_Running: Yes, Slave_SQL_Running: Yes,
# and Seconds_Behind_Master near 0.
vagrant ssh replica1 -c "mysql -u root -pRoot@123! -e 'SHOW SLAVE STATUS\G'" |   grep -E "Slave_IO|Slave_SQL|Seconds_Behind|Master_Host"

# Test replication — create data on master.
# Writes go to the master only; replicas are read_only.
vagrant ssh master -c "mysql -u root -pRoot@123! -e   'CREATE DATABASE IF NOT EXISTS testdb;    USE testdb;    CREATE TABLE IF NOT EXISTS t1 (id INT);    INSERT INTO t1 VALUES (1),(2),(3);'"

# Verify data on replicas — each should return rows 1, 2, 3
# once the GTID transactions have been applied.
vagrant ssh replica1 -c "mysql -u root -pRoot@123! -e 'SELECT * FROM testdb.t1;'"
vagrant ssh replica2 -c "mysql -u root -pRoot@123! -e 'SELECT * FROM testdb.t1;'"

echo "Replication is working if both replicas show the same data!"