Storage-Benchmark Script
For this I first put together a small script to quickly measure storage performance without having to install any software. The script works on Debian and Ubuntu (which is a Debian derivative anyway...).
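At its core the measurement is nothing more than timed dd runs; conv=fdatasync forces dd to flush the data to disk before it returns, so the write numbers are not just page-cache speed. As a rough one-off sketch of the sequential write test (same paths and sizes as the defaults in the script below):

mkdir -p ./disk_test
time dd if=/dev/zero of=./disk_test/testfile.bin bs=1M count=1024 conv=fdatasync

The full script: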
#!/usr/bin/env bash
set -e
# === Configuration ===
TEST_DIR="${1:-./disk_test}"
TEST_FILE="$TEST_DIR/testfile.bin"
FILE_SIZE_MB=1024
BLOCK_SIZE_SEQ=1M
BLOCK_SIZE_4K=4K
COUNT_4K=262144 # 4K * 262144 = 1GB
JSON_FILE="disk_benchmark_$(date +%Y%m%d_%H%M%S).json"
mkdir -p "$TEST_DIR"
# Flush dirty pages and, if running as root, drop the page cache so
# subsequent reads actually hit the disk instead of RAM.
clear_cache() {
    sync
    if [ "$EUID" -eq 0 ]; then
        echo 3 > /proc/sys/vm/drop_caches
    fi
}

# Run the given command via eval and print the elapsed wall-clock time in seconds.
measure_time() {
    START=$(date +%s.%N)
    eval "$1"
    END=$(date +%s.%N)
    awk "BEGIN {print $END - $START}"
}
echo "Starte Benchmark im Verzeichnis: $TEST_DIR"
echo
# =============================
# Sequential Write
# =============================
echo "Teste Sequential Write..."
clear_cache
TIME_SEQ_WRITE=$(measure_time \
"dd if=/dev/zero of=$TEST_FILE bs=$BLOCK_SIZE_SEQ count=$FILE_SIZE_MB conv=fdatasync status=none")
SEQ_WRITE_MBPS=$(awk "BEGIN {printf "%.2f", $FILE_SIZE_MB / $TIME_SEQ_WRITE}")
# =============================
# Sequential Read
# =============================
echo "Teste Sequential Read..."
clear_cache
TIME_SEQ_READ=$(measure_time \
"dd if=$TEST_FILE of=/dev/null bs=$BLOCK_SIZE_SEQ status=none")
SEQ_READ_MBPS=$(awk "BEGIN {printf "%.2f", $FILE_SIZE_MB / $TIME_SEQ_READ}")
# =============================
# 4K Random Write
# =============================
echo "Teste 4K Random Write..."
clear_cache
TIME_4K_WRITE=$(measure_time \
"dd if=/dev/urandom of=$TEST_FILE bs=$BLOCK_SIZE_4K count=$COUNT_4K conv=fdatasync status=none")
TOTAL_MB_4K=$(awk "BEGIN {print ($COUNT_4K * 4) / 1024}")
WRITE_4K_MBPS=$(awk "BEGIN {printf "%.2f", $TOTAL_MB_4K / $TIME_4K_WRITE}")
WRITE_4K_IOPS=$(awk "BEGIN {printf "%.0f", $COUNT_4K / $TIME_4K_WRITE}")
# =============================
# 4K Random Read
# =============================
echo "Teste 4K Random Read..."
clear_cache
TIME_4K_READ=$(measure_time \
"dd if=$TEST_FILE of=/dev/null bs=$BLOCK_SIZE_4K status=none")
READ_4K_MBPS=$(awk "BEGIN {printf "%.2f", $TOTAL_MB_4K / $TIME_4K_READ}")
READ_4K_IOPS=$(awk "BEGIN {printf "%.0f", $COUNT_4K / $TIME_4K_READ}")
rm -f "$TEST_FILE"
TIMESTAMP=$(date -Iseconds)
HOSTNAME=$(hostname)
KERNEL=$(uname -r)
echo
echo "===== Ergebnisse ====="
echo "Seq Write: $SEQ_WRITE_MBPS MB/s"
echo "Seq Read: $SEQ_READ_MBPS MB/s"
echo "4K Write: $WRITE_4K_MBPS MB/s ($WRITE_4K_IOPS IOPS)"
echo "4K Read: $READ_4K_MBPS MB/s ($READ_4K_IOPS IOPS)"
cat <<EOF > "$JSON_FILE"
{
  "timestamp": "$TIMESTAMP",
  "hostname": "$HOSTNAME",
  "kernel": "$KERNEL",
  "test_directory": "$TEST_DIR",
  "results": {
    "sequential_write_mb_s": $SEQ_WRITE_MBPS,
    "sequential_read_mb_s": $SEQ_READ_MBPS,
    "random_4k_write_mb_s": $WRITE_4K_MBPS,
    "random_4k_write_iops": $WRITE_4K_IOPS,
    "random_4k_read_mb_s": $READ_4K_MBPS,
    "random_4k_read_iops": $READ_4K_IOPS
  }
}
EOF
echo
echo "Ergebnisse gespeichert in: $JSON_FILE"
It is loosely modeled on CrystalDiskMark, but it is meant to be quickly created on the server via SSH and nano and then run:
chmod +x disk_benchmark.sh
./disk_benchmark.sh
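The test directory is taken from the first argument (it defaults to ./disk_test), so the script can be pointed at any mounted filesystem; running it as root additionally lets clear_cache actually drop the page cache, which keeps the read results honest. A small usage sketch, assuming a hypothetical mount point /mnt/data and that jq happens to be installed for inspecting the JSON afterwards:

sudo ./disk_benchmark.sh /mnt/data/disk_test
jq '.results' disk_benchmark_*.json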