
ZTS: Optimize KSM on Linux and remove it for FreeBSD #17247

Merged: 2 commits, Apr 29, 2025
Changes from all commits
1 change: 1 addition & 0 deletions .github/workflows/scripts/generate-ci-type.py
@@ -29,6 +29,7 @@
Patterns of files that are considered to trigger full CI.
"""
FULL_RUN_REGEX = list(map(re.compile, [
r'\.github/workflows/scripts/.*',
r'cmd.*',
r'configs/.*',
r'META',
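The added pattern makes any change under .github/workflows/scripts/ count as a full-CI trigger. As a rough illustration only (not the project's actual logic; the changed-files.txt input is hypothetical and the regex list is truncated here), the classification amounts to matching each changed path against that list:

# Hypothetical shell equivalent of the full-run check in generate-ci-type.py
if grep -E -q '^(\.github/workflows/scripts/.*|cmd.*|configs/.*|META)' changed-files.txt; then
  echo "FULL"   # any matching path forces the full test matrix
else
  echo "QUICK"  # otherwise the reduced run is enough
fi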
44 changes: 9 additions & 35 deletions .github/workflows/scripts/qemu-1-setup.sh
@@ -10,36 +10,12 @@ set -eu
export DEBIAN_FRONTEND="noninteractive"
sudo apt-get -y update
sudo apt-get install -y axel cloud-image-utils daemonize guestfs-tools \
ksmtuned virt-manager linux-modules-extra-$(uname -r) zfsutils-linux
virt-manager linux-modules-extra-$(uname -r) zfsutils-linux

# generate ssh keys
rm -f ~/.ssh/id_ed25519
ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N ""

# we expect RAM shortage
cat << EOF | sudo tee /etc/ksmtuned.conf > /dev/null
# /etc/ksmtuned.conf - Configuration file for ksmtuned
# https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_tuning_and_optimization_guide/chap-ksm
KSM_MONITOR_INTERVAL=60
# Millisecond sleep between ksm scans for 16Gb server.
# Smaller servers sleep more, bigger sleep less.
KSM_SLEEP_MSEC=30
KSM_NPAGES_BOOST=0
KSM_NPAGES_DECAY=0
KSM_NPAGES_MIN=1000
KSM_NPAGES_MAX=25000
KSM_THRES_COEF=80
KSM_THRES_CONST=8192
LOGFILE=/var/log/ksmtuned.log
DEBUG=1
EOF
sudo systemctl restart ksm
sudo systemctl restart ksmtuned

# not needed
sudo systemctl stop docker.socket
sudo systemctl stop multipathd.socket
@@ -65,16 +41,14 @@ $DISK
sync
sleep 1

# swap with same size as RAM
# swap with same size as RAM (16GiB)
sudo mkswap $DISK-part1
sudo swapon $DISK-part1

# 60GB data disk
# JBOD 2xdisk for OpenZFS storage (test vm's)
SSD1="$DISK-part2"

# 10GB data disk on ext4
sudo fallocate -l 10G /test.ssd1
SSD2=$(sudo losetup -b 4096 -f /test.ssd1 --show)
sudo fallocate -l 12G /test.ssd2
SSD2=$(sudo losetup -b 4096 -f /test.ssd2 --show)

# adjust zfs module parameter and create pool
exec 1>/dev/null
@@ -83,11 +57,11 @@ ARC_MAX=$((1024*1024*512))
echo $ARC_MIN | sudo tee /sys/module/zfs/parameters/zfs_arc_min
echo $ARC_MAX | sudo tee /sys/module/zfs/parameters/zfs_arc_max
echo 1 | sudo tee /sys/module/zfs/parameters/zvol_use_blk_mq
sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 \
-O relatime=off -O atime=off -O xattr=sa -O compression=lz4 \
-O mountpoint=/mnt/tests
sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 -O relatime=off \
-O atime=off -O xattr=sa -O compression=lz4 -O sync=disabled \
-O redundant_metadata=none -O mountpoint=/mnt/tests

# no need for some scheduler
for i in /sys/block/s*/queue/scheduler; do
echo "none" | sudo tee $i > /dev/null
echo "none" | sudo tee $i
done
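This host pool only holds throwaway VM images and test data, so the settings trade durability for speed: sync=disabled and redundant_metadata=none on the pool, zvol blk-mq enabled, and the host ARC capped at 512 MiB so the memory goes to the VMs instead. A few sanity checks, offered purely as an illustration of what the resulting configuration looks like:

# Illustrative post-setup checks (not part of the workflow)
cat /sys/module/zfs/parameters/zfs_arc_max        # expect 536870912 (512 MiB)
swapon --show                                     # the swap partition sized like RAM
zpool list -v zpool                               # two-disk stripe, ashift=12
zfs get sync,redundant_metadata,compression zpool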
19 changes: 17 additions & 2 deletions .github/workflows/scripts/qemu-2-start.sh
@@ -150,7 +150,7 @@ echo "ENV=$ENV" >> $ENV
# result path
echo 'RESPATH="/var/tmp/test_results"' >> $ENV

# FreeBSD 13 has problems with: e1000+virtio
# FreeBSD 13 has problems with: e1000 and virtio
echo "NIC=$NIC" >> $ENV

# freebsd15 -> used in zfs-qemu.yml
@@ -162,6 +162,14 @@ echo "OSv=\"$OSv\"" >> $ENV
# FreeBSD 15 (Current) -> used for summary
echo "OSNAME=\"$OSNAME\"" >> $ENV

# default vm count for testings
VMs=2
echo "VMs=\"$VMs\"" >> $ENV

# default cpu count for testing vm's
CPU=2
echo "CPU=\"$CPU\"" >> $ENV

sudo mkdir -p "/mnt/tests"
sudo chown -R $(whoami) /mnt/tests

@@ -223,12 +231,19 @@ sudo virt-install \
--disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
--import --noautoconsole >/dev/null

# enable KSM on Linux
if [ ${OS:0:7} != "freebsd" ]; then
sudo virsh dommemstat --domain "openzfs" --period 5
sudo virsh node-memory-tune 100 50 1
echo 1 | sudo tee /sys/kernel/mm/ksm/run > /dev/null
fi

# Give the VMs hostnames so we don't have to refer to them with
# hardcoded IP addresses.
#
# vm0: Initial VM we install dependencies and build ZFS on.
# vm1..2 Testing VMs
for i in {0..9} ; do
for ((i=0; i<=VMs; i++)); do
echo "192.168.122.1$i vm$i" | sudo tee -a /etc/hosts
done

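With ksmtuned gone, KSM is configured directly for Linux guests: virsh dommemstat --period 5 enables periodic balloon memory statistics for the domain, virsh node-memory-tune 100 50 1 sets KSM's pages-to-scan, sleep interval (ms) and cross-node merging, and writing 1 to /sys/kernel/mm/ksm/run starts the scanner. To see how much memory is actually being shared, the sysfs counters can be read directly (illustrative check, assuming 4 KiB pages; general_profit only exists on newer kernels):

# How effective is KSM right now? (illustration only)
grep . /sys/kernel/mm/ksm/pages_shared /sys/kernel/mm/ksm/pages_sharing
echo "approx. saved: $(( $(cat /sys/kernel/mm/ksm/pages_sharing) * 4 )) KiB"
cat /sys/kernel/mm/ksm/general_profit 2>/dev/null || true   # bytes, recent kernels only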
34 changes: 13 additions & 21 deletions .github/workflows/scripts/qemu-5-setup.sh
@@ -14,39 +14,33 @@ PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
sudo virsh undefine openzfs

# default values per test vm:
VMs=2
CPU=2

# cpu pinning
CPUSET=("0,1" "2,3")

case "$OS" in
freebsd*)
# FreeBSD can't be optimized via ksmtuned
# FreeBSD needs only 6GiB
RAM=6
;;
*)
# Linux can be optimized via ksmtuned
# Linux needs more memory, but can be optimized to share it via KSM
RAM=8
;;
esac

# this can be different for each distro
echo "VMs=$VMs" >> $ENV

# create snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now

# setup the testing vm's
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
for i in $(seq 1 $VMs); do

# start testing VMs
for ((i=1; i<=VMs; i++)); do
echo "Creating disk for vm$i..."
DISK="/dev/zvol/zpool/vm$i"
FORMAT="raw"
sudo zfs clone zpool/openzfs@now zpool/vm$i
sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2
sudo zfs clone zpool/openzfs@now zpool/vm$i-system
sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests

cat <<EOF > /tmp/user-data
#cloud-config
@@ -83,23 +77,21 @@ EOF
--graphics none \
--cloud-init user-data=/tmp/user-data \
--network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
--disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
--disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
--disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
--disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
--import --noautoconsole >/dev/null
done

# check the memory state from time to time
# generate some memory stats
cat <<EOF > cronjob.sh
# $OS
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "*******************************************************"
date
echo "********************************************************************************"
uptime
free -m
df -h /mnt/tests
zfs list
EOF

sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
@@ -108,15 +100,15 @@ rm crontab.txt

# check if the machines are okay
echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
for i in $(seq 1 $VMs); do
for ((i=1; i<=VMs; i++)); do
.github/workflows/scripts/qemu-wait-for-vm.sh vm$i
done
echo "All $VMs VMs are up now."

# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
for i in $(seq 1 $VMs); do
for ((i=1; i<=VMs; i++)); do
mkdir -p $RESPATH/vm$i
read "pty" <<< $(sudo virsh ttyconsole vm$i)
sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
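Each test VM gets two zvols: vmN-system is a clone of the zpool/openzfs@now snapshot (so the installed system is shared rather than copied), and vmN-tests is a sparse 64 GiB volume for the pools created by the test suite. Shown only as an example, the block sharing is easy to observe on the host:

# Illustrative: clones reference the snapshot, so "used" stays small until VMs diverge
zfs list -t all -o name,origin,used,refer -r zpool
sudo virsh list --all   # vm1..vmN should show as running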
15 changes: 10 additions & 5 deletions .github/workflows/scripts/qemu-6-tests.sh
@@ -45,7 +45,7 @@ if [ -z ${1:-} ]; then
echo 0 > /tmp/ctr
date "+%s" > /tmp/tsstart

for i in $(seq 1 $VMs); do
for ((i=1; i<=VMs; i++)); do
IP="192.168.122.1$i"
daemonize -c /var/tmp -p vm${i}.pid -o vm${i}log.txt -- \
$SSH zfs@$IP $TESTS $OS $i $VMs $CI_TYPE
@@ -58,7 +58,7 @@ if [ -z ${1:-} ]; then
done

# wait for all vm's to finish
for i in $(seq 1 $VMs); do
for ((i=1; i<=VMs; i++)); do
tail --pid=$(cat vm${i}.pid) -f /dev/null
pid=$(cat vm${i}log.pid)
rm -f vm${i}log.pid
@@ -72,19 +72,24 @@ fi
export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
case "$1" in
freebsd*)
TDIR="/usr/local/share/zfs"
sudo kldstat -n zfs 2>/dev/null && sudo kldunload zfs
sudo -E ./zfs/scripts/zfs.sh
TDIR="/usr/local/share/zfs"
sudo mv -f /var/tmp/*.txt /tmp
sudo newfs -U -t -L tmp /dev/vtbd1 >/dev/null
sudo mount -o noatime /dev/vtbd1 /var/tmp
sudo chmod 1777 /var/tmp
sudo mv -f /tmp/*.txt /var/tmp
;;
*)
# use xfs @ /var/tmp for all distros
TDIR="/usr/share/zfs"
sudo -E modprobe zfs
sudo mv -f /var/tmp/*.txt /tmp
sudo mkfs.xfs -fq /dev/vdb
sudo mount -o noatime /dev/vdb /var/tmp
sudo chmod 1777 /var/tmp
sudo mv -f /tmp/*.txt /var/tmp
sudo -E modprobe zfs
TDIR="/usr/share/zfs"
;;
esac

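When run without an argument, this script acts as the host-side orchestrator: it starts one ssh-driven test run per VM through daemonize, then blocks on each with tail --pid, which waits for a PID that is not a child of the current shell. A minimal sketch of that idiom (GNU tail, as on the Linux host; 'sleep' stands in for a test job):

# Wait for an unrelated process to exit (illustration only)
sleep 30 & echo $! > /tmp/job.pid
tail --pid="$(cat /tmp/job.pid)" -f /dev/null   # returns once that PID is gone
echo "job finished"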
4 changes: 2 additions & 2 deletions .github/workflows/scripts/qemu-7-prepare.sh
@@ -28,7 +28,7 @@ BASE="$HOME/work/zfs/zfs"
MERGE="$BASE/.github/workflows/scripts/merge_summary.awk"

# catch result files of testings (vm's should be there)
for i in $(seq 1 $VMs); do
for ((i=1; i<=VMs; i++)); do
rsync -arL zfs@vm$i:$RESPATH/current $RESPATH/vm$i || true
scp zfs@vm$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
scp zfs@vm$i:"/var/tmp/*.rpm" $RESPATH/vm$i || true
@@ -37,7 +37,7 @@ cp -f /var/tmp/*.txt $RESPATH || true
cd $RESPATH

# prepare result files for summary
for i in $(seq 1 $VMs); do
for ((i=1; i<=VMs; i++)); do
file="vm$i/build-stderr.txt"
test -s $file && mv -f $file build-stderr.txt

2 changes: 1 addition & 1 deletion .github/workflows/scripts/qemu-8-summary.sh
@@ -45,7 +45,7 @@ fi

echo -e "\nFull logs for download:\n $1\n"

for i in $(seq 1 $VMs); do
for ((i=1; i<=VMs; i++)); do
rv=$(cat vm$i/tests-exitcode.txt)

if [ $rv = 0 ]; then