Fix Github test failures #2120

Merged: 1 commit, May 17, 2025
20 changes: 10 additions & 10 deletions test/includes/clustering.sh
@@ -6,9 +6,9 @@ setup_clustering_bridge() {
echo "==> Setup clustering bridge ${name}"

ip link add "${name}" up type bridge
- ip addr add 10.1.1.1/16 dev "${name}"
+ ip addr add 100.64.1.1/16 dev "${name}"

- iptables -w -t nat -A POSTROUTING -s 10.1.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
+ iptables -w -t nat -A POSTROUTING -s 100.64.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward
}
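
The bridge and its masquerade rule move from 10.1.0.0/16 to 100.64.0.0/16, which sits in the RFC 6598 shared address space and is much less likely to collide with the 10.x addressing often present on hosted CI runners; that motivation is inferred from the PR title rather than stated in the diff. A minimal pre-flight check along these lines (a sketch, not part of this change) could confirm the range is free before the bridge comes up:

# Sketch: warn if anything on the host already uses 100.64.0.0/16 before
# setup_clustering_bridge creates the bridge (not part of this PR).
if ip -4 route show | grep -qF "100.64."; then
    echo "==> Warning: 100.64.0.0/16 appears to be in use on this host"
fi
ip -brief -4 addr show | grep -F "100.64." || true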

@@ -18,7 +18,7 @@ teardown_clustering_bridge() {
if [ -e "/sys/class/net/${name}" ]; then
echo "==> Teardown clustering bridge ${name}"
echo 0 > /proc/sys/net/ipv4/ip_forward
- iptables -w -t nat -D POSTROUTING -s 10.1.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
+ iptables -w -t nat -D POSTROUTING -s 100.64.0.0/16 -d 0.0.0.0/0 -j MASQUERADE
ip link del dev "${name}"
fi
}
@@ -80,8 +80,8 @@ set -e
ip link set dev lo up
ip link set dev "${veth2}" name eth0
ip link set eth0 up
ip addr add "10.1.1.10${id}/16" dev eth0
ip route add default via 10.1.1.1
ip addr add "100.64.1.10${id}/16" dev eth0
ip route add default via 100.64.1.1
EOF
}
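
Inside each spawned network namespace, eth0 now gets 100.64.1.10${id}/16 with the bridge address 100.64.1.1 as its default gateway. A quick connectivity check from one of those namespaces could look like the sketch below; it assumes the harness registers the namespaces with ip netns under names like "${ns1}", which is not shown in this diff:

# Sketch (not part of this PR): confirm a spawned namespace can reach the
# bridge gateway on the new range; "${ns1}" is an assumed namespace name.
ip netns exec "${ns1}" ip -brief -4 addr show eth0
ip netns exec "${ns1}" ping -c 1 -W 2 100.64.1.1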

@@ -133,11 +133,11 @@ spawn_incus_and_bootstrap_cluster() {

cat > "${INCUS_DIR}/preseed.yaml" <<EOF
config:
- core.https_address: 10.1.1.101:8443
+ core.https_address: 100.64.1.101:8443
EOF
if [ "${port}" != "" ]; then
cat >> "${INCUS_DIR}/preseed.yaml" <<EOF
- cluster.https_address: 10.1.1.101:${port}
+ cluster.https_address: 100.64.1.101:${port}
EOF
fi
if [ "${driver}" = "linstor" ]; then
@@ -248,7 +248,7 @@ spawn_incus_and_join_cluster() {
# If a custom cluster port was given, we need to first set the REST
# API address.
if [ "${port}" != "8443" ]; then
- incus config set core.https_address "10.1.1.10${index}:8443"
+ incus config set core.https_address "100.64.1.10${index}:8443"
fi

# If there is a satellite name override, apply it.
@@ -260,8 +260,8 @@
cluster:
enabled: true
server_name: node${index}
- server_address: 10.1.1.10${index}:${port}
- cluster_address: 10.1.1.10${target}:8443
+ server_address: 100.64.1.10${index}:${port}
+ cluster_address: 100.64.1.10${target}:8443
cluster_certificate: "$cert"
cluster_token: ${token}
member_config:
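
Putting the bootstrap and join hunks together, the preseed a joining node is spawned with now looks roughly like the sketch below. It is illustrative only: it assumes node 2 joining node 1 on the default port, the member_config entry mirrors the storage-pool override used in the join API test further down, and the certificate and token come from ${cert} and ${token} as in the surrounding functions.

# Sketch of the assembled join preseed after this change (illustrative values).
cat > "${INCUS_DIR}/preseed.yaml" <<EOF
cluster:
  enabled: true
  server_name: node2
  server_address: 100.64.1.102:8443
  cluster_address: 100.64.1.101:8443
  cluster_certificate: "${cert}"
  cluster_token: ${token}
  member_config:
  - entity: storage-pool
    name: data
    key: source
    value: ""
EOF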
46 changes: 23 additions & 23 deletions test/suites/clustering.sh
@@ -252,8 +252,8 @@ test_clustering_membership() {

# Client certificate are shared across all nodes.
token="$(INCUS_DIR=${INCUS_ONE_DIR} incus config trust add foo -q)"
- incus remote add cluster 10.1.1.101:8443 --accept-certificate --token "${token}"
- incus remote set-url cluster https://10.1.1.102:8443
+ incus remote add cluster 100.64.1.101:8443 --accept-certificate --token "${token}"
+ incus remote set-url cluster https://100.64.1.102:8443
incus network list cluster: | grep -q "${bridge}"
incus remote remove cluster

@@ -820,7 +820,7 @@ test_clustering_storage() {
# Manually send the join request.
cert=$(sed ':a;N;$!ba;s/\n/\\n/g' "${INCUS_ONE_DIR}/cluster.crt")
token="$(incus cluster add node3 --quiet)"
- op=$(curl --unix-socket "${INCUS_THREE_DIR}/unix.socket" -X PUT "incus/1.0/cluster" -d "{\"server_name\":\"node3\",\"enabled\":true,\"member_config\":[${member_config}],\"server_address\":\"10.1.1.103:8443\",\"cluster_address\":\"10.1.1.101:8443\",\"cluster_certificate\":\"${cert}\",\"cluster_token\":\"${token}\"}" | jq -r .operation)
+ op=$(curl --unix-socket "${INCUS_THREE_DIR}/unix.socket" -X PUT "incus/1.0/cluster" -d "{\"server_name\":\"node3\",\"enabled\":true,\"member_config\":[${member_config}],\"server_address\":\"100.64.1.103:8443\",\"cluster_address\":\"100.64.1.101:8443\",\"cluster_certificate\":\"${cert}\",\"cluster_token\":\"${token}\"}" | jq -r .operation)
curl --unix-socket "${INCUS_THREE_DIR}/unix.socket" "incus${op}/wait"

# Ensure that node-specific config appears on all nodes,
@@ -1782,7 +1782,7 @@ test_clustering_join_api() {
INCUS_NETNS="${ns2}" spawn_incus "${INCUS_TWO_DIR}" false

token="$(incus cluster add node2 --quiet)"
- op=$(curl --unix-socket "${INCUS_TWO_DIR}/unix.socket" -X PUT "incus/1.0/cluster" -d "{\"server_name\":\"node2\",\"enabled\":true,\"member_config\":[{\"entity\": \"storage-pool\",\"name\":\"data\",\"key\":\"source\",\"value\":\"\"}],\"server_address\":\"10.1.1.102:8443\",\"cluster_address\":\"10.1.1.101:8443\",\"cluster_certificate\":\"${cert}\",\"cluster_token\":\"${token}\"}" | jq -r .operation)
+ op=$(curl --unix-socket "${INCUS_TWO_DIR}/unix.socket" -X PUT "incus/1.0/cluster" -d "{\"server_name\":\"node2\",\"enabled\":true,\"member_config\":[{\"entity\": \"storage-pool\",\"name\":\"data\",\"key\":\"source\",\"value\":\"\"}],\"server_address\":\"100.64.1.102:8443\",\"cluster_address\":\"100.64.1.101:8443\",\"cluster_certificate\":\"${cert}\",\"cluster_token\":\"${token}\"}" | jq -r .operation)
curl --unix-socket "${INCUS_TWO_DIR}/unix.socket" "incus${op}/wait"

INCUS_DIR="${INCUS_ONE_DIR}" incus cluster show node2 | grep -q "message: Fully operational"
@@ -1959,7 +1959,7 @@ test_clustering_address() {

# Add a remote using the core.https_address of the bootstrap node, and check
# that the REST API is exposed.
url="https://10.1.1.101:8443"
url="https://100.64.1.101:8443"
token="$(INCUS_DIR="${INCUS_ONE_DIR}" incus config trust add foo --quiet)"
incus remote add cluster --token "${token}" --accept-certificate "${url}"
incus storage list cluster: | grep -q data
@@ -1982,13 +1982,13 @@

# The core.https_address config value can be changed and the REST API is still
# accessible.
INCUS_DIR="${INCUS_ONE_DIR}" incus config set "core.https_address" 10.1.1.101:9999
url="https://10.1.1.101:9999"
INCUS_DIR="${INCUS_ONE_DIR}" incus config set "core.https_address" 100.64.1.101:9999
url="https://100.64.1.101:9999"
incus remote set-url cluster "${url}"
incus storage list cluster:| grep -q data

# The cluster.https_address config value can't be changed.
- ! INCUS_DIR="${INCUS_ONE_DIR}" incus config set "cluster.https_address" "10.1.1.101:8448" || false
+ ! INCUS_DIR="${INCUS_ONE_DIR}" incus config set "cluster.https_address" "100.64.1.101:8448" || false

# Create a container using the REST API exposed over core.https_address.
INCUS_DIR="${INCUS_ONE_DIR}" deps/import-busybox --alias testimage
@@ -2214,9 +2214,9 @@ test_clustering_recover() {
sleep 5

# Check the current database nodes
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "10.1.1.101:8443"
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "10.1.1.102:8443"
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "10.1.1.103:8443"
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "100.64.1.101:8443"
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "100.64.1.102:8443"
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "100.64.1.103:8443"

# Create a test project, just to insert something in the database.
INCUS_DIR="${INCUS_ONE_DIR}" incus project create p1
@@ -2238,8 +2238,8 @@
INCUS_DIR="${INCUS_ONE_DIR}" incus project list | grep -q p1

# The database nodes have been updated
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "10.1.1.101:8443"
! INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "10.1.1.102:8443" || false
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "100.64.1.101:8443"
! INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster list-database | grep -q "100.64.1.102:8443" || false

# Cleanup the dead node.
INCUS_DIR="${INCUS_ONE_DIR}" incus cluster remove node2 --force --yes
@@ -2531,7 +2531,7 @@ test_clustering_remove_raft_node() {
# Remove the second node from the database but not from the raft configuration.
retries=10
while [ "${retries}" != "0" ]; do
INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global "DELETE FROM nodes WHERE address = '10.1.1.102:8443'" && break
INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql global "DELETE FROM nodes WHERE address = '100.64.1.102:8443'" && break
sleep 0.5
retries=$((retries-1))
done
@@ -2554,10 +2554,10 @@
INCUS_DIR="${INCUS_ONE_DIR}" incus cluster show node4 | grep -q "\- database$"

# The second node is still in the raft_nodes table.
INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql local "SELECT * FROM raft_nodes" | grep -q "10.1.1.102"
INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql local "SELECT * FROM raft_nodes" | grep -q "100.64.1.102"

# Force removing the raft node.
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster remove-raft-node -q "10.1.1.102"
INCUS_DIR="${INCUS_ONE_DIR}" incusd cluster remove-raft-node -q "100.64.1.102"

# Wait for a heartbeat to propagate and a rebalance to be performed.
sleep 12
@@ -2569,7 +2569,7 @@
INCUS_DIR="${INCUS_ONE_DIR}" incus cluster show node4 | grep -q "\- database$"

# The second node is gone from the raft_nodes_table.
- ! INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql local "SELECT * FROM raft_nodes" | grep -q "10.1.1.102" || false
+ ! INCUS_DIR="${INCUS_ONE_DIR}" incus admin sql local "SELECT * FROM raft_nodes" | grep -q "100.64.1.102" || false

INCUS_DIR="${INCUS_ONE_DIR}" incus admin shutdown
INCUS_DIR="${INCUS_THREE_DIR}" incus admin shutdown
@@ -2745,13 +2745,13 @@ test_clustering_image_refresh() {
dir_configure "${INCUS_REMOTE_DIR}"
INCUS_DIR="${INCUS_REMOTE_DIR}" deps/import-busybox --alias testimage --public

INCUS_DIR="${INCUS_REMOTE_DIR}" incus config set core.https_address "10.1.1.104:8443"
INCUS_DIR="${INCUS_REMOTE_DIR}" incus config set core.https_address "100.64.1.104:8443"

# Add remotes
token="$(INCUS_DIR="${INCUS_ONE_DIR}" incus config trust add foo --quiet)"
- incus remote add public "https://10.1.1.104:8443" --accept-certificate --token foo --public
+ incus remote add public "https://100.64.1.104:8443" --accept-certificate --token foo --public
token="$(INCUS_DIR="${INCUS_ONE_DIR}" incus config trust add foo --quiet)"
incus remote add cluster "https://10.1.1.101:8443" --accept-certificate --token "${token}"
incus remote add cluster "https://100.64.1.101:8443" --accept-certificate --token "${token}"

INCUS_DIR="${INCUS_REMOTE_DIR}" incus init testimage c1

@@ -3444,7 +3444,7 @@ test_clustering_groups() {
spawn_incus_and_join_cluster "${ns3}" "${bridge}" "${cert}" 3 1 "${INCUS_THREE_DIR}" "${INCUS_ONE_DIR}"

token="$(INCUS_DIR="${INCUS_ONE_DIR}" incus config trust add foo --quiet)"
- incus remote add cluster --token "${token}" --accept-certificate "https://10.1.1.101:8443"
+ incus remote add cluster --token "${token}" --accept-certificate "https://100.64.1.101:8443"

# Initially, there is only the default group
incus cluster group show cluster:default
@@ -3899,7 +3899,7 @@ test_clustering_openfga() {
INCUS_DIR="${INCUS_ONE_DIR}" incus config set "oidc.issuer=http://127.0.0.1:$(cat "${TEST_DIR}/oidc.port")/"
INCUS_DIR="${INCUS_ONE_DIR}" incus config set "oidc.client.id=device"

- BROWSER=curl incus remote add --accept-certificate oidc-openfga "https://10.1.1.101:8443" --auth-type oidc
+ BROWSER=curl incus remote add --accept-certificate oidc-openfga "https://100.64.1.101:8443" --auth-type oidc
! incus_remote info oidc-openfga: | grep -Fq 'core.https_address' || false

run_openfga
@@ -3926,7 +3926,7 @@
# After the second node has joined there should exist only one authorization model.
[ "$(fga model list --store-id "${OPENFGA_STORE_ID}" | jq '.authorization_models | length')" = 1 ]

- BROWSER=curl incus remote add --accept-certificate node2 "https://10.1.1.102:8443" --auth-type oidc
+ BROWSER=curl incus remote add --accept-certificate node2 "https://100.64.1.102:8443" --auth-type oidc
! incus_remote info node2: | grep -Fq 'core.https_address' || false

# Add self as server admin. Should be able to see config now.