Handle errors in integration test setups

Thanks @kev-the-dev

Closes #1460

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Kristoffer Dalby authored 2023-08-29 08:33:33 +02:00; committed by Kristoffer Dalby
parent 63caf9a222
commit b4a4d0f760
55 changed files with 829 additions and 1118 deletions
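
The recurring change across the diffs below is replacing inline assert.NoError / t.Errorf checks with fail-fast helpers (assertNoErr, assertNoErrf, assertNoErrSync, and friends) plus a deferred scenario.Shutdown() for cleanup. The helper bodies are not part of this excerpt; a minimal sketch of what they presumably look like, with t.Fatalf aborting a test at the first setup failure:

    func assertNoErr(t *testing.T, err error) {
        t.Helper()
        if err != nil {
            // Fatalf (unlike Errorf) stops the test immediately, so later
            // steps never run against a half-built scenario.
            t.Fatalf("unexpected error: %s", err)
        }
    }

    // assertNoErrf is called in the diffs as
    // assertNoErrf(t, "failed to run mock OIDC server: %s", err).
    func assertNoErrf(t *testing.T, msg string, err error) {
        t.Helper()
        if err != nil {
            t.Fatalf(msg, err)
        }
    }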


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestACLAllowStarDst:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestACLAllowStarDst
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestACLAllowUser80Dst:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestACLAllowUser80Dst
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestACLAllowUserDst:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestACLAllowUserDst
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestACLDenyAllPort80:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestACLDenyAllPort80
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestACLDevice1CanAccessDevice2:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestACLDevice1CanAccessDevice2
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestACLHostsInNetMapTable:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestACLHostsInNetMapTable
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestACLNamedHostsCanReach:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestACLNamedHostsCanReach
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestACLNamedHostsCanReachBySubnet:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestACLNamedHostsCanReachBySubnet
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestApiKeyCommand:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestApiKeyCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestAuthKeyLogoutAndRelogin:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestAuthKeyLogoutAndRelogin
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestAuthWebFlowAuthenticationPingAll:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestAuthWebFlowAuthenticationPingAll
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestAuthWebFlowLogoutAndRelogin:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestAuthWebFlowLogoutAndRelogin
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestCreateTailscale:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestCreateTailscale
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestDERPServerScenario:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestDERPServerScenario
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestEnablingRoutes:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestEnablingRoutes
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestEphemeral:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestEphemeral
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestExpireNode:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestExpireNode
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestHeadscale:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestHeadscale
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestNodeCommand:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestNodeCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestNodeExpireCommand:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestNodeExpireCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestNodeMoveCommand:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestNodeMoveCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestNodeRenameCommand:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestNodeRenameCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestNodeTagCommand:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestNodeTagCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestOIDCAuthenticationPingAll:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestOIDCAuthenticationPingAll
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestOIDCExpireNodesBasedOnTokenExpiry:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestOIDCExpireNodesBasedOnTokenExpiry
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestPingAllByHostname:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestPingAllByHostname
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestPingAllByIP:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestPingAllByIP
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestPreAuthKeyCommand:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestPreAuthKeyCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestPreAuthKeyCommandReusableEphemeral:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestPreAuthKeyCommandReusableEphemeral
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestPreAuthKeyCommandWithoutExpiry:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestPreAuthKeyCommandWithoutExpiry
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestResolveMagicDNS:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestResolveMagicDNS
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestSSHIsBlockedInACL:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestSSHIsBlockedInACL
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestSSHMultipleUsersAllToAll:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestSSHMultipleUsersAllToAll
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestSSHNoSSHConfigured:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestSSHNoSSHConfigured
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestSSHOneUserAllToAll:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestSSHOneUserAllToAll
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestSSUserOnlyIsolation:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestSSUserOnlyIsolation
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestTaildrop:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestTaildrop
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestTailscaleNodesJoiningHeadcale:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestTailscaleNodesJoiningHeadcale
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  TestUserCommand:
     runs-on: ubuntu-latest
     steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run TestUserCommand
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \


@@ -1,16 +0,0 @@
-FROM ubuntu:22.04
-
-ARG TAILSCALE_VERSION=*
-ARG TAILSCALE_CHANNEL=stable
-
-RUN apt-get update \
-    && apt-get install -y gnupg curl ssh dnsutils ca-certificates \
-    && adduser --shell=/bin/bash ssh-it-user
-
-# Tailscale is deliberately split into a second stage so we can cache utils as a separate layer.
-RUN curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.gpg | apt-key add - \
-    && curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
-    && apt-get update \
-    && apt-get install -y tailscale=${TAILSCALE_VERSION} \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*


@@ -31,7 +31,7 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  test:
+  {{.Name}}:
     runs-on: ubuntu-latest
     steps:
@@ -55,7 +55,7 @@ jobs:
            integration_test/
            config-example.yaml
-      - name: Run general integration tests
+      - name: Run {{.Name}}
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
           nix develop --command -- docker run \
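
Every per-test workflow above is an instance of this template, with {{.Name}} filled in per test by a generator. A hedged sketch of how such a generator might render it with Go's text/template (the file paths and test list here are illustrative, not taken from this commit):

    package main

    import (
        "os"
        "text/template"
    )

    func main() {
        // Hypothetical paths: the diff shows only the template body, not
        // where it lives or where the rendered workflows are written.
        tmpl := template.Must(template.ParseFiles("test-integration.yaml.tmpl"))
        for _, name := range []string{"TestACLAllowStarDst", "TestPingAllByIP"} {
            out, err := os.Create("test-integration-v2-" + name + ".yaml")
            if err != nil {
                panic(err)
            }
            // Each rendered file differs only in the job and step names.
            if err := tmpl.Execute(out, struct{ Name string }{name}); err != nil {
                panic(err)
            }
            out.Close()
        }
    }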


@@ -45,10 +45,14 @@ var veryLargeDestination = []string{
 	"208.0.0.0/4:*",
 }
 
-func aclScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Scenario {
+func aclScenario(
+	t *testing.T,
+	policy *policy.ACLPolicy,
+	clientsPerUser int,
+) *Scenario {
 	t.Helper()
 
 	scenario, err := NewScenario()
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	spec := map[string]int{
 		"user1": clientsPerUser,
@@ -58,22 +62,19 @@ func aclScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Sc
 	err = scenario.CreateHeadscaleEnv(spec,
 		[]tsic.Option{
 			tsic.WithDockerEntrypoint([]string{
-				"/bin/bash",
+				"/bin/sh",
 				"-c",
-				"/bin/sleep 3 ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
+				"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
 			}),
 			tsic.WithDockerWorkdir("/"),
 		},
 		hsic.WithACLPolicy(policy),
 		hsic.WithTestName("acl"),
 	)
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
-	err = scenario.WaitForTailscaleSync()
-	assert.NoError(t, err)
-
 	_, err = scenario.ListTailscaleClientsFQDNs()
-	assert.NoError(t, err)
+	assertNoErrListFQDN(t, err)
 
 	return scenario
 }
@@ -260,7 +261,7 @@ func TestACLHostsInNetMapTable(t *testing.T) {
 	for name, testCase := range tests {
 		t.Run(name, func(t *testing.T) {
 			scenario, err := NewScenario()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			spec := testCase.users
@@ -268,25 +269,23 @@ func TestACLHostsInNetMapTable(t *testing.T) {
 				[]tsic.Option{},
 				hsic.WithACLPolicy(&testCase.policy),
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
+			defer scenario.Shutdown()
 
 			allClients, err := scenario.ListTailscaleClients()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
-			err = scenario.WaitForTailscaleSync()
-			assert.NoError(t, err)
+			err = scenario.WaitForTailscaleSyncWithPeerCount(testCase.want["user1"])
+			assertNoErrSync(t, err)
 
 			for _, client := range allClients {
 				status, err := client.Status()
-				assert.NoError(t, err)
+				assertNoErr(t, err)
 
 				user := status.User[status.Self.UserID].LoginName
 
 				assert.Equal(t, (testCase.want[user]), len(status.Peer))
 			}
-
-			err = scenario.Shutdown()
-			assert.NoError(t, err)
 		})
 	}
 }
@@ -311,25 +310,26 @@ func TestACLAllowUser80Dst(t *testing.T) {
 		},
 		1,
 	)
+	defer scenario.Shutdown()
 
 	user1Clients, err := scenario.ListTailscaleClients("user1")
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	user2Clients, err := scenario.ListTailscaleClients("user2")
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	// Test that user1 can visit all user2
 	for _, client := range user1Clients {
 		for _, peer := range user2Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
 
 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}
@@ -337,7 +337,7 @@ func TestACLAllowUser80Dst(t *testing.T) {
 	for _, client := range user2Clients {
 		for _, peer := range user1Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
@@ -347,9 +347,6 @@ func TestACLAllowUser80Dst(t *testing.T) {
 			assert.Error(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }
 
 func TestACLDenyAllPort80(t *testing.T) {
@@ -370,12 +367,13 @@ func TestACLDenyAllPort80(t *testing.T) {
 		},
 		4,
 	)
+	defer scenario.Shutdown()
 
 	allClients, err := scenario.ListTailscaleClients()
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	allHostnames, err := scenario.ListTailscaleClientsFQDNs()
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	for _, client := range allClients {
 		for _, hostname := range allHostnames {
@@ -393,9 +391,6 @@ func TestACLDenyAllPort80(t *testing.T) {
 			assert.Error(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }
 
 // Test to confirm that we can use user:* from one user.
@@ -416,25 +411,26 @@ func TestACLAllowUserDst(t *testing.T) {
 		},
 		2,
 	)
+	defer scenario.Shutdown()
 
 	user1Clients, err := scenario.ListTailscaleClients("user1")
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	user2Clients, err := scenario.ListTailscaleClients("user2")
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	// Test that user1 can visit all user2
 	for _, client := range user1Clients {
 		for _, peer := range user2Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
 
 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}
@@ -442,7 +438,7 @@ func TestACLAllowUserDst(t *testing.T) {
 	for _, client := range user2Clients {
 		for _, peer := range user1Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
@@ -452,9 +448,6 @@ func TestACLAllowUserDst(t *testing.T) {
 			assert.Error(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }
 
 // Test to confirm that we can use *:* from one user
@@ -474,25 +467,26 @@ func TestACLAllowStarDst(t *testing.T) {
 		},
 		2,
 	)
+	defer scenario.Shutdown()
 
 	user1Clients, err := scenario.ListTailscaleClients("user1")
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	user2Clients, err := scenario.ListTailscaleClients("user2")
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	// Test that user1 can visit all user2
 	for _, client := range user1Clients {
 		for _, peer := range user2Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
 
 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}
@@ -500,7 +494,7 @@ func TestACLAllowStarDst(t *testing.T) {
 	for _, client := range user2Clients {
 		for _, peer := range user1Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
@@ -510,9 +504,6 @@ func TestACLAllowStarDst(t *testing.T) {
 			assert.Error(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }
 
 // TestACLNamedHostsCanReachBySubnet is the same as
@@ -537,25 +528,26 @@ func TestACLNamedHostsCanReachBySubnet(t *testing.T) {
 		},
 		3,
 	)
+	defer scenario.Shutdown()
 
 	user1Clients, err := scenario.ListTailscaleClients("user1")
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	user2Clients, err := scenario.ListTailscaleClients("user2")
-	assert.NoError(t, err)
+	assertNoErr(t, err)
 
 	// Test that user1 can visit all user2
 	for _, client := range user1Clients {
 		for _, peer := range user2Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
 
 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}
@@ -563,19 +555,16 @@ func TestACLNamedHostsCanReachBySubnet(t *testing.T) {
 	for _, client := range user2Clients {
 		for _, peer := range user1Clients {
 			fqdn, err := peer.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
 			t.Logf("url from %s to %s", client.Hostname(), url)
 
 			result, err := client.Curl(url)
 			assert.Len(t, result, 13)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 		}
 	}
-
-	err = scenario.Shutdown()
-	assert.NoError(t, err)
 }
 
 // This test aims to cover cases where individual hosts are allowed and denied
@@ -677,16 +666,17 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				&testCase.policy,
 				2,
 			)
+			defer scenario.Shutdown()
 
 			// Since user/users dont matter here, we basically expect that some clients
 			// will be assigned these ips and that we can pick them up for our own use.
 			test1ip4 := netip.MustParseAddr("100.64.0.1")
 			test1ip6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
 			test1, err := scenario.FindTailscaleClientByIP(test1ip6)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			test1fqdn, err := test1.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test1ip4URL := fmt.Sprintf("http://%s/etc/hostname", test1ip4.String())
 			test1ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test1ip6.String())
 			test1fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test1fqdn)
@@ -694,10 +684,10 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 			test2ip4 := netip.MustParseAddr("100.64.0.2")
 			test2ip6 := netip.MustParseAddr("fd7a:115c:a1e0::2")
 			test2, err := scenario.FindTailscaleClientByIP(test2ip6)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			test2fqdn, err := test2.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test2ip4URL := fmt.Sprintf("http://%s/etc/hostname", test2ip4.String())
 			test2ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test2ip6.String())
 			test2fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test2fqdn)
@@ -705,10 +695,10 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 			test3ip4 := netip.MustParseAddr("100.64.0.3")
 			test3ip6 := netip.MustParseAddr("fd7a:115c:a1e0::3")
 			test3, err := scenario.FindTailscaleClientByIP(test3ip6)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			test3fqdn, err := test3.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test3ip4URL := fmt.Sprintf("http://%s/etc/hostname", test3ip4.String())
 			test3ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test3ip6.String())
 			test3fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test3fqdn)
@@ -723,7 +713,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3ip4URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test1.Curl(test3ip6URL)
 			assert.Lenf(
@@ -734,7 +724,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3ip6URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test1.Curl(test3fqdnURL)
 			assert.Lenf(
@@ -745,7 +735,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3fqdnURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			// test2 can query test3
 			result, err = test2.Curl(test3ip4URL)
@@ -757,7 +747,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3ip4URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test2.Curl(test3ip6URL)
 			assert.Lenf(
@@ -768,7 +758,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3ip6URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test2.Curl(test3fqdnURL)
 			assert.Lenf(
@@ -779,7 +769,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test3fqdnURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			// test3 cannot query test1
 			result, err = test3.Curl(test1ip4URL)
@@ -818,7 +808,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test1.Curl(test2ip6URL)
 			assert.Lenf(
 				t,
@@ -828,7 +818,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test2ip6URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test1.Curl(test2fqdnURL)
 			assert.Lenf(
@@ -839,7 +829,7 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 				test2fqdnURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			// test2 cannot query test1
 			result, err = test2.Curl(test1ip4URL)
@@ -853,9 +843,6 @@ func TestACLNamedHostsCanReach(t *testing.T) {
 			result, err = test2.Curl(test1fqdnURL)
 			assert.Empty(t, result)
 			assert.Error(t, err)
-
-			err = scenario.Shutdown()
-			assert.NoError(t, err)
 		})
 	}
 }
@@ -953,10 +940,10 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 			test1ip6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
 			test1, err := scenario.FindTailscaleClientByIP(test1ip)
 			assert.NotNil(t, test1)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			test1fqdn, err := test1.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test1ipURL := fmt.Sprintf("http://%s/etc/hostname", test1ip.String())
 			test1ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test1ip6.String())
 			test1fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test1fqdn)
@@ -965,10 +952,10 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 			test2ip6 := netip.MustParseAddr("fd7a:115c:a1e0::2")
 			test2, err := scenario.FindTailscaleClientByIP(test2ip)
 			assert.NotNil(t, test2)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			test2fqdn, err := test2.FQDN()
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 			test2ipURL := fmt.Sprintf("http://%s/etc/hostname", test2ip.String())
 			test2ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test2ip6.String())
 			test2fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test2fqdn)
@@ -983,7 +970,7 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 				test2ipURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test1.Curl(test2ip6URL)
 			assert.Lenf(
@@ -994,7 +981,7 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 				test2ip6URL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test1.Curl(test2fqdnURL)
 			assert.Lenf(
@@ -1005,7 +992,7 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 				test2fqdnURL,
 				result,
 			)
-			assert.NoError(t, err)
+			assertNoErr(t, err)
 
 			result, err = test2.Curl(test1ipURL)
 			assert.Empty(t, result)
@@ -1018,9 +1005,6 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
 			result, err = test2.Curl(test1fqdnURL)
 			assert.Empty(t, result)
 			assert.Error(t, err)
-
-			err = scenario.Shutdown()
-			assert.NoError(t, err)
 		})
 	}
 }
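
The change repeated throughout this file: teardown moves from a trailing err = scenario.Shutdown(); assert.NoError(t, err) to a defer placed right after setup, so containers are reclaimed even when a fail-fast helper aborts the test midway. Schematically (a sketch, not a verbatim excerpt; TestExample is a made-up name):

    func TestExample(t *testing.T) {
        scenario, err := NewScenario()
        assertNoErr(t, err)
        // Deferred teardown runs even when a later assertNoErr calls
        // t.Fatalf, which is why Shutdown no longer returns an error.
        defer scenario.Shutdown()

        err = scenario.CreateHeadscaleEnv(map[string]int{"user1": 2}, []tsic.Option{})
        assertNoErr(t, err)
        // ... assertions ...
    }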


@@ -42,22 +42,19 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
 	t.Parallel()
 
 	baseScenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)
 
 	scenario := AuthOIDCScenario{
 		Scenario: baseScenario,
 	}
+	defer scenario.Shutdown()
 
 	spec := map[string]int{
 		"user1": len(TailscaleVersions),
 	}
 
 	oidcConfig, err := scenario.runMockOIDC(defaultAccessTTL)
-	if err != nil {
-		t.Errorf("failed to run mock OIDC server: %s", err)
-	}
+	assertNoErrf(t, "failed to run mock OIDC server: %s", err)
 
 	oidcMap := map[string]string{
 		"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
@@ -74,24 +71,16 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
 		hsic.WithHostnameAsServerURL(),
 		hsic.WithFileInContainer("/tmp/hs_client_oidc_secret", []byte(oidcConfig.ClientSecret)),
 	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	assertNoErrHeadscaleEnv(t, err)
 
 	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)
 
 	allIps, err := scenario.ListTailscaleClientsIPs()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClientIPs(t, err)
 
 	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)
 
 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
 		return x.String()
@@ -99,11 +88,6 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
 	success := pingAllHelper(t, allClients, allAddrs)
 	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
 }
 
 func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
@@ -113,22 +97,19 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
 	shortAccessTTL := 5 * time.Minute
 
 	baseScenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)
 
 	scenario := AuthOIDCScenario{
 		Scenario: baseScenario,
 	}
+	defer scenario.Shutdown()
 
 	spec := map[string]int{
 		"user1": len(TailscaleVersions),
 	}
 
 	oidcConfig, err := scenario.runMockOIDC(shortAccessTTL)
-	if err != nil {
-		t.Fatalf("failed to run mock OIDC server: %s", err)
-	}
+	assertNoErrf(t, "failed to run mock OIDC server: %s", err)
 
 	oidcMap := map[string]string{
 		"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
@@ -144,24 +125,16 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
 		hsic.WithConfigEnv(oidcMap),
 		hsic.WithHostnameAsServerURL(),
 	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	assertNoErrHeadscaleEnv(t, err)
 
 	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)
 
 	allIps, err := scenario.ListTailscaleClientsIPs()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClientIPs(t, err)
 
 	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)
 
 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
 		return x.String()
@@ -171,12 +144,8 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
 	t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps))
 
 	// await all nodes being logged out after OIDC token expiry
-	scenario.WaitForTailscaleLogout()
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
+	err = scenario.WaitForTailscaleLogout()
+	assertNoErrLogout(t, err)
 }
 
 func (s *AuthOIDCScenario) CreateHeadscaleEnv(
@@ -188,7 +157,7 @@ func (s *AuthOIDCScenario) CreateHeadscaleEnv(
 		return err
 	}
 
-	err = headscale.WaitForReady()
+	err = headscale.WaitForRunning()
 	if err != nil {
 		return err
 	}
@@ -311,15 +280,11 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 	log.Printf("running tailscale up for user %s", userStr)
 
 	if user, ok := s.users[userStr]; ok {
 		for _, client := range user.Clients {
-			user.joinWaitGroup.Add(1)
-
-			go func(c TailscaleClient) {
-				defer user.joinWaitGroup.Done()
-				// TODO(juanfont): error handle this
-				loginURL, err := c.UpWithLoginURL(loginServer)
+			c := client
+			user.joinWaitGroup.Go(func() error {
+				loginURL, err := c.LoginWithURL(loginServer)
 				if err != nil {
-					log.Printf("failed to run tailscale up: %s", err)
+					log.Printf("%s failed to run tailscale up: %s", c.Hostname(), err)
 				}
 
 				loginURL.Host = fmt.Sprintf("%s:8080", headscale.GetIP())
@@ -336,9 +301,14 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 				req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
 				resp, err := httpClient.Do(req)
 				if err != nil {
-					log.Printf("%s failed to get login url %s: %s", c.Hostname(), loginURL, err)
+					log.Printf(
+						"%s failed to get login url %s: %s",
+						c.Hostname(),
+						loginURL,
+						err,
+					)
 
-					return
+					return err
 				}
 
 				defer resp.Body.Close()
@@ -347,28 +317,29 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 				if err != nil {
 					log.Printf("%s failed to read response body: %s", c.Hostname(), err)
 
-					return
+					return err
 				}
 
 				log.Printf("Finished request for %s to join tailnet", c.Hostname())
-			}(client)
 
-			err = client.WaitForReady()
-			if err != nil {
-				log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
-			}
+				return nil
+			})
 
 			log.Printf("client %s is ready", client.Hostname())
 		}
 
-		user.joinWaitGroup.Wait()
+		if err := user.joinWaitGroup.Wait(); err != nil {
+			return err
+		}
 
 		for _, client := range user.Clients {
-			err := client.WaitForReady()
+			err := client.WaitForRunning()
 			if err != nil {
-				log.Printf("client %s was not ready: %s", client.Hostname(), err)
-
-				return fmt.Errorf("failed to up tailscale node: %w", err)
+				return fmt.Errorf(
+					"%s tailscale node has not reached running: %w",
+					client.Hostname(),
+					err,
+				)
 			}
 		}
@@ -378,11 +349,11 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 	return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
 }
 
-func (s *AuthOIDCScenario) Shutdown() error {
+func (s *AuthOIDCScenario) Shutdown() {
 	err := s.pool.Purge(s.mockOIDC)
 	if err != nil {
-		return err
+		log.Printf("failed to remove mock oidc container")
 	}
 
-	return s.Scenario.Shutdown()
+	s.Scenario.Shutdown()
 }
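
The joinWaitGroup.Add(1) / go func(...) to joinWaitGroup.Go(func() error { ... }) rewrite above is the standard move from sync.WaitGroup to an error-propagating group, presumably golang.org/x/sync/errgroup. A self-contained sketch of the pattern (hostnames invented for illustration):

    package main

    import (
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        var g errgroup.Group
        for _, name := range []string{"ts-1", "ts-2", "ts-3"} {
            n := name // capture the loop variable (pre-Go 1.22 semantics)
            g.Go(func() error {
                if n == "ts-3" {
                    return fmt.Errorf("%s failed to join tailnet", n)
                }
                return nil
            })
        }
        // Wait surfaces the first non-nil error from any goroutine,
        // unlike a bare sync.WaitGroup where failures were only logged.
        if err := g.Wait(); err != nil {
            fmt.Println("join failed:", err)
        }
    }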


@@ -28,12 +28,13 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
 	baseScenario, err := NewScenario()
 	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
+		t.Fatalf("failed to create scenario: %s", err)
 	}
 
 	scenario := AuthWebFlowScenario{
 		Scenario: baseScenario,
 	}
+	defer scenario.Shutdown()
 
 	spec := map[string]int{
 		"user1": len(TailscaleVersions),
@@ -41,24 +42,16 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
 	}
 
 	err = scenario.CreateHeadscaleEnv(spec, hsic.WithTestName("webauthping"))
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	assertNoErrHeadscaleEnv(t, err)
 
 	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)
 
 	allIps, err := scenario.ListTailscaleClientsIPs()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClientIPs(t, err)
 
 	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)
 
 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
 		return x.String()
@@ -66,11 +59,6 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
 	success := pingAllHelper(t, allClients, allAddrs)
 	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
 }
 
 func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
@@ -78,13 +66,12 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
 	t.Parallel()
 
 	baseScenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)
 
 	scenario := AuthWebFlowScenario{
 		Scenario: baseScenario,
 	}
+	defer scenario.Shutdown()
 
 	spec := map[string]int{
 		"user1": len(TailscaleVersions),
@@ -92,24 +79,16 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
 	}
 
 	err = scenario.CreateHeadscaleEnv(spec, hsic.WithTestName("weblogout"))
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	assertNoErrHeadscaleEnv(t, err)
 
 	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)
 
 	allIps, err := scenario.ListTailscaleClientsIPs()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClientIPs(t, err)
 
 	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)
 
 	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
 		return x.String()
@@ -122,7 +101,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
 	for _, client := range allClients {
 		ips, err := client.IPs()
 		if err != nil {
-			t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
+			t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
 		}
 		clientIPs[client] = ips
 	}
@@ -130,37 +109,32 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
 	for _, client := range allClients {
 		err := client.Logout()
 		if err != nil {
-			t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
+			t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
 		}
 	}
 
-	scenario.WaitForTailscaleLogout()
+	err = scenario.WaitForTailscaleLogout()
+	assertNoErrLogout(t, err)
 
 	t.Logf("all clients logged out")
 
 	headscale, err := scenario.Headscale()
-	if err != nil {
-		t.Errorf("failed to get headscale server: %s", err)
-	}
+	assertNoErrGetHeadscale(t, err)
 
 	for userName := range spec {
 		err = scenario.runTailscaleUp(userName, headscale.GetEndpoint())
 		if err != nil {
-			t.Errorf("failed to run tailscale up: %s", err)
+			t.Fatalf("failed to run tailscale up: %s", err)
 		}
 	}
 
 	t.Logf("all clients logged in again")
 
 	allClients, err = scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)
 
 	allIps, err = scenario.ListTailscaleClientsIPs()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClientIPs(t, err)
 
 	allAddrs = lo.Map(allIps, func(x netip.Addr, index int) string {
 		return x.String()
@@ -172,12 +146,12 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
 	for _, client := range allClients {
 		ips, err := client.IPs()
 		if err != nil {
-			t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
+			t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
 		}
 
 		// lets check if the IPs are the same
 		if len(ips) != len(clientIPs[client]) {
-			t.Errorf("IPs changed for client %s", client.Hostname())
+			t.Fatalf("IPs changed for client %s", client.Hostname())
 		}
 
 		for _, ip := range ips {
@@ -191,7 +165,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
 			}
 
 			if !found {
-				t.Errorf(
+				t.Fatalf(
 					"IPs changed for client %s. Used to be %v now %v",
 					client.Hostname(),
 					clientIPs[client],
@@ -202,11 +176,6 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
 	}
 
 	t.Logf("all clients IPs are the same")
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
 }
 
 func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
@@ -218,7 +187,7 @@ func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
 		return err
 	}
 
-	err = headscale.WaitForReady()
+	err = headscale.WaitForRunning()
 	if err != nil {
 		return err
 	}
@@ -250,36 +219,39 @@ func (s *AuthWebFlowScenario) runTailscaleUp(
 	log.Printf("running tailscale up for user %s", userStr)
 
 	if user, ok := s.users[userStr]; ok {
 		for _, client := range user.Clients {
-			user.joinWaitGroup.Add(1)
-
-			go func(c TailscaleClient) {
-				defer user.joinWaitGroup.Done()
-				// TODO(juanfont): error handle this
-				loginURL, err := c.UpWithLoginURL(loginServer)
+			c := client
+			user.joinWaitGroup.Go(func() error {
+				loginURL, err := c.LoginWithURL(loginServer)
 				if err != nil {
-					log.Printf("failed to run tailscale up: %s", err)
+					log.Printf("failed to run tailscale up (%s): %s", c.Hostname(), err)
return err
} }
err = s.runHeadscaleRegister(userStr, loginURL) err = s.runHeadscaleRegister(userStr, loginURL)
if err != nil { if err != nil {
log.Printf("failed to register client: %s", err) log.Printf("failed to register client (%s): %s", c.Hostname(), err)
}
}(client)
err := client.WaitForReady() return err
}
return nil
})
err := client.WaitForRunning()
if err != nil { if err != nil {
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err) log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
} }
} }
user.joinWaitGroup.Wait()
if err := user.joinWaitGroup.Wait(); err != nil {
return err
}
for _, client := range user.Clients { for _, client := range user.Clients {
err := client.WaitForReady() err := client.WaitForRunning()
if err != nil { if err != nil {
log.Printf("client %s was not ready: %s", client.Hostname(), err) return fmt.Errorf("%s failed to up tailscale node: %w", client.Hostname(), err)
return fmt.Errorf("failed to up tailscale node: %w", err)
} }
} }
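Note on the pattern in this file: the inline `if err != nil { t.Errorf(...) }` blocks are replaced with `assertNoErr*` helpers, and the checks that stay inline are promoted from `t.Errorf` to `t.Fatalf`, so a broken setup aborts the test instead of limping on. The helper definitions are not part of this excerpt; a minimal sketch of the shape such a helper presumably takes (name and message here are illustrative, not the repository's actual code):

    // Hypothetical sketch only; the real helpers live elsewhere in the
    // integration package and may differ.
    func assertNoErr(t *testing.T, err error) {
        t.Helper() // attribute the failure to the caller's line

        if err != nil {
            t.Fatalf("unexpected error: %s", err) // Fatalf aborts the test, unlike Errorf
        }
    }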

View file

@@ -33,7 +33,8 @@ func TestUserCommand(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "user1": 0,
@@ -41,10 +42,10 @@ func TestUserCommand(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var listUsers []v1.User
     err = executeAndUnmarshal(headscale,
@@ -57,7 +58,7 @@ func TestUserCommand(t *testing.T) {
         },
         &listUsers,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     result := []string{listUsers[0].Name, listUsers[1].Name}
     sort.Strings(result)
@@ -79,7 +80,7 @@ func TestUserCommand(t *testing.T) {
             "newname",
         },
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var listAfterRenameUsers []v1.User
     err = executeAndUnmarshal(headscale,
@@ -92,7 +93,7 @@ func TestUserCommand(t *testing.T) {
         },
         &listAfterRenameUsers,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     result = []string{listAfterRenameUsers[0].Name, listAfterRenameUsers[1].Name}
     sort.Strings(result)
@@ -102,9 +103,6 @@ func TestUserCommand(t *testing.T) {
         []string{"newname", "user1"},
         result,
     )
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestPreAuthKeyCommand(t *testing.T) {
@@ -115,20 +113,21 @@ func TestPreAuthKeyCommand(t *testing.T) {
     count := 3
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         user: 0,
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     keys := make([]*v1.PreAuthKey, count)
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     for index := 0; index < count; index++ {
         var preAuthKey v1.PreAuthKey
@@ -150,7 +149,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
             },
             &preAuthKey,
         )
-        assert.NoError(t, err)
+        assertNoErr(t, err)
         keys[index] = &preAuthKey
     }
@@ -171,7 +170,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
         },
         &listedPreAuthKeys,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     // There is one key created by "scenario.CreateHeadscaleEnv"
     assert.Len(t, listedPreAuthKeys, 4)
@@ -222,7 +221,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
             listedPreAuthKeys[1].Key,
         },
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var listedPreAuthKeysAfterExpire []v1.PreAuthKey
     err = executeAndUnmarshal(
@@ -238,14 +237,11 @@ func TestPreAuthKeyCommand(t *testing.T) {
         },
         &listedPreAuthKeysAfterExpire,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     assert.True(t, listedPreAuthKeysAfterExpire[1].Expiration.AsTime().Before(time.Now()))
     assert.True(t, listedPreAuthKeysAfterExpire[2].Expiration.AsTime().After(time.Now()))
     assert.True(t, listedPreAuthKeysAfterExpire[3].Expiration.AsTime().After(time.Now()))
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
@@ -255,17 +251,18 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
     user := "pre-auth-key-without-exp-user"
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         user: 0,
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipaknaexp"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var preAuthKey v1.PreAuthKey
     err = executeAndUnmarshal(
@@ -282,7 +279,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
         },
         &preAuthKey,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var listedPreAuthKeys []v1.PreAuthKey
     err = executeAndUnmarshal(
@@ -298,7 +295,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
         },
         &listedPreAuthKeys,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     // There is one key created by "scenario.CreateHeadscaleEnv"
     assert.Len(t, listedPreAuthKeys, 2)
@@ -308,9 +305,6 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
         t,
         listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Minute*70)),
     )
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
@@ -320,17 +314,18 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
     user := "pre-auth-key-reus-ephm-user"
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         user: 0,
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipakresueeph"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var preAuthReusableKey v1.PreAuthKey
     err = executeAndUnmarshal(
@@ -347,7 +342,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
         },
         &preAuthReusableKey,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var preAuthEphemeralKey v1.PreAuthKey
     err = executeAndUnmarshal(
@@ -364,7 +359,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
         },
         &preAuthEphemeralKey,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     assert.True(t, preAuthEphemeralKey.GetEphemeral())
     assert.False(t, preAuthEphemeralKey.GetReusable())
@@ -383,13 +378,10 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
         },
         &listedPreAuthKeys,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     // There is one key created by "scenario.CreateHeadscaleEnv"
     assert.Len(t, listedPreAuthKeys, 3)
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestEnablingRoutes(t *testing.T) {
@@ -399,27 +391,24 @@ func TestEnablingRoutes(t *testing.T) {
     user := "enable-routing"
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErrf(t, "failed to create scenario: %s", err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         user: 3,
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"))
-    assert.NoError(t, err)
+    assertNoErrHeadscaleEnv(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErrGetHeadscale(t, err)
     // advertise routes using the up command
     for i, client := range allClients {
@@ -432,13 +421,11 @@ func TestEnablingRoutes(t *testing.T) {
             "-login-server", headscale.GetEndpoint(),
             "--hostname", hostname,
         })
-        assert.NoError(t, err)
+        assertNoErrf(t, "failed to advertise route: %s", err)
     }
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     var routes []*v1.Route
     err = executeAndUnmarshal(
@@ -453,7 +440,7 @@ func TestEnablingRoutes(t *testing.T) {
         &routes,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     assert.Len(t, routes, 3)
     for _, route := range routes {
@@ -471,7 +458,7 @@ func TestEnablingRoutes(t *testing.T) {
             "--route",
             strconv.Itoa(int(route.Id)),
         })
-        assert.NoError(t, err)
+        assertNoErr(t, err)
     }
     var enablingRoutes []*v1.Route
@@ -486,7 +473,7 @@ func TestEnablingRoutes(t *testing.T) {
         },
         &enablingRoutes,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     for _, route := range enablingRoutes {
         assert.Equal(t, route.Advertised, true)
@@ -504,7 +491,7 @@ func TestEnablingRoutes(t *testing.T) {
         "--route",
         strconv.Itoa(int(routeIDToBeDisabled)),
     })
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var disablingRoutes []*v1.Route
     err = executeAndUnmarshal(
@@ -518,7 +505,7 @@ func TestEnablingRoutes(t *testing.T) {
         },
         &disablingRoutes,
     )
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     for _, route := range disablingRoutes {
         assert.Equal(t, true, route.Advertised)
@@ -540,7 +527,8 @@ func TestApiKeyCommand(t *testing.T) {
     count := 5
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "user1": 0,
@@ -548,10 +536,10 @@ func TestApiKeyCommand(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     keys := make([]string, count)
@@ -675,9 +663,6 @@ func TestApiKeyCommand(t *testing.T) {
             )
         }
     }
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestNodeTagCommand(t *testing.T) {
@@ -685,17 +670,18 @@ func TestNodeTagCommand(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "user1": 0,
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     machineKeys := []string{
         "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
@@ -810,9 +796,6 @@ func TestNodeTagCommand(t *testing.T) {
         found,
         "should find a machine with the tag 'tag:test' in the list of machines",
     )
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestNodeCommand(t *testing.T) {
@@ -820,7 +803,8 @@ func TestNodeCommand(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "machine-user": 0,
@@ -828,10 +812,10 @@ func TestNodeCommand(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     // Randomly generated machine keys
     machineKeys := []string{
@@ -1053,9 +1037,6 @@ func TestNodeCommand(t *testing.T) {
     assert.Nil(t, err)
     assert.Len(t, listOnlyMachineUserAfterDelete, 4)
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestNodeExpireCommand(t *testing.T) {
@@ -1063,17 +1044,18 @@ func TestNodeExpireCommand(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "machine-expire-user": 0,
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     // Randomly generated machine keys
     machineKeys := []string{
@@ -1182,9 +1164,6 @@ func TestNodeExpireCommand(t *testing.T) {
     assert.True(t, listAllAfterExpiry[2].Expiry.AsTime().Before(time.Now()))
     assert.True(t, listAllAfterExpiry[3].Expiry.AsTime().IsZero())
     assert.True(t, listAllAfterExpiry[4].Expiry.AsTime().IsZero())
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestNodeRenameCommand(t *testing.T) {
@@ -1192,17 +1171,18 @@ func TestNodeRenameCommand(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "machine-rename-command": 0,
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     // Randomly generated machine keys
     machineKeys := []string{
@@ -1349,9 +1329,6 @@ func TestNodeRenameCommand(t *testing.T) {
     assert.Equal(t, "newmachine-3", listAllAfterRenameAttempt[2].GetGivenName())
     assert.Contains(t, listAllAfterRenameAttempt[3].GetGivenName(), "machine-4")
     assert.Contains(t, listAllAfterRenameAttempt[4].GetGivenName(), "machine-5")
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
 func TestNodeMoveCommand(t *testing.T) {
@@ -1359,7 +1336,8 @@ func TestNodeMoveCommand(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "old-user": 0,
@@ -1367,10 +1345,10 @@ func TestNodeMoveCommand(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     // Randomly generated machine key
     machineKey := "nodekey:688411b767663479632d44140f08a9fde87383adc7cdeb518f62ce28a17ef0aa"
@@ -1514,7 +1492,4 @@ func TestNodeMoveCommand(t *testing.T) {
     assert.Nil(t, err)
     assert.Equal(t, machine.User.Name, "old-user")
-    err = scenario.Shutdown()
-    assert.NoError(t, err)
 }
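Every test in this file now follows the same setup idiom: create the scenario, assert fatally on error, then immediately `defer scenario.Shutdown()`. Because `t.Fatalf` stops the test goroutine via `runtime.Goexit`, functions deferred before the failing assertion still run, so containers are torn down even when a later step fails; the old pattern continued past failures with `t.Errorf` and could panic on a half-initialized scenario before ever reaching the trailing `Shutdown()` call. A condensed sketch of the idiom (the test name is illustrative):

    func TestSomething(t *testing.T) {
        scenario, err := NewScenario()
        assertNoErr(t, err)       // must pass before Shutdown can be deferred safely
        defer scenario.Shutdown() // runs even if a later assertion calls t.Fatalf

        // ... test body using fatal assertions throughout ...
    }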

View file

@@ -13,7 +13,7 @@ type ControlServer interface {
     ConnectToNetwork(network *dockertest.Network) error
     GetHealthEndpoint() string
     GetEndpoint() string
-    WaitForReady() error
+    WaitForRunning() error
     CreateUser(user string) error
     CreateAuthKey(user string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)
     ListMachinesInUser(user string) ([]*v1.Machine, error)
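The interface change is mechanical, but the new name states what is actually awaited: the control server reaching its running (serving) state, mirroring the `WaitForRunning`/`WaitForNeedsLogin` pair on the Tailscale client side. An illustrative caller, with the error wrapping being an assumption rather than a quote from the repository:

    // Illustrative use of the interface; not taken verbatim from the codebase.
    func waitForControl(control ControlServer) error {
        if err := control.WaitForRunning(); err != nil {
            return fmt.Errorf("control server %s never became ready: %w", control.GetEndpoint(), err)
        }
        return nil
    }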

View file

@@ -24,14 +24,13 @@ func TestDERPServerScenario(t *testing.T) {
     // t.Parallel()
     baseScenario, err := NewScenario()
-    if err != nil {
-        t.Errorf("failed to create scenario: %s", err)
-    }
+    assertNoErr(t, err)
     scenario := EmbeddedDERPServerScenario{
         Scenario: baseScenario,
         tsicNetworks: map[string]*dockertest.Network{},
     }
+    defer scenario.Shutdown()
     spec := map[string]int{
         "user1": len(TailscaleVersions),
@@ -53,39 +52,23 @@ func TestDERPServerScenario(t *testing.T) {
         hsic.WithTLS(),
         hsic.WithHostnameAsServerURL(),
     )
-    if err != nil {
-        t.Errorf("failed to create headscale environment: %s", err)
-    }
+    assertNoErrHeadscaleEnv(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     allIps, err := scenario.ListTailscaleClientsIPs()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClientIPs(t, err)
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     allHostnames, err := scenario.ListTailscaleClientsFQDNs()
-    if err != nil {
-        t.Errorf("failed to get FQDNs: %s", err)
-    }
+    assertNoErrListFQDN(t, err)
     success := pingDerpAllHelper(t, allClients, allHostnames)
     t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
-    err = scenario.Shutdown()
-    if err != nil {
-        t.Errorf("failed to tear down scenario: %s", err)
-    }
 }
 func (s *EmbeddedDERPServerScenario) CreateHeadscaleEnv(
@@ -105,7 +88,7 @@ func (s *EmbeddedDERPServerScenario) CreateHeadscaleEnv(
     headscaleURL.Host = fmt.Sprintf("%s:%s", hsServer.GetHostname(), headscaleURL.Port())
-    err = hsServer.WaitForReady()
+    err = hsServer.WaitForRunning()
     if err != nil {
         return err
     }
@@ -186,16 +169,11 @@ func (s *EmbeddedDERPServerScenario) CreateTailscaleIsolatedNodesInUser(
         cert := hsServer.GetCert()
-        user.createWaitGroup.Add(1)
         opts = append(opts,
             tsic.WithHeadscaleTLS(cert),
         )
-        go func() {
-            defer user.createWaitGroup.Done()
-            // TODO(kradalby): error handle this
+        user.createWaitGroup.Go(func() error {
             tsClient, err := tsic.New(
                 s.pool,
                 version,
@@ -203,34 +181,45 @@ func (s *EmbeddedDERPServerScenario) CreateTailscaleIsolatedNodesInUser(
                 opts...,
             )
             if err != nil {
-                // return fmt.Errorf("failed to add tailscale node: %w", err)
-                log.Printf("failed to create tailscale node: %s", err)
+                return fmt.Errorf(
+                    "failed to create tailscale (%s) node: %w",
+                    tsClient.Hostname(),
+                    err,
+                )
             }
-            err = tsClient.WaitForReady()
+            err = tsClient.WaitForNeedsLogin()
             if err != nil {
-                // return fmt.Errorf("failed to add tailscale node: %w", err)
-                log.Printf("failed to wait for tailscaled: %s", err)
+                return fmt.Errorf(
+                    "failed to wait for tailscaled (%s) to need login: %w",
+                    tsClient.Hostname(),
+                    err,
+                )
             }
             user.Clients[tsClient.Hostname()] = tsClient
-        }()
+            return nil
+        })
     }
-    user.createWaitGroup.Wait()
+    if err := user.createWaitGroup.Wait(); err != nil {
+        return err
+    }
     return nil
 }
-    return fmt.Errorf("failed to add tailscale node: %w", errNoUserAvailable)
+    return fmt.Errorf("failed to add tailscale nodes: %w", errNoUserAvailable)
 }
-func (s *EmbeddedDERPServerScenario) Shutdown() error {
+func (s *EmbeddedDERPServerScenario) Shutdown() {
     for _, network := range s.tsicNetworks {
         err := s.pool.RemoveNetwork(network)
         if err != nil {
-            return err
+            log.Printf("failed to remove DERP network %s", network.Network.Name)
         }
     }
-    return s.Scenario.Shutdown()
+    s.Scenario.Shutdown()
 }
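The move from `sync.WaitGroup` plus bare goroutines to `errgroup.Group` is the core of this change: worker errors now propagate to the caller instead of being logged and dropped. The `c := client` line pins the loop variable before the closure captures it, which is required on the Go versions this code targets (before Go 1.22, all iterations share a single loop variable). A self-contained sketch of the pattern, using stand-in names rather than the real scenario types:

    package main

    import (
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        clients := []string{"ts-a", "ts-b", "ts-c"} // stand-ins for TailscaleClient

        var eg errgroup.Group
        for _, client := range clients {
            c := client // capture a per-iteration copy (pre-Go 1.22 semantics)
            eg.Go(func() error {
                // stand-in for tsic.New + WaitForNeedsLogin
                if c == "" {
                    return fmt.Errorf("empty client name")
                }
                return nil
            })
        }

        // Wait blocks until all goroutines finish and returns the first non-nil error.
        if err := eg.Wait(); err != nil {
            fmt.Println("setup failed:", err)
            return
        }
        fmt.Println("all clients created")
    }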

View file

@@ -21,9 +21,8 @@ func TestPingAllByIP(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    if err != nil {
-        t.Errorf("failed to create scenario: %s", err)
-    }
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "user1": len(TailscaleVersions),
@@ -31,24 +30,16 @@ func TestPingAllByIP(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
-    if err != nil {
-        t.Errorf("failed to create headscale environment: %s", err)
-    }
+    assertNoErrHeadscaleEnv(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     allIps, err := scenario.ListTailscaleClientsIPs()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClientIPs(t, err)
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
         return x.String()
@@ -56,11 +47,6 @@ func TestPingAllByIP(t *testing.T) {
     success := pingAllHelper(t, allClients, allAddrs)
     t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
-    err = scenario.Shutdown()
-    if err != nil {
-        t.Errorf("failed to tear down scenario: %s", err)
-    }
 }
 func TestAuthKeyLogoutAndRelogin(t *testing.T) {
@@ -68,9 +54,8 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    if err != nil {
-        t.Errorf("failed to create scenario: %s", err)
-    }
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "user1": len(TailscaleVersions),
@@ -78,25 +63,19 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
-    if err != nil {
-        t.Errorf("failed to create headscale environment: %s", err)
-    }
+    assertNoErrHeadscaleEnv(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     clientIPs := make(map[TailscaleClient][]netip.Addr)
     for _, client := range allClients {
         ips, err := client.IPs()
         if err != nil {
-            t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
+            t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
         }
         clientIPs[client] = ips
     }
@@ -104,45 +83,38 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
     for _, client := range allClients {
         err := client.Logout()
         if err != nil {
-            t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
+            t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
         }
     }
-    scenario.WaitForTailscaleLogout()
+    err = scenario.WaitForTailscaleLogout()
+    assertNoErrLogout(t, err)
     t.Logf("all clients logged out")
     headscale, err := scenario.Headscale()
-    if err != nil {
-        t.Errorf("failed to get headscale server: %s", err)
-    }
+    assertNoErrGetHeadscale(t, err)
     for userName := range spec {
         key, err := scenario.CreatePreAuthKey(userName, true, false)
         if err != nil {
-            t.Errorf("failed to create pre-auth key for user %s: %s", userName, err)
+            t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
         }
         err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
         if err != nil {
-            t.Errorf("failed to run tailscale up for user %s: %s", userName, err)
+            t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
        }
     }
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     allClients, err = scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     allIps, err := scenario.ListTailscaleClientsIPs()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClientIPs(t, err)
     allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
         return x.String()
@@ -154,12 +126,12 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
     for _, client := range allClients {
         ips, err := client.IPs()
         if err != nil {
-            t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
+            t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
         }
         // lets check if the IPs are the same
         if len(ips) != len(clientIPs[client]) {
-            t.Errorf("IPs changed for client %s", client.Hostname())
+            t.Fatalf("IPs changed for client %s", client.Hostname())
         }
         for _, ip := range ips {
@@ -173,7 +145,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
             }
             if !found {
-                t.Errorf(
+                t.Fatalf(
                     "IPs changed for client %s. Used to be %v now %v",
                     client.Hostname(),
                     clientIPs[client],
@@ -182,13 +154,6 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
             }
         }
     }
-    t.Logf("all clients IPs are the same")
-    err = scenario.Shutdown()
-    if err != nil {
-        t.Errorf("failed to tear down scenario: %s", err)
-    }
 }
 func TestEphemeral(t *testing.T) {
@@ -196,9 +161,8 @@ func TestEphemeral(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    if err != nil {
-        t.Errorf("failed to create scenario: %s", err)
-    }
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "user1": len(TailscaleVersions),
@@ -206,46 +170,38 @@ func TestEphemeral(t *testing.T) {
     }
     headscale, err := scenario.Headscale(hsic.WithTestName("ephemeral"))
-    if err != nil {
-        t.Errorf("failed to create headscale environment: %s", err)
-    }
+    assertNoErrHeadscaleEnv(t, err)
     for userName, clientCount := range spec {
         err = scenario.CreateUser(userName)
         if err != nil {
-            t.Errorf("failed to create user %s: %s", userName, err)
+            t.Fatalf("failed to create user %s: %s", userName, err)
         }
         err = scenario.CreateTailscaleNodesInUser(userName, "all", clientCount, []tsic.Option{}...)
         if err != nil {
-            t.Errorf("failed to create tailscale nodes in user %s: %s", userName, err)
+            t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err)
         }
         key, err := scenario.CreatePreAuthKey(userName, true, true)
         if err != nil {
-            t.Errorf("failed to create pre-auth key for user %s: %s", userName, err)
+            t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
         }
         err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
         if err != nil {
-            t.Errorf("failed to run tailscale up for user %s: %s", userName, err)
+            t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
         }
     }
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     allIps, err := scenario.ListTailscaleClientsIPs()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClientIPs(t, err)
     allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
         return x.String()
@@ -257,11 +213,12 @@ func TestEphemeral(t *testing.T) {
     for _, client := range allClients {
         err := client.Logout()
         if err != nil {
-            t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
+            t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
         }
     }
-    scenario.WaitForTailscaleLogout()
+    err = scenario.WaitForTailscaleLogout()
+    assertNoErrLogout(t, err)
     t.Logf("all clients logged out")
@@ -277,14 +234,9 @@ func TestEphemeral(t *testing.T) {
         }
         if len(machines) != 0 {
-            t.Errorf("expected no machines, got %d in user %s", len(machines), userName)
+            t.Fatalf("expected no machines, got %d in user %s", len(machines), userName)
         }
     }
-    err = scenario.Shutdown()
-    if err != nil {
-        t.Errorf("failed to tear down scenario: %s", err)
-    }
 }
 func TestPingAllByHostname(t *testing.T) {
@@ -292,9 +244,8 @@ func TestPingAllByHostname(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    if err != nil {
-        t.Errorf("failed to create scenario: %s", err)
-    }
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         // Omit 1.16.2 (-1) because it does not have the FQDN field
@@ -303,33 +254,20 @@ func TestPingAllByHostname(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyname"))
-    if err != nil {
-        t.Errorf("failed to create headscale environment: %s", err)
-    }
+    assertNoErrHeadscaleEnv(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     allHostnames, err := scenario.ListTailscaleClientsFQDNs()
-    if err != nil {
-        t.Errorf("failed to get FQDNs: %s", err)
-    }
+    assertNoErrListFQDN(t, err)
     success := pingAllHelper(t, allClients, allHostnames)
     t.Logf("%d successful pings out of %d", success, len(allClients)*len(allClients))
-    err = scenario.Shutdown()
-    if err != nil {
-        t.Errorf("failed to tear down scenario: %s", err)
-    }
 }
 // If subtests are parallel, then they will start before setup is run.
@@ -354,9 +292,8 @@ func TestTaildrop(t *testing.T) {
     }
     scenario, err := NewScenario()
-    if err != nil {
-        t.Errorf("failed to create scenario: %s", err)
-    }
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         // Omit 1.16.2 (-1) because it does not have the FQDN field
@@ -364,31 +301,23 @@ func TestTaildrop(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("taildrop"))
-    if err != nil {
-        t.Errorf("failed to create headscale environment: %s", err)
-    }
+    assertNoErrHeadscaleEnv(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     // This will essentially fetch and cache all the FQDNs
     _, err = scenario.ListTailscaleClientsFQDNs()
-    if err != nil {
-        t.Errorf("failed to get FQDNs: %s", err)
-    }
+    assertNoErrListFQDN(t, err)
     for _, client := range allClients {
         command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", client.Hostname())}
         if _, _, err := client.Execute(command); err != nil {
-            t.Errorf("failed to create taildrop file on %s, err: %s", client.Hostname(), err)
+            t.Fatalf("failed to create taildrop file on %s, err: %s", client.Hostname(), err)
         }
         for _, peer := range allClients {
@@ -417,7 +346,7 @@ func TestTaildrop(t *testing.T) {
                 return err
             })
             if err != nil {
-                t.Errorf(
+                t.Fatalf(
                     "failed to send taildrop file on %s, err: %s",
                     client.Hostname(),
                     err,
@@ -434,7 +363,7 @@ func TestTaildrop(t *testing.T) {
             "/tmp/",
         }
         if _, _, err := client.Execute(command); err != nil {
-            t.Errorf("failed to get taildrop file on %s, err: %s", client.Hostname(), err)
+            t.Fatalf("failed to get taildrop file on %s, err: %s", client.Hostname(), err)
        }
        for _, peer := range allClients {
@@ -454,13 +383,11 @@ func TestTaildrop(t *testing.T) {
                 )
                 result, _, err := client.Execute(command)
-                if err != nil {
-                    t.Errorf("failed to execute command to ls taildrop: %s", err)
-                }
+                assertNoErrf(t, "failed to execute command to ls taildrop: %s", err)
                 log.Printf("Result for %s: %s\n", peer.Hostname(), result)
                 if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result {
-                    t.Errorf(
+                    t.Fatalf(
                         "taildrop result is not correct %s, wanted %s",
                         result,
                         fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()),
@@ -469,11 +396,6 @@ func TestTaildrop(t *testing.T) {
             })
         }
     }
-    err = scenario.Shutdown()
-    if err != nil {
-        t.Errorf("failed to tear down scenario: %s", err)
-    }
 }
 func TestResolveMagicDNS(t *testing.T) {
@@ -481,9 +403,8 @@ func TestResolveMagicDNS(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    if err != nil {
-        t.Errorf("failed to create scenario: %s", err)
-    }
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         // Omit 1.16.2 (-1) because it does not have the FQDN field
@@ -492,30 +413,20 @@ func TestResolveMagicDNS(t *testing.T) {
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns"))
-    if err != nil {
-        t.Errorf("failed to create headscale environment: %s", err)
-    }
+    assertNoErrHeadscaleEnv(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     // Poor mans cache
     _, err = scenario.ListTailscaleClientsFQDNs()
-    if err != nil {
-        t.Errorf("failed to get FQDNs: %s", err)
-    }
+    assertNoErrListFQDN(t, err)
     _, err = scenario.ListTailscaleClientsIPs()
-    if err != nil {
-        t.Errorf("failed to get IPs: %s", err)
-    }
+    assertNoErrListClientIPs(t, err)
     for _, client := range allClients {
         for _, peer := range allClients {
@@ -528,7 +439,7 @@ func TestResolveMagicDNS(t *testing.T) {
             }
             result, _, err := client.Execute(command)
             if err != nil {
-                t.Errorf(
+                t.Fatalf(
                     "failed to execute resolve/ip command %s from %s: %s",
                     peerFQDN,
                     client.Hostname(),
@@ -538,7 +449,7 @@ func TestResolveMagicDNS(t *testing.T) {
             ips, err := peer.IPs()
             if err != nil {
-                t.Errorf(
+                t.Fatalf(
                     "failed to get ips for %s: %s",
                     peer.Hostname(),
                     err,
@@ -547,16 +458,11 @@ func TestResolveMagicDNS(t *testing.T) {
             for _, ip := range ips {
                 if !strings.Contains(result, ip.String()) {
-                    t.Errorf("ip %s is not found in \n%s\n", ip.String(), result)
+                    t.Fatalf("ip %s is not found in \n%s\n", ip.String(), result)
                 }
             }
         }
     }
-    err = scenario.Shutdown()
-    if err != nil {
-        t.Errorf("failed to tear down scenario: %s", err)
-    }
 }
 func TestExpireNode(t *testing.T) {
@@ -564,33 +470,24 @@ func TestExpireNode(t *testing.T) {
     t.Parallel()
     scenario, err := NewScenario()
-    if err != nil {
-        t.Errorf("failed to create scenario: %s", err)
-    }
+    assertNoErr(t, err)
+    defer scenario.Shutdown()
     spec := map[string]int{
         "user1": len(TailscaleVersions),
     }
     err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("expirenode"))
-    if err != nil {
-        t.Errorf("failed to create headscale environment: %s", err)
-    }
+    assertNoErrHeadscaleEnv(t, err)
     allClients, err := scenario.ListTailscaleClients()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClients(t, err)
     allIps, err := scenario.ListTailscaleClientsIPs()
-    if err != nil {
-        t.Errorf("failed to get clients: %s", err)
-    }
+    assertNoErrListClientIPs(t, err)
     err = scenario.WaitForTailscaleSync()
-    if err != nil {
-        t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-    }
+    assertNoErrSync(t, err)
     allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
         return x.String()
@@ -601,25 +498,25 @@ func TestExpireNode(t *testing.T) {
     for _, client := range allClients {
         status, err := client.Status()
-        assert.NoError(t, err)
+        assertNoErr(t, err)
         // Assert that we have the original count - self
         assert.Len(t, status.Peers(), len(TailscaleVersions)-1)
     }
     headscale, err := scenario.Headscale()
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     // TODO(kradalby): This is Headscale specific and would not play nicely
     // with other implementations of the ControlServer interface
     result, err := headscale.Execute([]string{
         "headscale", "nodes", "expire", "--identifier", "0", "--output", "json",
     })
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     var machine v1.Machine
     err = json.Unmarshal([]byte(result), &machine)
-    assert.NoError(t, err)
+    assertNoErr(t, err)
     time.Sleep(30 * time.Second)
@@ -627,7 +524,7 @@ func TestExpireNode(t *testing.T) {
     // of connected nodes.
     for _, client := range allClients {
         status, err := client.Status()
-        assert.NoError(t, err)
+        assertNoErr(t, err)
         for _, peerKey := range status.Peers() {
             peerStatus := status.Peer[peerKey]
@@ -642,9 +539,4 @@ func TestExpireNode(t *testing.T) {
             assert.Len(t, status.Peers(), len(TailscaleVersions)-2)
         }
     }
-    err = scenario.Shutdown()
-    if err != nil {
-        t.Errorf("failed to tear down scenario: %s", err)
-    }
 }

View file

@@ -428,9 +428,9 @@ func (t *HeadscaleInContainer) GetHostname() string {
     return t.hostname
 }
-// WaitForReady blocks until the Headscale instance is ready to
+// WaitForRunning blocks until the Headscale instance is ready to
 // serve clients.
-func (t *HeadscaleInContainer) WaitForReady() error {
+func (t *HeadscaleInContainer) WaitForRunning() error {
     url := t.GetHealthEndpoint()
     log.Printf("waiting for headscale to be ready at %s", url)

View file

@ -16,6 +16,7 @@ import (
"github.com/juanfont/headscale/integration/tsic" "github.com/juanfont/headscale/integration/tsic"
"github.com/ory/dockertest/v3" "github.com/ory/dockertest/v3"
"github.com/puzpuzpuz/xsync/v2" "github.com/puzpuzpuz/xsync/v2"
"golang.org/x/sync/errgroup"
) )
const ( const (
@ -33,30 +34,33 @@ var (
tailscaleVersions2021 = []string{ tailscaleVersions2021 = []string{
"head", "head",
"unstable", "unstable",
"1.40.0", "1.48",
"1.38.4", "1.46",
"1.36.2", "1.44",
"1.34.2", "1.42",
"1.32.3", "1.40",
"1.30.2", "1.38",
"1.36",
"1.34",
"1.32",
"1.30",
} }
tailscaleVersions2019 = []string{ tailscaleVersions2019 = []string{
"1.28.0", "1.28",
"1.26.2", "1.26",
"1.24.2", "1.24",
"1.22.2", "1.22",
"1.20.4", "1.20",
"1.18",
} }
// tailscaleVersionsUnavailable = []string{ // tailscaleVersionsUnavailable = []string{
// // These versions seem to fail when fetching from apt. // // These versions seem to fail when fetching from apt.
// "1.18.2", // "1.14.6",
// "1.16.2", // "1.12.4",
// "1.14.6", // "1.10.2",
// "1.12.4", // "1.8.7",
// "1.10.2",
// "1.8.7",
// }. // }.
// TailscaleVersions represents a list of Tailscale versions the suite // TailscaleVersions represents a list of Tailscale versions the suite
@ -79,9 +83,9 @@ var (
type User struct { type User struct {
Clients map[string]TailscaleClient Clients map[string]TailscaleClient
createWaitGroup sync.WaitGroup createWaitGroup errgroup.Group
joinWaitGroup sync.WaitGroup joinWaitGroup errgroup.Group
syncWaitGroup sync.WaitGroup syncWaitGroup errgroup.Group
} }
// Scenario is a representation of an environment with one ControlServer and // Scenario is a representation of an environment with one ControlServer and
@ -148,7 +152,7 @@ func NewScenario() (*Scenario, error) {
// and networks associated with it. // and networks associated with it.
// In addition, it will save the logs of the ControlServer to `/tmp/control` in the // In addition, it will save the logs of the ControlServer to `/tmp/control` in the
// environment running the tests. // environment running the tests.
func (s *Scenario) Shutdown() error { func (s *Scenario) Shutdown() {
s.controlServers.Range(func(_ string, control ControlServer) bool { s.controlServers.Range(func(_ string, control ControlServer) bool {
err := control.Shutdown() err := control.Shutdown()
if err != nil { if err != nil {
@ -166,21 +170,19 @@ func (s *Scenario) Shutdown() error {
log.Printf("removing client %s in user %s", client.Hostname(), userName) log.Printf("removing client %s in user %s", client.Hostname(), userName)
err := client.Shutdown() err := client.Shutdown()
if err != nil { if err != nil {
return fmt.Errorf("failed to tear down client: %w", err) log.Printf("failed to tear down client: %s", err)
} }
} }
} }
if err := s.pool.RemoveNetwork(s.network); err != nil { if err := s.pool.RemoveNetwork(s.network); err != nil {
return fmt.Errorf("failed to remove network: %w", err) log.Printf("failed to remove network: %s", err)
} }
// TODO(kradalby): This seem redundant to the previous call // TODO(kradalby): This seem redundant to the previous call
// if err := s.network.Close(); err != nil { // if err := s.network.Close(); err != nil {
// return fmt.Errorf("failed to tear down network: %w", err) // return fmt.Errorf("failed to tear down network: %w", err)
// } // }
return nil
} }
// Users returns the name of all users associated with the Scenario. // Users returns the name of all users associated with the Scenario.
@ -213,7 +215,7 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
return nil, fmt.Errorf("failed to create headscale container: %w", err) return nil, fmt.Errorf("failed to create headscale container: %w", err)
} }
err = headscale.WaitForReady() err = headscale.WaitForRunning()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed reach headscale container: %w", err) return nil, fmt.Errorf("failed reach headscale container: %w", err)
} }
@@ -286,17 +288,12 @@ func (s *Scenario) CreateTailscaleNodesInUser(
		cert := headscale.GetCert()
		hostname := headscale.GetHostname()

-		user.createWaitGroup.Add(1)
-
		opts = append(opts,
			tsic.WithHeadscaleTLS(cert),
			tsic.WithHeadscaleName(hostname),
		)

-		go func() {
-			defer user.createWaitGroup.Done()
-
-			// TODO(kradalby): error handle this
+		user.createWaitGroup.Go(func() error {
			tsClient, err := tsic.New(
				s.pool,
				version,
@@ -304,20 +301,30 @@ func (s *Scenario) CreateTailscaleNodesInUser(
				opts...,
			)
			if err != nil {
-				// return fmt.Errorf("failed to add tailscale node: %w", err)
-				log.Printf("failed to create tailscale node: %s", err)
+				return fmt.Errorf(
+					"failed to create tailscale (%s) node: %w",
+					tsClient.Hostname(),
+					err,
+				)
			}

-			err = tsClient.WaitForReady()
+			err = tsClient.WaitForNeedsLogin()
			if err != nil {
-				// return fmt.Errorf("failed to add tailscale node: %w", err)
-				log.Printf("failed to wait for tailscaled: %s", err)
+				return fmt.Errorf(
+					"failed to wait for tailscaled (%s) to need login: %w",
+					tsClient.Hostname(),
+					err,
+				)
			}

			user.Clients[tsClient.Hostname()] = tsClient
-		}()
+
+			return nil
+		})
	}
-	user.createWaitGroup.Wait()
+
+	if err := user.createWaitGroup.Wait(); err != nil {
+		return err
+	}

	return nil
}
@@ -332,29 +339,20 @@ func (s *Scenario) RunTailscaleUp(
) error {
	if user, ok := s.users[userStr]; ok {
		for _, client := range user.Clients {
-			user.joinWaitGroup.Add(1)
-
-			go func(c TailscaleClient) {
-				defer user.joinWaitGroup.Done()
-
-				// TODO(kradalby): error handle this
-				_ = c.Up(loginServer, authKey)
-			}(client)
-
-			err := client.WaitForReady()
-			if err != nil {
-				log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
-			}
+			c := client
+			user.joinWaitGroup.Go(func() error {
+				return c.Login(loginServer, authKey)
+			})
		}

-		user.joinWaitGroup.Wait()
+		if err := user.joinWaitGroup.Wait(); err != nil {
+			return err
+		}

		for _, client := range user.Clients {
-			err := client.WaitForReady()
+			err := client.WaitForRunning()
			if err != nil {
-				log.Printf("client %s was not ready: %s", client.Hostname(), err)
-				return fmt.Errorf("failed to up tailscale node: %w", err)
+				return fmt.Errorf("%s failed to up tailscale node: %w", client.Hostname(), err)
			}
		}
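The `c := client` copy before each joinWaitGroup.Go call is load-bearing: before Go 1.22, a range variable is reused across iterations, so closures that capture `client` directly may all observe the last element. A standalone illustration of the pitfall, independent of the commit:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	names := []string{"ts-a", "ts-b", "ts-c"}

	for _, n := range names {
		n := n // drop this line and, pre-Go 1.22, every goroutine may print "ts-c"
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("logging in", n)
		}()
	}

	wg.Wait()
}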
@@ -381,18 +379,22 @@ func (s *Scenario) CountTailscale() int {
func (s *Scenario) WaitForTailscaleSync() error {
	tsCount := s.CountTailscale()

+	return s.WaitForTailscaleSyncWithPeerCount(tsCount - 1)
+}
+
+// WaitForTailscaleSyncWithPeerCount blocks execution until all the TailscaleClient reports
+// to have all other TailscaleClients present in their netmap.NetworkMap.
+func (s *Scenario) WaitForTailscaleSyncWithPeerCount(peerCount int) error {
	for _, user := range s.users {
		for _, client := range user.Clients {
-			user.syncWaitGroup.Add(1)
-
-			go func(c TailscaleClient) {
-				defer user.syncWaitGroup.Done()
-
-				// TODO(kradalby): error handle this
-				_ = c.WaitForPeers(tsCount)
-			}(client)
+			c := client
+			user.syncWaitGroup.Go(func() error {
+				return c.WaitForPeers(peerCount)
+			})
		}
-		user.syncWaitGroup.Wait()
+		if err := user.syncWaitGroup.Wait(); err != nil {
+			return err
+		}
	}

	return nil
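As a concrete example of the split: in a scenario with four clients, CountTailscale returns 4 and each node's netmap should list the other three, so WaitForTailscaleSync calls WaitForTailscaleSyncWithPeerCount(3); the parameterised variant presumably lets tests with restricted visibility pass a smaller count directly.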
@@ -555,18 +557,18 @@ func (s *Scenario) ListTailscaleClientsFQDNs(users ...string) ([]string, error)

// WaitForTailscaleLogout blocks execution until all TailscaleClients have
// logged out of the ControlServer.
-func (s *Scenario) WaitForTailscaleLogout() {
+func (s *Scenario) WaitForTailscaleLogout() error {
	for _, user := range s.users {
		for _, client := range user.Clients {
-			user.syncWaitGroup.Add(1)
-
-			go func(c TailscaleClient) {
-				defer user.syncWaitGroup.Done()
-
-				// TODO(kradalby): error handle this
-				_ = c.WaitForLogout()
-			}(client)
+			c := client
+			user.syncWaitGroup.Go(func() error {
+				return c.WaitForLogout()
+			})
		}
-		user.syncWaitGroup.Wait()
+		if err := user.syncWaitGroup.Wait(); err != nil {
+			return err
+		}
	}
+
+	return nil
}

View file

@@ -34,44 +34,38 @@ func TestHeadscale(t *testing.T) {
	user := "test-space"

	scenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)
+	defer scenario.Shutdown()

	t.Run("start-headscale", func(t *testing.T) {
		headscale, err := scenario.Headscale()
		if err != nil {
-			t.Errorf("failed to create start headcale: %s", err)
+			t.Fatalf("failed to create start headcale: %s", err)
		}

-		err = headscale.WaitForReady()
+		err = headscale.WaitForRunning()
		if err != nil {
-			t.Errorf("headscale failed to become ready: %s", err)
+			t.Fatalf("headscale failed to become ready: %s", err)
		}
	})

	t.Run("create-user", func(t *testing.T) {
		err := scenario.CreateUser(user)
		if err != nil {
-			t.Errorf("failed to create user: %s", err)
+			t.Fatalf("failed to create user: %s", err)
		}

		if _, ok := scenario.users[user]; !ok {
-			t.Errorf("user is not in scenario")
+			t.Fatalf("user is not in scenario")
		}
	})

	t.Run("create-auth-key", func(t *testing.T) {
		_, err := scenario.CreatePreAuthKey(user, true, false)
		if err != nil {
-			t.Errorf("failed to create preauthkey: %s", err)
+			t.Fatalf("failed to create preauthkey: %s", err)
		}
	})
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
}
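The wholesale t.Errorf → t.Fatalf swap changes failure semantics: Errorf records the failure and keeps executing, which here meant later assertions ran against half-built scenarios; Fatalf fails and stops the test function at that line. A minimal sketch using only the standard library:

package example

import (
	"errors"
	"testing"
)

func TestFailFast(t *testing.T) {
	err := errors.New("boom")
	if err != nil {
		// With t.Errorf the test would continue and trip over
		// uninitialised state below; t.Fatalf stops it here.
		t.Fatalf("setup failed: %s", err)
	}

	t.Log("not reached when setup fails") // only runs on the happy path
}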
// If subtests are parallel, then they will start before setup is run.
@@ -85,9 +79,8 @@ func TestCreateTailscale(t *testing.T) {
	user := "only-create-containers"

	scenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)
+	defer scenario.Shutdown()

	scenario.users[user] = &User{
		Clients: make(map[string]TailscaleClient),
@@ -96,20 +89,15 @@ func TestCreateTailscale(t *testing.T) {
	t.Run("create-tailscale", func(t *testing.T) {
		err := scenario.CreateTailscaleNodesInUser(user, "all", 3)
		if err != nil {
-			t.Errorf("failed to add tailscale nodes: %s", err)
+			t.Fatalf("failed to add tailscale nodes: %s", err)
		}

		if clients := len(scenario.users[user].Clients); clients != 3 {
-			t.Errorf("wrong number of tailscale clients: %d != %d", clients, 3)
+			t.Fatalf("wrong number of tailscale clients: %d != %d", clients, 3)
		}

		// TODO(kradalby): Test "all" version logic
	})
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
}
// If subtests are parallel, then they will start before setup is run.
@@ -127,53 +115,52 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
	count := 1

	scenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)
+	defer scenario.Shutdown()

	t.Run("start-headscale", func(t *testing.T) {
		headscale, err := scenario.Headscale()
		if err != nil {
-			t.Errorf("failed to create start headcale: %s", err)
+			t.Fatalf("failed to create start headcale: %s", err)
		}

-		err = headscale.WaitForReady()
+		err = headscale.WaitForRunning()
		if err != nil {
-			t.Errorf("headscale failed to become ready: %s", err)
+			t.Fatalf("headscale failed to become ready: %s", err)
		}
	})

	t.Run("create-user", func(t *testing.T) {
		err := scenario.CreateUser(user)
		if err != nil {
-			t.Errorf("failed to create user: %s", err)
+			t.Fatalf("failed to create user: %s", err)
		}

		if _, ok := scenario.users[user]; !ok {
-			t.Errorf("user is not in scenario")
+			t.Fatalf("user is not in scenario")
		}
	})

	t.Run("create-tailscale", func(t *testing.T) {
		err := scenario.CreateTailscaleNodesInUser(user, "1.30.2", count)
		if err != nil {
-			t.Errorf("failed to add tailscale nodes: %s", err)
+			t.Fatalf("failed to add tailscale nodes: %s", err)
		}

		if clients := len(scenario.users[user].Clients); clients != count {
-			t.Errorf("wrong number of tailscale clients: %d != %d", clients, count)
+			t.Fatalf("wrong number of tailscale clients: %d != %d", clients, count)
		}
	})

	t.Run("join-headscale", func(t *testing.T) {
		key, err := scenario.CreatePreAuthKey(user, true, false)
		if err != nil {
-			t.Errorf("failed to create preauthkey: %s", err)
+			t.Fatalf("failed to create preauthkey: %s", err)
		}

		headscale, err := scenario.Headscale()
		if err != nil {
-			t.Errorf("failed to create start headcale: %s", err)
+			t.Fatalf("failed to create start headcale: %s", err)
		}

		err = scenario.RunTailscaleUp(
@@ -182,23 +169,18 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
			key.GetKey(),
		)
		if err != nil {
-			t.Errorf("failed to login: %s", err)
+			t.Fatalf("failed to login: %s", err)
		}
	})

	t.Run("get-ips", func(t *testing.T) {
		ips, err := scenario.GetIPs(user)
		if err != nil {
-			t.Errorf("failed to get tailscale ips: %s", err)
+			t.Fatalf("failed to get tailscale ips: %s", err)
		}

		if len(ips) != count*2 {
-			t.Errorf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2)
+			t.Fatalf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2)
		}
	})
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
}

View file

@@ -41,65 +41,79 @@ var retry = func(times int, sleepInterval time.Duration,
	return result, stderr, err
}

-func TestSSHOneUserAllToAll(t *testing.T) {
-	IntegrationSkip(t)
-	t.Parallel()
+func sshScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Scenario {
+	t.Helper()

	scenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
+	assertNoErr(t, err)

	spec := map[string]int{
-		"user1": len(TailscaleVersions) - 5,
+		"user1": clientsPerUser,
+		"user2": clientsPerUser,
	}

	err = scenario.CreateHeadscaleEnv(spec,
-		[]tsic.Option{tsic.WithSSH()},
-		hsic.WithACLPolicy(
-			&policy.ACLPolicy{
-				Groups: map[string][]string{
-					"group:integration-test": {"user1"},
-				},
-				ACLs: []policy.ACL{
-					{
-						Action:       "accept",
-						Sources:      []string{"*"},
-						Destinations: []string{"*:*"},
-					},
-				},
-				SSHs: []policy.SSH{
-					{
-						Action:       "accept",
-						Sources:      []string{"group:integration-test"},
-						Destinations: []string{"group:integration-test"},
-						Users:        []string{"ssh-it-user"},
-					},
-				},
-			},
-		),
+		[]tsic.Option{
+			tsic.WithDockerEntrypoint([]string{
+				"/bin/sh",
+				"-c",
+				"/bin/sleep 3 ; apk add openssh ; update-ca-certificates ; tailscaled --tun=tsdev",
+			}),
+			tsic.WithDockerWorkdir("/"),
+		},
+		hsic.WithACLPolicy(policy),
+		hsic.WithTestName("ssh"),
		hsic.WithConfigEnv(map[string]string{
			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
		}),
	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
-
-	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErr(t, err)

	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErr(t, err)

	_, err = scenario.ListTailscaleClientsFQDNs()
-	if err != nil {
-		t.Errorf("failed to get FQDNs: %s", err)
-	}
+	assertNoErr(t, err)
+
+	return scenario
+}
+
+func TestSSHOneUserAllToAll(t *testing.T) {
+	IntegrationSkip(t)
+	t.Parallel()
+
+	scenario := sshScenario(t,
+		&policy.ACLPolicy{
+			Groups: map[string][]string{
+				"group:integration-test": {"user1"},
+			},
+			ACLs: []policy.ACL{
+				{
+					Action:       "accept",
+					Sources:      []string{"*"},
+					Destinations: []string{"*:*"},
+				},
+			},
+			SSHs: []policy.SSH{
+				{
+					Action:       "accept",
+					Sources:      []string{"group:integration-test"},
+					Destinations: []string{"group:integration-test"},
+					Users:        []string{"ssh-it-user"},
+				},
+			},
+		},
+		len(TailscaleVersions)-5,
+	)
+	defer scenario.Shutdown()
+
+	allClients, err := scenario.ListTailscaleClients()
+	assertNoErrListClients(t, err)
+
+	err = scenario.WaitForTailscaleSync()
+	assertNoErrSync(t, err)
+
+	_, err = scenario.ListTailscaleClientsFQDNs()
+	assertNoErrListFQDN(t, err)
	for _, client := range allClients {
		for _, peer := range allClients {
@@ -110,78 +124,48 @@ func TestSSHOneUserAllToAll(t *testing.T) {
			assertSSHHostname(t, client, peer)
		}
	}
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
}
func TestSSHMultipleUsersAllToAll(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

-	scenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
-
-	spec := map[string]int{
-		"user1": len(TailscaleVersions) - 5,
-		"user2": len(TailscaleVersions) - 5,
-	}
-
-	err = scenario.CreateHeadscaleEnv(spec,
-		[]tsic.Option{tsic.WithSSH()},
-		hsic.WithACLPolicy(
-			&policy.ACLPolicy{
-				Groups: map[string][]string{
-					"group:integration-test": {"user1", "user2"},
-				},
-				ACLs: []policy.ACL{
-					{
-						Action:       "accept",
-						Sources:      []string{"*"},
-						Destinations: []string{"*:*"},
-					},
-				},
-				SSHs: []policy.SSH{
-					{
-						Action:       "accept",
-						Sources:      []string{"group:integration-test"},
-						Destinations: []string{"group:integration-test"},
-						Users:        []string{"ssh-it-user"},
-					},
-				},
-			},
-		),
-		hsic.WithConfigEnv(map[string]string{
-			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
-		}),
+	scenario := sshScenario(t,
+		&policy.ACLPolicy{
+			Groups: map[string][]string{
+				"group:integration-test": {"user1", "user2"},
+			},
+			ACLs: []policy.ACL{
+				{
+					Action:       "accept",
+					Sources:      []string{"*"},
+					Destinations: []string{"*:*"},
+				},
+			},
+			SSHs: []policy.SSH{
+				{
+					Action:       "accept",
+					Sources:      []string{"group:integration-test"},
+					Destinations: []string{"group:integration-test"},
+					Users:        []string{"ssh-it-user"},
+				},
+			},
+		},
+		len(TailscaleVersions)-5,
	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	defer scenario.Shutdown()

	nsOneClients, err := scenario.ListTailscaleClients("user1")
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)

	nsTwoClients, err := scenario.ListTailscaleClients("user2")
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)

	_, err = scenario.ListTailscaleClientsFQDNs()
-	if err != nil {
-		t.Errorf("failed to get FQDNs: %s", err)
-	}
+	assertNoErrListFQDN(t, err)

	testInterUserSSH := func(sourceClients []TailscaleClient, targetClients []TailscaleClient) {
		for _, client := range sourceClients {
@@ -193,66 +177,38 @@ func TestSSHMultipleUsersAllToAll(t *testing.T) {
	testInterUserSSH(nsOneClients, nsTwoClients)
	testInterUserSSH(nsTwoClients, nsOneClients)
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
}
func TestSSHNoSSHConfigured(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

-	scenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
-
-	spec := map[string]int{
-		"user1": len(TailscaleVersions) - 5,
-	}
-
-	err = scenario.CreateHeadscaleEnv(spec,
-		[]tsic.Option{tsic.WithSSH()},
-		hsic.WithACLPolicy(
-			&policy.ACLPolicy{
-				Groups: map[string][]string{
-					"group:integration-test": {"user1"},
-				},
-				ACLs: []policy.ACL{
-					{
-						Action:       "accept",
-						Sources:      []string{"*"},
-						Destinations: []string{"*:*"},
-					},
-				},
-				SSHs: []policy.SSH{},
-			},
-		),
-		hsic.WithTestName("sshnoneconfigured"),
-		hsic.WithConfigEnv(map[string]string{
-			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
-		}),
+	scenario := sshScenario(t,
+		&policy.ACLPolicy{
+			Groups: map[string][]string{
+				"group:integration-test": {"user1"},
+			},
+			ACLs: []policy.ACL{
+				{
+					Action:       "accept",
+					Sources:      []string{"*"},
+					Destinations: []string{"*:*"},
+				},
+			},
+			SSHs: []policy.SSH{},
+		},
+		len(TailscaleVersions)-5,
	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	defer scenario.Shutdown()

	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)

	_, err = scenario.ListTailscaleClientsFQDNs()
-	if err != nil {
-		t.Errorf("failed to get FQDNs: %s", err)
-	}
+	assertNoErrListFQDN(t, err)

	for _, client := range allClients {
		for _, peer := range allClients {
@@ -263,73 +219,45 @@ func TestSSHNoSSHConfigured(t *testing.T) {
			assertSSHPermissionDenied(t, client, peer)
		}
	}
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
}
func TestSSHIsBlockedInACL(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

-	scenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
-
-	spec := map[string]int{
-		"user1": len(TailscaleVersions) - 5,
-	}
-
-	err = scenario.CreateHeadscaleEnv(spec,
-		[]tsic.Option{tsic.WithSSH()},
-		hsic.WithACLPolicy(
-			&policy.ACLPolicy{
-				Groups: map[string][]string{
-					"group:integration-test": {"user1"},
-				},
-				ACLs: []policy.ACL{
-					{
-						Action:       "accept",
-						Sources:      []string{"*"},
-						Destinations: []string{"*:80"},
-					},
-				},
-				SSHs: []policy.SSH{
-					{
-						Action:       "accept",
-						Sources:      []string{"group:integration-test"},
-						Destinations: []string{"group:integration-test"},
-						Users:        []string{"ssh-it-user"},
-					},
-				},
-			},
-		),
-		hsic.WithTestName("sshisblockedinacl"),
-		hsic.WithConfigEnv(map[string]string{
-			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
-		}),
+	scenario := sshScenario(t,
+		&policy.ACLPolicy{
+			Groups: map[string][]string{
+				"group:integration-test": {"user1"},
+			},
+			ACLs: []policy.ACL{
+				{
+					Action:       "accept",
+					Sources:      []string{"*"},
+					Destinations: []string{"*:80"},
+				},
+			},
+			SSHs: []policy.SSH{
+				{
+					Action:       "accept",
+					Sources:      []string{"group:integration-test"},
+					Destinations: []string{"group:integration-test"},
+					Users:        []string{"ssh-it-user"},
+				},
+			},
+		},
+		len(TailscaleVersions)-5,
	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	defer scenario.Shutdown()

	allClients, err := scenario.ListTailscaleClients()
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	assertNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)

	_, err = scenario.ListTailscaleClientsFQDNs()
-	if err != nil {
-		t.Errorf("failed to get FQDNs: %s", err)
-	}
+	assertNoErrListFQDN(t, err)

	for _, client := range allClients {
		for _, peer := range allClients {
@@ -340,86 +268,55 @@ func TestSSHIsBlockedInACL(t *testing.T) {
			assertSSHTimeout(t, client, peer)
		}
	}
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
}
func TestSSUserOnlyIsolation(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

-	scenario, err := NewScenario()
-	if err != nil {
-		t.Errorf("failed to create scenario: %s", err)
-	}
-
-	spec := map[string]int{
-		"useracl1": len(TailscaleVersions) - 5,
-		"useracl2": len(TailscaleVersions) - 5,
-	}
-
-	err = scenario.CreateHeadscaleEnv(spec,
-		[]tsic.Option{tsic.WithSSH()},
-		hsic.WithACLPolicy(
-			&policy.ACLPolicy{
-				Groups: map[string][]string{
-					"group:ssh1": {"useracl1"},
-					"group:ssh2": {"useracl2"},
-				},
-				ACLs: []policy.ACL{
-					{
-						Action:       "accept",
-						Sources:      []string{"*"},
-						Destinations: []string{"*:*"},
-					},
-				},
-				SSHs: []policy.SSH{
-					{
-						Action:       "accept",
-						Sources:      []string{"group:ssh1"},
-						Destinations: []string{"group:ssh1"},
-						Users:        []string{"ssh-it-user"},
-					},
-					{
-						Action:       "accept",
-						Sources:      []string{"group:ssh2"},
-						Destinations: []string{"group:ssh2"},
-						Users:        []string{"ssh-it-user"},
-					},
-				},
-			},
-		),
-		hsic.WithTestName("sshtwouseraclblock"),
-		hsic.WithConfigEnv(map[string]string{
-			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
-		}),
+	scenario := sshScenario(t,
+		&policy.ACLPolicy{
+			Groups: map[string][]string{
+				"group:ssh1": {"user1"},
+				"group:ssh2": {"user2"},
+			},
+			ACLs: []policy.ACL{
+				{
+					Action:       "accept",
+					Sources:      []string{"*"},
+					Destinations: []string{"*:*"},
+				},
+			},
+			SSHs: []policy.SSH{
+				{
+					Action:       "accept",
+					Sources:      []string{"group:ssh1"},
+					Destinations: []string{"group:ssh1"},
+					Users:        []string{"ssh-it-user"},
+				},
+				{
+					Action:       "accept",
+					Sources:      []string{"group:ssh2"},
+					Destinations: []string{"group:ssh2"},
+					Users:        []string{"ssh-it-user"},
+				},
+			},
+		},
+		len(TailscaleVersions)-5,
	)
-	if err != nil {
-		t.Errorf("failed to create headscale environment: %s", err)
-	}
+	defer scenario.Shutdown()

-	ssh1Clients, err := scenario.ListTailscaleClients("useracl1")
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	ssh1Clients, err := scenario.ListTailscaleClients("user1")
+	assertNoErrListClients(t, err)

-	ssh2Clients, err := scenario.ListTailscaleClients("useracl2")
-	if err != nil {
-		t.Errorf("failed to get clients: %s", err)
-	}
+	ssh2Clients, err := scenario.ListTailscaleClients("user2")
+	assertNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
-	if err != nil {
-		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
-	}
+	assertNoErrSync(t, err)

	_, err = scenario.ListTailscaleClientsFQDNs()
-	if err != nil {
-		t.Errorf("failed to get FQDNs: %s", err)
-	}
+	assertNoErrListFQDN(t, err)

	for _, client := range ssh1Clients {
		for _, peer := range ssh2Clients {
@@ -460,11 +357,6 @@ func TestSSUserOnlyIsolation(t *testing.T) {
			assertSSHHostname(t, client, peer)
		}
	}
-
-	err = scenario.Shutdown()
-	if err != nil {
-		t.Errorf("failed to tear down scenario: %s", err)
-	}
}
func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {
@@ -487,7 +379,7 @@ func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClien
	t.Helper()

	result, _, err := doSSH(t, client, peer)
-	assert.NoError(t, err)
+	assertNoErr(t, err)

	assert.Contains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
}

@@ -507,7 +399,7 @@ func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient
	t.Helper()

	result, stderr, err := doSSH(t, client, peer)
-	assert.NoError(t, err)
+	assertNoErr(t, err)

	assert.Empty(t, result)

View file

@@ -14,14 +14,18 @@ type TailscaleClient interface {
	Hostname() string
	Shutdown() error
	Version() string
-	Execute(command []string, options ...dockertestutil.ExecuteCommandOption) (string, string, error)
-	Up(loginServer, authKey string) error
-	UpWithLoginURL(loginServer string) (*url.URL, error)
+	Execute(
+		command []string,
+		options ...dockertestutil.ExecuteCommandOption,
+	) (string, string, error)
+	Login(loginServer, authKey string) error
+	LoginWithURL(loginServer string) (*url.URL, error)
	Logout() error
	IPs() ([]netip.Addr, error)
	FQDN() (string, error)
	Status() (*ipnstate.Status, error)
-	WaitForReady() error
+	WaitForNeedsLogin() error
+	WaitForRunning() error
	WaitForLogout() error
	WaitForPeers(expected int) error
	Ping(hostnameOrIP string, opts ...tsic.PingOption) error
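Read together, the renamed methods spell out a client lifecycle: WaitForNeedsLogin (tailscaled is up, unauthenticated), Login or LoginWithURL (authenticate), then WaitForRunning (member of the tailnet). A hypothetical helper showing the intended order; it is not part of the commit and is built only from the interface methods above:

func joinAndWait(c TailscaleClient, loginServer, authKey string) error {
	// Daemon has started and is waiting for credentials.
	if err := c.WaitForNeedsLogin(); err != nil {
		return err
	}

	// Authenticate against the control server with a pre-auth key.
	if err := c.Login(loginServer, authKey); err != nil {
		return err
	}

	// Block until the node is logged in and usable.
	return c.WaitForRunning()
}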

View file

@@ -34,9 +34,14 @@ var (
	errTailscaleWrongPeerCount         = errors.New("wrong peer count")
	errTailscaleCannotUpWithoutAuthkey = errors.New("cannot up without authkey")
	errTailscaleNotConnected           = errors.New("tailscale not connected")
+	errTailscaledNotReadyForLogin      = errors.New("tailscaled not ready for login")
	errTailscaleNotLoggedOut           = errors.New("tailscale not logged out")
)

+func errTailscaleStatus(hostname string, err error) error {
+	return fmt.Errorf("%s failed to fetch tailscale status: %w", hostname, err)
+}
// TailscaleInContainer is an implementation of TailscaleClient which
// sets up a Tailscale instance inside a container.
type TailscaleInContainer struct {
@@ -165,7 +170,7 @@ func New(
		network: network,

		withEntrypoint: []string{
-			"/bin/bash",
+			"/bin/sh",
			"-c",
			"/bin/sleep 3 ; update-ca-certificates ; tailscaled --tun=tsdev",
		},
@@ -204,16 +209,48 @@ func New(
		return nil, err
	}

-	container, err := pool.BuildAndRunWithBuildOptions(
-		createTailscaleBuildOptions(version),
-		tailscaleOptions,
-		dockertestutil.DockerRestartPolicy,
-		dockertestutil.DockerAllowLocalIPv6,
-		dockertestutil.DockerAllowNetworkAdministration,
-	)
+	var container *dockertest.Resource
+	switch version {
+	case "head":
+		buildOptions := &dockertest.BuildOptions{
+			Dockerfile: "Dockerfile.tailscale-HEAD",
+			ContextDir: dockerContextPath,
+			BuildArgs:  []docker.BuildArg{},
+		}
+
+		container, err = pool.BuildAndRunWithBuildOptions(
+			buildOptions,
+			tailscaleOptions,
+			dockertestutil.DockerRestartPolicy,
+			dockertestutil.DockerAllowLocalIPv6,
+			dockertestutil.DockerAllowNetworkAdministration,
+		)
+	case "unstable":
+		tailscaleOptions.Repository = "tailscale/tailscale"
+		tailscaleOptions.Tag = version
+
+		container, err = pool.RunWithOptions(
+			tailscaleOptions,
+			dockertestutil.DockerRestartPolicy,
+			dockertestutil.DockerAllowLocalIPv6,
+			dockertestutil.DockerAllowNetworkAdministration,
+		)
+	default:
+		tailscaleOptions.Repository = "tailscale/tailscale"
+		tailscaleOptions.Tag = "v" + version
+
+		container, err = pool.RunWithOptions(
+			tailscaleOptions,
+			dockertestutil.DockerRestartPolicy,
+			dockertestutil.DockerAllowLocalIPv6,
+			dockertestutil.DockerAllowNetworkAdministration,
+		)
+	}
	if err != nil {
		return nil, fmt.Errorf(
-			"could not start tailscale container (version: %s): %w",
+			"%s could not start tailscale container (version: %s): %w",
+			hostname,
			version,
			err,
		)
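Only "head" still builds an image; every released version is now pulled from Docker Hub, with the tag derived from the version string. Hypothetical calls as a sketch; the pool/network arguments stand in for whatever the caller already holds, and the argument order follows the tsic.New call seen earlier in this diff:

// "head"     -> builds Dockerfile.tailscale-HEAD from dockerContextPath
// "unstable" -> pulls tailscale/tailscale:unstable
// "1.30.2"   -> pulls tailscale/tailscale:v1.30.2
client, err := tsic.New(pool, "1.30.2", network)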
@@ -270,7 +307,7 @@ func (t *TailscaleInContainer) Execute(
		options...,
	)
	if err != nil {
-		log.Printf("command stderr: %s\n", stderr)
+		// log.Printf("command stderr: %s\n", stderr)

		if stdout != "" {
			log.Printf("command stdout: %s\n", stdout)
@@ -288,18 +325,15 @@ func (t *TailscaleInContainer) Execute(

// Up runs the login routine on the given Tailscale instance.
// This login mechanism uses the authorised key for authentication.
-func (t *TailscaleInContainer) Up(
+func (t *TailscaleInContainer) Login(
	loginServer, authKey string,
) error {
	command := []string{
		"tailscale",
		"up",
-		"-login-server",
-		loginServer,
-		"--authkey",
-		authKey,
-		"--hostname",
-		t.hostname,
+		"--login-server=" + loginServer,
+		"--authkey=" + authKey,
+		"--hostname=" + t.hostname,
	}

	if t.withSSH {
@@ -313,7 +347,12 @@ func (t *TailscaleInContainer) Up(
	}

	if _, _, err := t.Execute(command); err != nil {
-		return fmt.Errorf("failed to join tailscale client: %w", err)
+		return fmt.Errorf(
+			"%s failed to join tailscale client (%s): %w",
+			t.hostname,
+			strings.Join(command, " "),
+			err,
+		)
	}

	return nil
@@ -321,16 +360,14 @@ func (t *TailscaleInContainer) Up(

// Up runs the login routine on the given Tailscale instance.
// This login mechanism uses web + command line flow for authentication.
-func (t *TailscaleInContainer) UpWithLoginURL(
+func (t *TailscaleInContainer) LoginWithURL(
	loginServer string,
) (*url.URL, error) {
	command := []string{
		"tailscale",
		"up",
-		"-login-server",
-		loginServer,
-		"--hostname",
-		t.hostname,
+		"--login-server=" + loginServer,
+		"--hostname=" + t.hostname,
	}

	_, stderr, err := t.Execute(command)
@@ -378,7 +415,7 @@ func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {
	result, _, err := t.Execute(command)
	if err != nil {
-		return []netip.Addr{}, fmt.Errorf("failed to join tailscale client: %w", err)
+		return []netip.Addr{}, fmt.Errorf("%s failed to join tailscale client: %w", t.hostname, err)
	}

	for _, address := range strings.Split(result, "\n") {
@@ -432,19 +469,37 @@ func (t *TailscaleInContainer) FQDN() (string, error) {
	return status.Self.DNSName, nil
}

-// WaitForReady blocks until the Tailscale (tailscaled) instance is ready
-// to login or be used.
-func (t *TailscaleInContainer) WaitForReady() error {
+// WaitForNeedsLogin blocks until the Tailscale (tailscaled) instance has
+// started and needs to be logged into.
+func (t *TailscaleInContainer) WaitForNeedsLogin() error {
	return t.pool.Retry(func() error {
		status, err := t.Status()
		if err != nil {
-			return fmt.Errorf("failed to fetch tailscale status: %w", err)
+			return errTailscaleStatus(t.hostname, err)
		}

-		if status.CurrentTailnet != nil {
+		// ipnstate.Status.CurrentTailnet was added in Tailscale 1.22.0
+		// https://github.com/tailscale/tailscale/pull/3865
+		//
+		// Before that, we can check the BackendState to see if the
+		// tailscaled daemon is connected to the control system.
+		if status.BackendState == "NeedsLogin" {
			return nil
		}

+		return errTailscaledNotReadyForLogin
+	})
+}
+
+// WaitForRunning blocks until the Tailscale (tailscaled) instance is logged in
+// and ready to be used.
+func (t *TailscaleInContainer) WaitForRunning() error {
+	return t.pool.Retry(func() error {
+		status, err := t.Status()
+		if err != nil {
+			return errTailscaleStatus(t.hostname, err)
+		}
+
		// ipnstate.Status.CurrentTailnet was added in Tailscale 1.22.0
		// https://github.com/tailscale/tailscale/pull/3865
		//
@@ -460,10 +515,10 @@ func (t *TailscaleInContainer) WaitForReady() error {

// WaitForLogout blocks until the Tailscale instance has logged out.
func (t *TailscaleInContainer) WaitForLogout() error {
-	return t.pool.Retry(func() error {
+	return fmt.Errorf("%s err: %w", t.hostname, t.pool.Retry(func() error {
		status, err := t.Status()
		if err != nil {
-			return fmt.Errorf("failed to fetch tailscale status: %w", err)
+			return errTailscaleStatus(t.hostname, err)
		}

		if status.CurrentTailnet == nil {
@@ -471,7 +526,7 @@ func (t *TailscaleInContainer) WaitForLogout() error {
		}

		return errTailscaleNotLoggedOut
-	})
+	}))
}
// WaitForPeers blocks until N number of peers is present in the
@@ -480,11 +535,17 @@ func (t *TailscaleInContainer) WaitForPeers(expected int) error {
	return t.pool.Retry(func() error {
		status, err := t.Status()
		if err != nil {
-			return fmt.Errorf("failed to fetch tailscale status: %w", err)
+			return errTailscaleStatus(t.hostname, err)
		}

		if peers := status.Peers(); len(peers) != expected {
-			return errTailscaleWrongPeerCount
+			return fmt.Errorf(
+				"%s err: %w expected %d, got %d",
+				t.hostname,
+				errTailscaleWrongPeerCount,
+				expected,
+				len(peers),
+			)
		}

		return nil
@@ -683,47 +744,3 @@ func (t *TailscaleInContainer) Curl(url string, opts ...CurlOption) (string, err
func (t *TailscaleInContainer) WriteFile(path string, data []byte) error {
	return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
}
-
-func createTailscaleBuildOptions(version string) *dockertest.BuildOptions {
-	var tailscaleBuildOptions *dockertest.BuildOptions
-	switch version {
-	case "head":
-		tailscaleBuildOptions = &dockertest.BuildOptions{
-			Dockerfile: "Dockerfile.tailscale-HEAD",
-			ContextDir: dockerContextPath,
-			BuildArgs:  []docker.BuildArg{},
-		}
-	case "unstable":
-		tailscaleBuildOptions = &dockertest.BuildOptions{
-			Dockerfile: "Dockerfile.tailscale",
-			ContextDir: dockerContextPath,
-			BuildArgs: []docker.BuildArg{
-				{
-					Name:  "TAILSCALE_VERSION",
-					Value: "*", // Installs the latest version https://askubuntu.com/a/824926
-				},
-				{
-					Name:  "TAILSCALE_CHANNEL",
-					Value: "unstable",
-				},
-			},
-		}
-	default:
-		tailscaleBuildOptions = &dockertest.BuildOptions{
-			Dockerfile: "Dockerfile.tailscale",
-			ContextDir: dockerContextPath,
-			BuildArgs: []docker.BuildArg{
-				{
-					Name:  "TAILSCALE_VERSION",
-					Value: version,
-				},
-				{
-					Name:  "TAILSCALE_CHANNEL",
-					Value: "stable",
-				},
-			},
-		}
-	}
-
-	return tailscaleBuildOptions
-}

View file

@@ -12,6 +12,53 @@ const (
	derpPingCount = 10
)

+func assertNoErr(t *testing.T, err error) {
+	t.Helper()
+	assertNoErrf(t, "unexpected error: %s", err)
+}
+
+func assertNoErrf(t *testing.T, msg string, err error) {
+	t.Helper()
+	if err != nil {
+		t.Fatalf(msg, err)
+	}
+}
+
+func assertNoErrHeadscaleEnv(t *testing.T, err error) {
+	t.Helper()
+	assertNoErrf(t, "failed to create headscale environment: %s", err)
+}
+
+func assertNoErrGetHeadscale(t *testing.T, err error) {
+	t.Helper()
+	assertNoErrf(t, "failed to get headscale: %s", err)
+}
+
+func assertNoErrListClients(t *testing.T, err error) {
+	t.Helper()
+	assertNoErrf(t, "failed to list clients: %s", err)
+}
+
+func assertNoErrListClientIPs(t *testing.T, err error) {
+	t.Helper()
+	assertNoErrf(t, "failed to get client IPs: %s", err)
+}
+
+func assertNoErrSync(t *testing.T, err error) {
+	t.Helper()
+	assertNoErrf(t, "failed to have all clients sync up: %s", err)
+}
+
+func assertNoErrListFQDN(t *testing.T, err error) {
+	t.Helper()
+	assertNoErrf(t, "failed to list FQDNs: %s", err)
+}
+
+func assertNoErrLogout(t *testing.T, err error) {
+	t.Helper()
+	assertNoErrf(t, "failed to log out tailscale nodes: %s", err)
+}
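Each helper wraps assertNoErrf, which forwards to t.Fatalf, so every call site keeps the fail-fast behaviour in a single line. A short sketch of typical use, mirroring the test changes above:

func TestExample(t *testing.T) {
	scenario, err := NewScenario()
	assertNoErr(t, err) // same as: if err != nil { t.Fatalf("unexpected error: %s", err) }
	defer scenario.Shutdown()

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)
}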
func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int {
	t.Helper()
	success := 0
@@ -20,7 +67,7 @@ func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int
		for _, addr := range addrs {
			err := client.Ping(addr)
			if err != nil {
-				t.Errorf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
+				t.Fatalf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
			} else {
				success++
			}
@@ -47,7 +94,7 @@ func pingDerpAllHelper(t *testing.T, clients []TailscaleClient, addrs []string)
				tsic.WithPingUntilDirect(false),
			)
			if err != nil {
-				t.Errorf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
+				t.Fatalf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
			} else {
				success++
			}