diff --git a/.coderabbit.yaml b/.coderabbit.yaml new file mode 100644 index 00000000..614f851b --- /dev/null +++ b/.coderabbit.yaml @@ -0,0 +1,15 @@ +# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json +language: "en-GB" +early_access: false +reviews: + profile: "chill" + request_changes_workflow: false + high_level_summary: true + poem: true + review_status: true + collapse_walkthrough: false + auto_review: + enabled: true + drafts: true +chat: + auto_reply: true diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 8563e7af..00000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -name: "Bug report" -about: "Create a bug report to help us improve" -title: "" -labels: ["bug"] -assignees: "" ---- - - - -## Bug description - - - -## Environment - - - -- OS: -- Headscale version: -- Tailscale version: - - - -- [ ] Headscale is behind a (reverse) proxy -- [ ] Headscale runs in a container - -## To Reproduce - - - -## Logs and attachments - - diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 00000000..a7afb6d3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,83 @@ +name: 🐞 Bug +description: File a bug/issue +title: "[Bug] " +labels: ["bug", "needs triage"] +body: + - type: checkboxes + attributes: + label: Is this a support request? + description: This issue tracker is for bugs and feature requests only. If you need help, please use ask in our Discord community + options: + - label: This is not a support request + required: true + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the bug you encountered. 
+ options: + - label: I have searched the existing issues + required: true + - type: textarea + attributes: + label: Current Behavior + description: A concise description of what you're experiencing. + validations: + required: true + - type: textarea + attributes: + label: Expected Behavior + description: A concise description of what you expected to happen. + validations: + required: true + - type: textarea + attributes: + label: Steps To Reproduce + description: Steps to reproduce the behavior. + placeholder: | + 1. In this environment... + 1. With this config... + 1. Run '...' + 1. See error... + validations: + required: true + - type: textarea + attributes: + label: Environment + description: | + examples: + - **OS**: Ubuntu 20.04 + - **Headscale version**: 0.22.3 + - **Tailscale version**: 1.64.0 + value: | + - OS: + - Headscale version: + - Tailscale version: + render: markdown + validations: + required: true + - type: checkboxes + attributes: + label: Runtime environment + options: + - label: Headscale is behind a (reverse) proxy + required: false + - label: Headscale runs in a container + required: false + - type: textarea + attributes: + label: Anything else? + description: | + Links? References? Anything that will give us more context about the issue you are encountering! + + - Client netmap dump (see below) + - ACL configuration + - Headscale configuration + + Dump the netmap of tailscale clients: + `tailscale debug netmap > DESCRIPTIVE_NAME.json` + + Please provide information describing the netmap, which client, which headscale version etc. + + Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 
+ validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 92c51b8f..00000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -name: "Feature request" -about: "Suggest an idea for headscale" -title: "" -labels: ["enhancement"] -assignees: "" ---- - -<!-- -We typically have a clear roadmap for what we want to improve and reserve the right -to close feature requests that does not fit in the roadmap, or fit with the scope -of the project, or we actually want to implement ourselves. - -Headscale is a multinational community across the globe. Our language is English. -All bug reports needs to be in English. ---> - -## Why - -<!-- Include the reason, why you would need the feature. E.g. what problem - does it solve? Or which workflow is currently frustrating and will be improved by - this? --> - -## Description - -<!-- A clear and precise description of what new or changed feature you want. --> diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 00000000..70f1a146 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,36 @@ +name: 🚀 Feature Request +description: Suggest an idea for Headscale +title: "[Feature] <title>" +labels: [enhancement] +body: + - type: textarea + attributes: + label: Use case + description: Please describe the use case for this feature. + placeholder: | + <!-- Include the reason, why you would need the feature. E.g. what problem + does it solve? Or which workflow is currently frustrating and will be improved by + this? --> + validations: + required: true + - type: textarea + attributes: + label: Description + description: A clear and precise description of what new or changed feature you want. 
+ validations: + required: true + - type: checkboxes + attributes: + label: Contribution + description: Are you willing to contribute to the implementation of this feature? + options: + - label: I can write the design doc for this feature + required: false + - label: I can contribute this feature + required: false + - type: textarea + attributes: + label: How can it be implemented? + description: Free text for your ideas on how this feature could be implemented. + validations: + required: false diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index d4e4f4f9..9d8e731d 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -12,7 +12,7 @@ If you find mistakes in the documentation, please submit a fix to the documentat <!-- Please tick if the following things apply. You… --> -- [ ] read the [CONTRIBUTING guidelines](README.md#contributing) +- [ ] have read the [CONTRIBUTING.md](./CONTRIBUTING.md) file - [ ] raised a GitHub issue or discussed it on the projects chat beforehand - [ ] added unit tests - [ ] added integration tests diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9d4b9925..09c5cd34 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -16,31 +16,29 @@ jobs: build: runs-on: ubuntu-latest permissions: write-all - steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 2 - - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v34 + uses: dorny/paths-filter@v3 with: - files: | - *.nix - go.* - **/*.go - integration_test/ - config-example.yaml - + filters: | + files: + - '*.nix' + - 'go.*' + - '**/*.go' + - 'integration_test/' + - 'config-example.yaml' - uses: DeterminateSystems/nix-installer-action@main - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' - uses: DeterminateSystems/magic-nix-cache-action@main - if: 
steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' - name: Run build id: build - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' run: | nix build |& tee build-result BUILD_STATUS="${PIPESTATUS[0]}" @@ -66,8 +64,8 @@ jobs: body: 'Nix build failed with wrong gosum, please update "vendorSha256" (${{ steps.build.outputs.OLD_HASH }}) for the "headscale" package in flake.nix with the new SHA: ${{ steps.build.outputs.NEW_HASH }}' }) - - uses: actions/upload-artifact@v3 - if: steps.changed-files.outputs.any_changed == 'true' + - uses: actions/upload-artifact@v4 + if: steps.changed-files.outputs.files == 'true' with: name: headscale-linux path: result/bin/headscale diff --git a/.github/workflows/check-tests.yaml b/.github/workflows/check-tests.yaml index c085f178..b1b94532 100644 --- a/.github/workflows/check-tests.yaml +++ b/.github/workflows/check-tests.yaml @@ -15,22 +15,22 @@ jobs: fetch-depth: 2 - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v34 + uses: dorny/paths-filter@v3 with: - files: | - *.nix - go.* - **/*.go - integration_test/ - config-example.yaml - + filters: | + files: + - '*.nix' + - 'go.*' + - '**/*.go' + - 'integration_test/' + - 'config-example.yaml' - uses: DeterminateSystems/nix-installer-action@main - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' - uses: DeterminateSystems/magic-nix-cache-action@main - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' - name: Generate and check integration tests - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' run: | nix develop --command bash -c "cd cmd/gh-action-integration-generator/ && go generate" git diff --exit-code .github/workflows/test-integration.yaml diff --git a/.github/workflows/contributors.yml 
b/.github/workflows/contributors.yml deleted file mode 100644 index 4b05ffd2..00000000 --- a/.github/workflows/contributors.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Contributors - -on: - push: - branches: - - main - workflow_dispatch: -jobs: - add-contributors: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Delete upstream contributor branch - # Allow continue on failure to account for when the - # upstream branch is deleted or does not exist. - continue-on-error: true - run: git push origin --delete update-contributors - - name: Create up-to-date contributors branch - run: git checkout -B update-contributors - - name: Push empty contributors branch - run: git push origin update-contributors - - name: Switch back to main - run: git checkout main - - uses: BobAnkh/add-contributors@v0.2.2 - with: - CONTRIBUTOR: "## Contributors" - COLUMN_PER_ROW: "6" - ACCESS_TOKEN: ${{secrets.GITHUB_TOKEN}} - IMG_WIDTH: "100" - FONT_SIZE: "14" - PATH: "/README.md" - COMMIT_MESSAGE: "docs(README): update contributors" - AVATAR_SHAPE: "round" - BRANCH: "update-contributors" - PULL_REQUEST: "main" diff --git a/.github/workflows/docs-test.yml b/.github/workflows/docs-test.yml new file mode 100644 index 00000000..b0e60131 --- /dev/null +++ b/.github/workflows/docs-test.yml @@ -0,0 +1,27 @@ +name: Test documentation build + +on: [pull_request] + +concurrency: + group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install python + uses: actions/setup-python@v4 + with: + python-version: 3.x + - name: Setup cache + uses: actions/cache@v2 + with: + key: ${{ github.ref }} + path: .cache + - name: Setup dependencies + run: pip install -r docs/requirements.txt + - name: Build docs + run: mkdocs build --strict diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 1d19ed3d..c5cddef7 
100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -1,4 +1,5 @@ name: Build documentation + on: push: branches: @@ -15,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install python uses: actions/setup-python@v4 with: @@ -30,16 +31,22 @@ jobs: - name: Build docs run: mkdocs build --strict - name: Upload artifact - uses: actions/upload-pages-artifact@v1 + uses: actions/upload-pages-artifact@v3 with: path: ./site + deploy: environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} + permissions: + pages: write + id-token: write runs-on: ubuntu-latest needs: build steps: + - name: Configure Pages + uses: actions/configure-pages@v4 - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v1 + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/gh-actions-updater.yaml b/.github/workflows/gh-actions-updater.yaml index 6b44051a..48d0fabd 100644 --- a/.github/workflows/gh-actions-updater.yaml +++ b/.github/workflows/gh-actions-updater.yaml @@ -1,6 +1,5 @@ name: GitHub Actions Version Updater -# Controls when the action will run. on: schedule: # Automatically run on every Sunday @@ -11,13 +10,13 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: # [Required] Access token with `workflow` scope. token: ${{ secrets.WORKFLOW_SECRET }} - name: Run GitHub Actions Version Updater - uses: saadmk11/github-actions-version-updater@v0.7.1 + uses: saadmk11/github-actions-version-updater@v0.8.1 with: # [Required] Access token with `workflow` scope. 
token: ${{ secrets.WORKFLOW_SECRET }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ade5ffc0..94953fbc 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,7 +1,6 @@ ---- name: Lint -on: [push, pull_request] +on: [pull_request] concurrency: group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} @@ -11,63 +10,64 @@ jobs: golangci-lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 2 - - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v34 + uses: dorny/paths-filter@v3 with: - files: | - *.nix - go.* - **/*.go - integration_test/ - config-example.yaml - + filters: | + files: + - '*.nix' + - 'go.*' + - '**/*.go' + - 'integration_test/' + - 'config-example.yaml' - uses: DeterminateSystems/nix-installer-action@main + if: steps.changed-files.outputs.files == 'true' - uses: DeterminateSystems/magic-nix-cache-action@main + if: steps.changed-files.outputs.files == 'true' - name: golangci-lint - if: steps.changed-files.outputs.any_changed == 'true' - run: nix develop --command -- golangci-lint run --new-from-rev=${{github.event.pull_request.base.sha}} --out-format=github-actions . 
+ if: steps.changed-files.outputs.files == 'true' + run: nix develop --command -- golangci-lint run --new-from-rev=${{github.event.pull_request.base.sha}} --out-format=colored-line-number prettier-lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: fetch-depth: 2 - - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v14.1 + uses: dorny/paths-filter@v3 with: - files: | - *.nix - **/*.md - **/*.yml - **/*.yaml - **/*.ts - **/*.js - **/*.sass - **/*.css - **/*.scss - **/*.html - + filters: | + files: + - '*.nix' + - '**/*.md' + - '**/*.yml' + - '**/*.yaml' + - '**/*.ts' + - '**/*.js' + - '**/*.sass' + - '**/*.css' + - '**/*.scss' + - '**/*.html' - uses: DeterminateSystems/nix-installer-action@main + if: steps.changed-files.outputs.files == 'true' - uses: DeterminateSystems/magic-nix-cache-action@main + if: steps.changed-files.outputs.files == 'true' - name: Prettify code - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' run: nix develop --command -- prettier --no-error-on-unmatched-pattern --ignore-unknown --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html} proto-lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - + - uses: actions/checkout@v4 - uses: DeterminateSystems/nix-installer-action@main - uses: DeterminateSystems/magic-nix-cache-action@main diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7929ac56..3554677f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,18 +12,18 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Login to DockerHub - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GHCR - uses: docker/login-action@v1 + uses: 
docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index c30571c4..592929cb 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,5 @@ name: Close inactive issues + on: schedule: - cron: "30 1 * * *" @@ -10,7 +11,7 @@ jobs: issues: write pull-requests: write steps: - - uses: actions/stale@v5 + - uses: actions/stale@v9 with: days-before-issue-stale: 90 days-before-issue-close: 7 @@ -19,4 +20,5 @@ jobs: close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." days-before-pr-stale: -1 days-before-pr-close: -1 + exempt-issue-labels: "no-stale-bot" repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test-integration.yaml b/.github/workflows/test-integration.yaml index bad05bce..d6c7eff2 100644 --- a/.github/workflows/test-integration.yaml +++ b/.github/workflows/test-integration.yaml @@ -18,6 +18,7 @@ jobs: - TestACLNamedHostsCanReachBySubnet - TestACLNamedHostsCanReach - TestACLDevice1CanAccessDevice2 + - TestPolicyUpdateWhileRunningWithCLIInDatabase - TestOIDCAuthenticationPingAll - TestOIDCExpireNodesBasedOnTokenExpiry - TestAuthWebFlowAuthenticationPingAll @@ -26,6 +27,7 @@ jobs: - TestPreAuthKeyCommand - TestPreAuthKeyCommandWithoutExpiry - TestPreAuthKeyCommandReusableEphemeral + - TestPreAuthKeyCorrectUserLoggedInCommand - TestApiKeyCommand - TestNodeTagCommand - TestNodeAdvertiseTagNoACLCommand @@ -34,19 +36,27 @@ jobs: - TestNodeExpireCommand - TestNodeRenameCommand - TestNodeMoveCommand + - TestPolicyCommand + - TestPolicyBrokenConfigCommand + - TestResolveMagicDNS + - TestValidateResolvConf - TestDERPServerScenario - TestPingAllByIP - TestPingAllByIPPublicDERP - TestAuthKeyLogoutAndRelogin - TestEphemeral + - TestEphemeralInAlternateTimezone + - TestEphemeral2006DeletedTooQuickly - TestPingAllByHostname - TestTaildrop - - TestResolveMagicDNS - 
TestExpireNode - - TestNodeOnlineLastSeenStatus + - TestNodeOnlineStatus + - TestPingAllByIPManyUpDown + - Test2118DeletingOnlineNodePanics - TestEnablingRoutes - TestHASubnetRouterFailover - TestEnableDisableAutoApprovedRoute + - TestAutoApprovedSubRoute2068 - TestSubnetRouteACL - TestHeadscale - TestCreateTailscale @@ -61,23 +71,27 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 2 - - uses: DeterminateSystems/nix-installer-action@main - - uses: DeterminateSystems/magic-nix-cache-action@main - - uses: satackey/action-docker-layer-caching@main - continue-on-error: true - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v34 + uses: dorny/paths-filter@v3 with: - files: | - *.nix - go.* - **/*.go - integration_test/ - config-example.yaml + filters: | + files: + - '*.nix' + - 'go.*' + - '**/*.go' + - 'integration_test/' + - 'config-example.yaml' + - uses: DeterminateSystems/nix-installer-action@main + if: steps.changed-files.outputs.files == 'true' + - uses: DeterminateSystems/magic-nix-cache-action@main + if: steps.changed-files.outputs.files == 'true' + - uses: satackey/action-docker-layer-caching@main + if: steps.changed-files.outputs.files == 'true' + continue-on-error: true - name: Run Integration Test uses: Wandalen/wretry.action@master - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' env: USE_POSTGRES: ${{ matrix.database == 'postgres' && '1' || '0' }} with: @@ -98,12 +112,12 @@ jobs: -parallel 1 \ -run "^${{ matrix.test }}$" - uses: actions/upload-artifact@v4 - if: always() && steps.changed-files.outputs.any_changed == 'true' + if: always() && steps.changed-files.outputs.files == 'true' with: name: ${{ matrix.test }}-${{matrix.database}}-logs path: "control_logs/*.log" - uses: actions/upload-artifact@v4 - if: always() && steps.changed-files.outputs.any_changed == 'true' + if: always() && steps.changed-files.outputs.files == 'true' with: name: ${{ matrix.test 
}}-${{matrix.database}}-pprof path: "control_logs/*.pprof.tar" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c2700d17..f4659332 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -11,26 +11,27 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 2 - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v34 + uses: dorny/paths-filter@v3 with: - files: | - *.nix - go.* - **/*.go - integration_test/ - config-example.yaml + filters: | + files: + - '*.nix' + - 'go.*' + - '**/*.go' + - 'integration_test/' + - 'config-example.yaml' - uses: DeterminateSystems/nix-installer-action@main - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' - uses: DeterminateSystems/magic-nix-cache-action@main - if: steps.changed-files.outputs.any_changed == 'true' + if: steps.changed-files.outputs.files == 'true' - name: Run tests - if: steps.changed-files.outputs.any_changed == 'true' - run: nix develop --check + if: steps.changed-files.outputs.files == 'true' + run: nix develop --command -- gotestsum diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 6fcea23e..c04bb9cc 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -9,7 +9,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Nix uses: DeterminateSystems/nix-installer-action@main - name: Update flake.lock diff --git a/.gitignore b/.gitignore index f6e506bc..1662d7f2 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ dist/ /headscale config.json config.yaml +config*.yaml derp.yaml *.hujson *.key diff --git a/.golangci.yaml b/.golangci.yaml index 65a88511..cd41a4df 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -12,19 +12,13 @@ linters: disable: - depguard - - 
exhaustivestruct - revive - lll - - interfacer - - scopelint - - maligned - - golint - gofmt - gochecknoglobals - gochecknoinits - gocognit - funlen - - exhaustivestruct - tagliatelle - godox - ireturn @@ -34,13 +28,6 @@ linters: - musttag # causes issues with imported libs - depguard - # deprecated - - structcheck # replaced by unused - - ifshort # deprecated by the owner - - varcheck # replaced by unused - - nosnakecase # replaced by revive - - deadcode # replaced by unused - # We should strive to enable these: - wrapcheck - dupl diff --git a/.goreleaser.yml b/.goreleaser.yml index b1df31c7..4aabde4b 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,4 +1,5 @@ --- +version: 2 before: hooks: - go mod tidy -compat=1.22 @@ -135,7 +136,7 @@ kos: - id: ghcr-debug repository: ghcr.io/juanfont/headscale bare: true - base_image: "debian:12" + base_image: gcr.io/distroless/base-debian12:debug build: headscale main: ./cmd/headscale env: @@ -160,7 +161,7 @@ kos: - id: dockerhub-debug build: headscale - base_image: "debian:12" + base_image: gcr.io/distroless/base-debian12:debug repository: headscale/headscale bare: true platforms: @@ -184,7 +185,7 @@ kos: checksum: name_template: "checksums.txt" snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/.prettierignore b/.prettierignore index 146ae4dd..d455d02c 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1 +1,6 @@ .github/workflows/test-integration-v2* +docs/dns-records.md +docs/running-headscale-container.md +docs/running-headscale-linux-manual.md +docs/running-headscale-linux.md +docs/running-headscale-openbsd.md diff --git a/CHANGELOG.md b/CHANGELOG.md index c0186961..d9818217 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,10 +26,10 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Code reorganisation, a lot of code has moved, please review the following PRs accordingly 
[#1473](https://github.com/juanfont/headscale/pull/1473) - Change the structure of database configuration, see [config-example.yaml](./config-example.yaml) for the new structure. [#1700](https://github.com/juanfont/headscale/pull/1700) - Old structure has been remove and the configuration _must_ be converted. - - Adds additional configuration for PostgreSQL for setting max open, idle conection and idle connection lifetime. + - Adds additional configuration for PostgreSQL for setting max open, idle connection and idle connection lifetime. - API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553) - Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611) - - The latest supported client is 1.38 + - The oldest supported client is 1.42 - Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564) - If no DERP is configured, the server will fail to start, this can be because it cannot load the DERPMap from file or url. - Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611) @@ -39,6 +39,16 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - `/var/lib/headscale` and `/var/run/headscale` is no longer created automatically, see [container docs](./docs/running-headscale-container.md) - Prefixes are now defined per v4 and v6 range. [#1756](https://github.com/juanfont/headscale/pull/1756) - `ip_prefixes` option is now `prefixes.v4` and `prefixes.v6` + - `prefixes.allocation` can be set to assign IPs at `sequential` or `random`. [#1869](https://github.com/juanfont/headscale/pull/1869) +- MagicDNS domains no longer contain usernames []() + - This is in preparation to fix Headscale's implementation of tags which currently does not correctly remove the link between a tagged device and a user. 
As tagged devices will not have a user, this will require a change to the DNS generation, removing the username, see [#1369](https://github.com/juanfont/headscale/issues/1369) for more information. + - `use_username_in_magic_dns` can be used to turn this behaviour on again, but note that this option _will be removed_ when tags are fixed. + - dns.base_domain can no longer be the same as (or part of) server_url. + - This option brings Headscale's behaviour in line with Tailscale. +- YAML files are no longer supported for headscale policy. [#1792](https://github.com/juanfont/headscale/pull/1792) + - HuJSON is now the only supported format for policy. +- DNS configuration has been restructured [#2034](https://github.com/juanfont/headscale/pull/2034) + - Please review the new [config-example.yaml](./config-example.yaml) for the new structure. ### Changes @@ -53,6 +63,18 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259) - Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565) - Add support for deleting api keys [#1702](https://github.com/juanfont/headscale/pull/1702) +- Add command to backfill IP addresses for nodes missing IPs from configured prefixes. 
[#1869](https://github.com/juanfont/headscale/pull/1869) +- Log available update as warning [#1877](https://github.com/juanfont/headscale/pull/1877) +- Add `autogroup:internet` to Policy [#1917](https://github.com/juanfont/headscale/pull/1917) +- Restore foreign keys and add constraints [#1562](https://github.com/juanfont/headscale/pull/1562) +- Make registration page easier to use on mobile devices +- Make write-ahead-log default on and configurable for SQLite [#1985](https://github.com/juanfont/headscale/pull/1985) +- Add APIs for managing headscale policy. [#1792](https://github.com/juanfont/headscale/pull/1792) +- Fix for registering nodes using preauthkeys when running on a postgres database in a non-UTC timezone. [#764](https://github.com/juanfont/headscale/issues/764) +- Make sure integration tests cover postgres for all scenarios +- CLI commands (all except `serve`) only require minimal configuration, no more errors or warnings from unset settings [#2109](https://github.com/juanfont/headscale/pull/2109) +- CLI results are now consistently sent to stdout and errors to stderr [#2109](https://github.com/juanfont/headscale/pull/2109) +- Fix issue where shutting down headscale would hang [#2113](https://github.com/juanfont/headscale/pull/2113) ## 0.22.3 (2023-05-12) @@ -65,7 +87,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ ### Changes - Add environment flags to enable pprof (profiling) [#1382](https://github.com/juanfont/headscale/pull/1382) - - Profiles are continously generated in our integration tests. + - Profiles are continuously generated in our integration tests. 
- Fix systemd service file location in `.deb` packages [#1391](https://github.com/juanfont/headscale/pull/1391) - Improvements on Noise implementation [#1379](https://github.com/juanfont/headscale/pull/1379) - Replace node filter logic, ensuring nodes with access can see eachother [#1381](https://github.com/juanfont/headscale/pull/1381) @@ -156,7 +178,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - SSH ACLs status: - Support `accept` and `check` (SSH can be enabled and used for connecting and authentication) - Rejecting connections **are not supported**, meaning that if you enable SSH, then assume that _all_ `ssh` connections **will be allowed**. - - If you decied to try this feature, please carefully managed permissions by blocking port `22` with regular ACLs or do _not_ set `--ssh` on your clients. + - If you decided to try this feature, please carefully managed permissions by blocking port `22` with regular ACLs or do _not_ set `--ssh` on your clients. - We are currently improving our testing of the SSH ACLs, help us get an overview by testing and giving feedback. - This feature should be considered dangerous and it is disabled by default. Enable by setting `HEADSCALE_EXPERIMENTAL_FEATURE_SSH=1`. 
@@ -206,7 +228,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ ### Changes - Updated dependencies (including the library that lacked armhf support) [#722](https://github.com/juanfont/headscale/pull/722) -- Fix missing group expansion in function `excludeCorretlyTaggedNodes` [#563](https://github.com/juanfont/headscale/issues/563) +- Fix missing group expansion in function `excludeCorrectlyTaggedNodes` [#563](https://github.com/juanfont/headscale/issues/563) - Improve registration protocol implementation and switch to NodeKey as main identifier [#725](https://github.com/juanfont/headscale/pull/725) - Add ability to connect to PostgreSQL via unix socket [#734](https://github.com/juanfont/headscale/pull/734) @@ -226,7 +248,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Fix send on closed channel crash in polling [#542](https://github.com/juanfont/headscale/pull/542) - Fixed spurious calls to setLastStateChangeToNow from ephemeral nodes [#566](https://github.com/juanfont/headscale/pull/566) - Add command for moving nodes between namespaces [#362](https://github.com/juanfont/headscale/issues/362) -- Added more configuration parameters for OpenID Connect (scopes, free-form paramters, domain and user allowlist) +- Added more configuration parameters for OpenID Connect (scopes, free-form parameters, domain and user allowlist) - Add command to set tags on a node [#525](https://github.com/juanfont/headscale/issues/525) - Add command to view tags of nodes [#356](https://github.com/juanfont/headscale/issues/356) - Add --all (-a) flag to enable routes command [#360](https://github.com/juanfont/headscale/issues/360) @@ -274,10 +296,10 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Fix a bug were the same IP could be assigned to multiple hosts if joined in quick succession [#346](https://github.com/juanfont/headscale/pull/346) - Simplify the code behind 
registration of machines [#366](https://github.com/juanfont/headscale/pull/366) - - Nodes are now only written to database if they are registrated successfully + - Nodes are now only written to database if they are registered successfully - Fix a limitation in the ACLs that prevented users to write rules with `*` as source [#374](https://github.com/juanfont/headscale/issues/374) - Reduce the overhead of marshal/unmarshal for Hostinfo, routes and endpoints by using specific types in Machine [#371](https://github.com/juanfont/headscale/pull/371) -- Apply normalization function to FQDN on hostnames when hosts registers and retrieve informations [#363](https://github.com/juanfont/headscale/issues/363) +- Apply normalization function to FQDN on hostnames when hosts registers and retrieve information [#363](https://github.com/juanfont/headscale/issues/363) - Fix a bug that prevented the use of `tailscale logout` with OIDC [#508](https://github.com/juanfont/headscale/issues/508) - Added Tailscale repo HEAD and unstable releases channel to the integration tests targets [#513](https://github.com/juanfont/headscale/pull/513) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..4c3ca130 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# Contributing + +Headscale is "Open Source, acknowledged contribution", this means that any contribution will have to be discussed with the maintainers before being added to the project. +This model has been chosen to reduce the risk of burnout by limiting the maintenance overhead of reviewing and validating third-party code. + +## Why do we have this model? + +Headscale has a small maintainer team that tries to balance working on the project, fixing bugs and reviewing contributions. + +When we work on issues ourselves, we develop first hand knowledge of the code and it makes it possible for us to maintain and own the code as the project develops. + +Code contributions are seen as a positive thing. 
People enjoy and engage with our project, but it also comes with some challenges; we have to understand the code, we have to understand the feature, we might have to become familiar with external libraries or services and we have to think about security implications. All those steps are required during the reviewing process. After the code has been merged, the feature has to be maintained. Any changes reliant on external services must be updated and expanded accordingly. + +The review and day-1 maintenance add a significant burden on the maintainers. Often we hope that the contributor will help out, but we found that most of the time, they disappear after their new feature was added. + +This means that when someone contributes, we are mostly happy about it, but we do have to run it through a series of checks to establish if we actually can maintain this feature. + +## What do we require? + +A general description is provided here and an explicit list is provided in our pull request template. + +All new features have to start out with a design document, which should be discussed on the issue tracker (not discord). It should include a use case for the feature, how it can be implemented, who will implement it and a plan for maintaining it. + +All features have to be end-to-end tested (integration tests) and have good unit test coverage to ensure that they work as expected. This will also ensure that the feature continues to work as expected over time. If a change cannot be tested, a strong case for why this is not possible needs to be presented. + +The contributor should help to maintain the feature over time. In case the feature is not maintained properly, the maintainers reserve the right to remove features they deem unmaintainable. This should help to improve the quality of the software and keep it in a maintainable state. + +## Bug fixes + +Headscale is open to code contributions for bug fixes without discussion. 
+ +## Documentation + +If you find mistakes in the documentation, please submit a fix to the documentation. diff --git a/Dockerfile.debug b/Dockerfile.debug index 659ae4cc..e5066060 100644 --- a/Dockerfile.debug +++ b/Dockerfile.debug @@ -2,31 +2,24 @@ # and are in no way endorsed by Headscale's maintainers as an # official nor supported release or distribution. -FROM docker.io/golang:1.22-bookworm AS build +FROM docker.io/golang:1.23-bookworm ARG VERSION=dev ENV GOPATH /go WORKDIR /go/src/headscale -COPY go.mod go.sum /go/src/headscale/ -RUN go mod download - -COPY . . - -RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale -RUN test -e /go/bin/headscale - -# Debug image -FROM docker.io/golang:1.22-bookworm - -COPY --from=build /go/bin/headscale /bin/headscale -ENV TZ UTC - RUN apt-get update \ && apt-get install --no-install-recommends --yes less jq \ && rm -rf /var/lib/apt/lists/* \ && apt-get clean RUN mkdir -p /var/run/headscale +COPY go.mod go.sum /go/src/headscale/ +RUN go mod download + +COPY . . + +RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale && test -e /go/bin/headscale + # Need to reset the entrypoint or everything will run as a busybox script ENTRYPOINT [] EXPOSE 8080/tcp diff --git a/Dockerfile.tailscale-HEAD b/Dockerfile.tailscale-HEAD index 83ff9fe5..92b0cae5 100644 --- a/Dockerfile.tailscale-HEAD +++ b/Dockerfile.tailscale-HEAD @@ -1,21 +1,43 @@ -# This Dockerfile and the images produced are for testing headscale, -# and are in no way endorsed by Headscale's maintainers as an -# official nor supported release or distribution. +# Copyright (c) Tailscale Inc & AUTHORS +# SPDX-License-Identifier: BSD-3-Clause -FROM golang:latest +# This Dockerfile is more or less lifted from tailscale/tailscale +# to ensure a similar build process when testing the HEAD of tailscale. 
-RUN apt-get update \ - && apt-get install -y dnsutils git iptables ssh ca-certificates \ - && rm -rf /var/lib/apt/lists/* +FROM golang:1.23-alpine AS build-env -RUN useradd --shell=/bin/bash --create-home ssh-it-user +WORKDIR /go/src +RUN apk add --no-cache git + +# Replace `RUN git...` with `COPY` and a local checked out version of Tailscale in `./tailscale` +# to test specific commits of the Tailscale client. This is useful when trying to find out why +# something specific broke between two versions of Tailscale with for example `git bisect`. +# COPY ./tailscale . RUN git clone https://github.com/tailscale/tailscale.git -WORKDIR /go/tailscale +WORKDIR /go/src/tailscale -RUN git checkout main \ - && sh build_dist.sh tailscale.com/cmd/tailscale \ - && sh build_dist.sh tailscale.com/cmd/tailscaled \ - && cp tailscale /usr/local/bin/ \ - && cp tailscaled /usr/local/bin/ + +# see build_docker.sh +ARG VERSION_LONG="" +ENV VERSION_LONG=$VERSION_LONG +ARG VERSION_SHORT="" +ENV VERSION_SHORT=$VERSION_SHORT +ARG VERSION_GIT_HASH="" +ENV VERSION_GIT_HASH=$VERSION_GIT_HASH +ARG TARGETARCH + +RUN GOARCH=$TARGETARCH go install -ldflags="\ + -X tailscale.com/version.longStamp=$VERSION_LONG \ + -X tailscale.com/version.shortStamp=$VERSION_SHORT \ + -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ + -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot + +FROM alpine:3.18 +RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl + +COPY --from=build-env /go/bin/* /usr/local/bin/ +# For compat with the previous run.sh, although ideally you should be +# using build_docker.sh which sets an entrypoint for the image. 
+RUN mkdir /tailscale && ln -s /usr/local/bin/containerboot /tailscale/run.sh diff --git a/Makefile b/Makefile index 442690ed..719393f5 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,7 @@ test_integration: --name headscale-test-suite \ -v $$PWD:$$PWD -w $$PWD/integration \ -v /var/run/docker.sock:/var/run/docker.sock \ + -v $$PWD/control_logs:/tmp/control \ golang:1 \ go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8 diff --git a/README.md b/README.md index 457e56ff..ff44e8e4 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,6 @@ buttons available in the repo. - Taildrop (File Sharing) - [Access control lists](https://tailscale.com/kb/1018/acls/) - [MagicDNS](https://tailscale.com/kb/1081/magicdns) -- Support for multiple IP ranges in the tailnet - Dual stack (IPv4 and IPv6) - Routing advertising (including exit nodes) - Ephemeral nodes @@ -63,15 +62,15 @@ buttons available in the repo. ## Client OS support -| OS | Supports headscale | -| ------- | --------------------------------------------------------- | -| Linux | Yes | -| OpenBSD | Yes | -| FreeBSD | Yes | -| macOS | Yes (see `/apple` on your headscale for more information) | -| Windows | Yes [docs](./docs/windows-client.md) | -| Android | Yes [docs](./docs/android-client.md) | -| iOS | Yes [docs](./docs/iOS-client.md) | +| OS | Supports headscale | +| ------- | -------------------------------------------------------------------------------------------------- | +| Linux | Yes | +| OpenBSD | Yes | +| FreeBSD | Yes | +| Windows | Yes (see [docs](./docs/windows-client.md) and `/windows` on your headscale for more information) | +| Android | Yes (see [docs](./docs/android-client.md)) | +| macOS | Yes (see [docs](./docs/apple-client.md#macos) and `/apple` on your headscale for more information) | +| iOS | Yes (see [docs](./docs/apple-client.md#ios) and `/apple` on your headscale for more information) | ## Running headscale @@ -87,24 +86,19 @@ Please have a look at the 
[`documentation`](https://headscale.net/). ## Disclaimer -1. This project is not associated with Tailscale Inc. -2. The purpose of Headscale is maintaining a working, self-hosted Tailscale control panel. +This project is not associated with Tailscale Inc. + +However, one of the active maintainers for Headscale [is employed by Tailscale](https://tailscale.com/blog/opensource) and he is allowed to spend work hours contributing to the project. Contributions from this maintainer are reviewed by other maintainers. + +The maintainers work together on setting the direction for the project. The underlying principle is to serve the community of self-hosters, enthusiasts and hobbyists - while having a sustainable project. ## Contributing -Headscale is "Open Source, acknowledged contribution", this means that any -contribution will have to be discussed with the Maintainers before being submitted. - -This model has been chosen to reduce the risk of burnout by limiting the -maintenance overhead of reviewing and validating third-party code. - -Headscale is open to code contributions for bug fixes without discussion. - -If you find mistakes in the documentation, please submit a fix to the documentation. +Please read the [CONTRIBUTING.md](./CONTRIBUTING.md) file. ### Requirements -To contribute to headscale you would need the lastest version of [Go](https://golang.org) +To contribute to headscale you would need the latest version of [Go](https://golang.org) and [Buf](https://buf.build)(Protobuf generator). We recommend using [Nix](https://nixos.org/) to setup a development environment. 
This can @@ -172,938 +166,8 @@ make build ## Contributors -<table> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/kradalby> - <img src=https://avatars.githubusercontent.com/u/98431?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Kristoffer Dalby/> - <br /> - <sub style="font-size:14px"><b>Kristoffer Dalby</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/juanfont> - <img src=https://avatars.githubusercontent.com/u/181059?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Juan Font/> - <br /> - <sub style="font-size:14px"><b>Juan Font</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/restanrm> - <img src=https://avatars.githubusercontent.com/u/4344371?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Adrien Raffin-Caboisse/> - <br /> - <sub style="font-size:14px"><b>Adrien Raffin-Caboisse</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/cure> - <img src=https://avatars.githubusercontent.com/u/149135?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ward Vandewege/> - <br /> - <sub style="font-size:14px"><b>Ward Vandewege</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/huskyii> - <img src=https://avatars.githubusercontent.com/u/5499746?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jiang Zhu/> - <br /> - 
<sub style="font-size:14px"><b>Jiang Zhu</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/tsujamin> - <img src=https://avatars.githubusercontent.com/u/2435619?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Benjamin Roberts/> - <br /> - <sub style="font-size:14px"><b>Benjamin Roberts</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/reynico> - <img src=https://avatars.githubusercontent.com/u/715768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Nico/> - <br /> - <sub style="font-size:14px"><b>Nico</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/evenh> - <img src=https://avatars.githubusercontent.com/u/2701536?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Even Holthe/> - <br /> - <sub style="font-size:14px"><b>Even Holthe</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/e-zk> - <img src=https://avatars.githubusercontent.com/u/58356365?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=e-zk/> - <br /> - <sub style="font-size:14px"><b>e-zk</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ImpostorKeanu> - <img src=https://avatars.githubusercontent.com/u/11574161?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Justin Angel/> - <br /> - <sub style="font-size:14px"><b>Justin 
Angel</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ItalyPaleAle> - <img src=https://avatars.githubusercontent.com/u/43508?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Alessandro (Ale) Segala/> - <br /> - <sub style="font-size:14px"><b>Alessandro (Ale) Segala</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ohdearaugustin> - <img src=https://avatars.githubusercontent.com/u/14001491?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ohdearaugustin/> - <br /> - <sub style="font-size:14px"><b>ohdearaugustin</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mpldr> - <img src=https://avatars.githubusercontent.com/u/33086936?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Moritz Poldrack/> - <br /> - <sub style="font-size:14px"><b>Moritz Poldrack</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Orhideous> - <img src=https://avatars.githubusercontent.com/u/2265184?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andriy Kushnir/> - <br /> - <sub style="font-size:14px"><b>Andriy Kushnir</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/GrigoriyMikhalkin> - <img src=https://avatars.githubusercontent.com/u/3637857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" 
alt=GrigoriyMikhalkin/> - <br /> - <sub style="font-size:14px"><b>GrigoriyMikhalkin</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/christian-heusel> - <img src=https://avatars.githubusercontent.com/u/26827864?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Christian Heusel/> - <br /> - <sub style="font-size:14px"><b>Christian Heusel</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mike-lloyd03> - <img src=https://avatars.githubusercontent.com/u/49411532?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mike Lloyd/> - <br /> - <sub style="font-size:14px"><b>Mike Lloyd</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/iSchluff> - <img src=https://avatars.githubusercontent.com/u/1429641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anton Schubert/> - <br /> - <sub style="font-size:14px"><b>Anton Schubert</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Niek> - <img src=https://avatars.githubusercontent.com/u/213140?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Niek van der Maas/> - <br /> - <sub style="font-size:14px"><b>Niek van der Maas</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/negbie> - <img src=https://avatars.githubusercontent.com/u/20154956?v=4 width="100;" 
style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Eugen Biegler/> - <br /> - <sub style="font-size:14px"><b>Eugen Biegler</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/617a7a> - <img src=https://avatars.githubusercontent.com/u/67651251?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azz/> - <br /> - <sub style="font-size:14px"><b>Azz</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/qbit> - <img src=https://avatars.githubusercontent.com/u/68368?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aaron Bieber/> - <br /> - <sub style="font-size:14px"><b>Aaron Bieber</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/kazauwa> - <img src=https://avatars.githubusercontent.com/u/12330159?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Igor Perepilitsyn/> - <br /> - <sub style="font-size:14px"><b>Igor Perepilitsyn</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Aluxima> - <img src=https://avatars.githubusercontent.com/u/16262531?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Laurent Marchaud/> - <br /> - <sub style="font-size:14px"><b>Laurent Marchaud</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/majst01> - <img src=https://avatars.githubusercontent.com/u/410110?v=4 width="100;" 
style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan Majer/> - <br /> - <sub style="font-size:14px"><b>Stefan Majer</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/fdelucchijr> - <img src=https://avatars.githubusercontent.com/u/69133647?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Fernando De Lucchi/> - <br /> - <sub style="font-size:14px"><b>Fernando De Lucchi</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/OrvilleQ> - <img src=https://avatars.githubusercontent.com/u/21377465?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Orville Q. Song/> - <br /> - <sub style="font-size:14px"><b>Orville Q. Song</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/hdhoang> - <img src=https://avatars.githubusercontent.com/u/12537?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=hdhoang/> - <br /> - <sub style="font-size:14px"><b>hdhoang</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/bravechamp> - <img src=https://avatars.githubusercontent.com/u/48980452?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=bravechamp/> - <br /> - <sub style="font-size:14px"><b>bravechamp</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/deonthomasgy> - <img src=https://avatars.githubusercontent.com/u/150036?v=4 width="100;" 
style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Deon Thomas/> - <br /> - <sub style="font-size:14px"><b>Deon Thomas</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/madjam002> - <img src=https://avatars.githubusercontent.com/u/679137?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jamie Greeff/> - <br /> - <sub style="font-size:14px"><b>Jamie Greeff</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/jonathanspw> - <img src=https://avatars.githubusercontent.com/u/8390543?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan Wright/> - <br /> - <sub style="font-size:14px"><b>Jonathan Wright</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ChibangLW> - <img src=https://avatars.githubusercontent.com/u/22293464?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ChibangLW/> - <br /> - <sub style="font-size:14px"><b>ChibangLW</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/majabojarska> - <img src=https://avatars.githubusercontent.com/u/33836570?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Maja Bojarska/> - <br /> - <sub style="font-size:14px"><b>Maja Bojarska</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mevansam> - <img src=https://avatars.githubusercontent.com/u/403630?v=4 
width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mevan Samaratunga/> - <br /> - <sub style="font-size:14px"><b>Mevan Samaratunga</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/dragetd> - <img src=https://avatars.githubusercontent.com/u/3639577?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael G./> - <br /> - <sub style="font-size:14px"><b>Michael G.</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ptman> - <img src=https://avatars.githubusercontent.com/u/24669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Paul Tötterman/> - <br /> - <sub style="font-size:14px"><b>Paul Tötterman</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/samson4649> - <img src=https://avatars.githubusercontent.com/u/12725953?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Samuel Lock/> - <br /> - <sub style="font-size:14px"><b>Samuel Lock</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/loprima-l> - <img src=https://avatars.githubusercontent.com/u/69201633?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=loprima-l/> - <br /> - <sub style="font-size:14px"><b>loprima-l</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/unreality> - <img src=https://avatars.githubusercontent.com/u/352522?v=4 
width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=unreality/> - <br /> - <sub style="font-size:14px"><b>unreality</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/vsychov> - <img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/> - <br /> - <sub style="font-size:14px"><b>MichaelKo</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/kevin1sMe> - <img src=https://avatars.githubusercontent.com/u/6886076?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kevinlin/> - <br /> - <sub style="font-size:14px"><b>kevinlin</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/QZAiXH> - <img src=https://avatars.githubusercontent.com/u/23068780?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Snack/> - <br /> - <sub style="font-size:14px"><b>Snack</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/artemklevtsov> - <img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/> - <br /> - <sub style="font-size:14px"><b>Artem Klevtsov</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/cmars> - <img src=https://avatars.githubusercontent.com/u/23741?v=4 width="100;" 
style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Casey Marshall/> - <br /> - <sub style="font-size:14px"><b>Casey Marshall</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/dbevacqua> - <img src=https://avatars.githubusercontent.com/u/6534306?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=dbevacqua/> - <br /> - <sub style="font-size:14px"><b>dbevacqua</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/joshuataylor> - <img src=https://avatars.githubusercontent.com/u/225131?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Josh Taylor/> - <br /> - <sub style="font-size:14px"><b>Josh Taylor</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/CNLHC> - <img src=https://avatars.githubusercontent.com/u/21005146?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=LIU HANCHENG/> - <br /> - <sub style="font-size:14px"><b>LIU HANCHENG</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/motiejus> - <img src=https://avatars.githubusercontent.com/u/107720?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Motiejus Jakštys/> - <br /> - <sub style="font-size:14px"><b>Motiejus Jakštys</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/pvinis> - <img src=https://avatars.githubusercontent.com/u/100233?v=4 width="100;" 
style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pavlos Vinieratos/> - <br /> - <sub style="font-size:14px"><b>Pavlos Vinieratos</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/SilverBut> - <img src=https://avatars.githubusercontent.com/u/6560655?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Silver Bullet/> - <br /> - <sub style="font-size:14px"><b>Silver Bullet</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/snh> - <img src=https://avatars.githubusercontent.com/u/2051768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Steven Honson/> - <br /> - <sub style="font-size:14px"><b>Steven Honson</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ratsclub> - <img src=https://avatars.githubusercontent.com/u/25647735?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Victor Freire/> - <br /> - <sub style="font-size:14px"><b>Victor Freire</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/qzydustin> - <img src=https://avatars.githubusercontent.com/u/44362429?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zhenyu Qi/> - <br /> - <sub style="font-size:14px"><b>Zhenyu Qi</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/t56k> - <img src=https://avatars.githubusercontent.com/u/12165422?v=4 width="100;" 
style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=thomas/> - <br /> - <sub style="font-size:14px"><b>thomas</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/puzpuzpuz> - <img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/> - <br /> - <sub style="font-size:14px"><b>Andrei Pechkurov</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/linsomniac> - <img src=https://avatars.githubusercontent.com/u/466380?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sean Reifschneider/> - <br /> - <sub style="font-size:14px"><b>Sean Reifschneider</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/aberoham> - <img src=https://avatars.githubusercontent.com/u/586805?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Abraham Ingersoll/> - <br /> - <sub style="font-size:14px"><b>Abraham Ingersoll</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/iFargle> - <img src=https://avatars.githubusercontent.com/u/124551390?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Albert Copeland/> - <br /> - <sub style="font-size:14px"><b>Albert Copeland</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/theryecatcher> - <img src=https://avatars.githubusercontent.com/u/16442416?v=4 
width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anoop Sundaresh/> - <br /> - <sub style="font-size:14px"><b>Anoop Sundaresh</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/apognu> - <img src=https://avatars.githubusercontent.com/u/3017182?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antoine POPINEAU/> - <br /> - <sub style="font-size:14px"><b>Antoine POPINEAU</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/tony1661> - <img src=https://avatars.githubusercontent.com/u/5287266?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antonio Fernandez/> - <br /> - <sub style="font-size:14px"><b>Antonio Fernandez</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/aofei> - <img src=https://avatars.githubusercontent.com/u/5037285?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aofei Sheng/> - <br /> - <sub style="font-size:14px"><b>Aofei Sheng</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/arnarg> - <img src=https://avatars.githubusercontent.com/u/1291396?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arnar/> - <br /> - <sub style="font-size:14px"><b>Arnar</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/awoimbee> - <img src=https://avatars.githubusercontent.com/u/22431493?v=4 
width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arthur Woimbée/> - <br /> - <sub style="font-size:14px"><b>Arthur Woimbée</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/avirut> - <img src=https://avatars.githubusercontent.com/u/27095602?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Avirut Mehta/> - <br /> - <sub style="font-size:14px"><b>Avirut Mehta</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/winterheart> - <img src=https://avatars.githubusercontent.com/u/81112?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azamat H. Hackimov/> - <br /> - <sub style="font-size:14px"><b>Azamat H. Hackimov</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/stensonb> - <img src=https://avatars.githubusercontent.com/u/933389?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Bryan Stenson/> - <br /> - <sub style="font-size:14px"><b>Bryan Stenson</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/yangchuansheng> - <img src=https://avatars.githubusercontent.com/u/15308462?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt= Carson Yang/> - <br /> - <sub style="font-size:14px"><b> Carson Yang</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/kundel> - <img 
src=https://avatars.githubusercontent.com/u/10158899?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Darrell Kundel/> - <br /> - <sub style="font-size:14px"><b>Darrell Kundel</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/fatih-acar> - <img src=https://avatars.githubusercontent.com/u/15028881?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=fatih-acar/> - <br /> - <sub style="font-size:14px"><b>fatih-acar</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/fkr> - <img src=https://avatars.githubusercontent.com/u/51063?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Kronlage-Dammers/> - <br /> - <sub style="font-size:14px"><b>Felix Kronlage-Dammers</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/felixonmars> - <img src=https://avatars.githubusercontent.com/u/1006477?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Yan/> - <br /> - <sub style="font-size:14px"><b>Felix Yan</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/gabe565> - <img src=https://avatars.githubusercontent.com/u/7717888?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Gabe Cook/> - <br /> - <sub style="font-size:14px"><b>Gabe Cook</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/JJGadgets> - 
<img src=https://avatars.githubusercontent.com/u/5709019?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JJGadgets/> - <br /> - <sub style="font-size:14px"><b>JJGadgets</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/hrtkpf> - <img src=https://avatars.githubusercontent.com/u/42646788?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=hrtkpf/> - <br /> - <sub style="font-size:14px"><b>hrtkpf</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/jessebot> - <img src=https://avatars.githubusercontent.com/u/2389292?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JesseBot/> - <br /> - <sub style="font-size:14px"><b>JesseBot</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/jimt> - <img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/> - <br /> - <sub style="font-size:14px"><b>Jim Tittsler</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/jsiebens> - <img src=https://avatars.githubusercontent.com/u/499769?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Johan Siebens/> - <br /> - <sub style="font-size:14px"><b>Johan Siebens</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/johnae> - <img 
src=https://avatars.githubusercontent.com/u/28332?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=John Axel Eriksson/> - <br /> - <sub style="font-size:14px"><b>John Axel Eriksson</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ShadowJonathan> - <img src=https://avatars.githubusercontent.com/u/22740616?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan de Jong/> - <br /> - <sub style="font-size:14px"><b>Jonathan de Jong</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/JulienFloris> - <img src=https://avatars.githubusercontent.com/u/20380255?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Julien Zweverink/> - <br /> - <sub style="font-size:14px"><b>Julien Zweverink</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/win-t> - <img src=https://avatars.githubusercontent.com/u/1589120?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Kurnia D Win/> - <br /> - <sub style="font-size:14px"><b>Kurnia D Win</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Lucalux> - <img src=https://avatars.githubusercontent.com/u/70356955?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Lucalux/> - <br /> - <sub style="font-size:14px"><b>Lucalux</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/foxtrot> - <img src=https://avatars.githubusercontent.com/u/4153572?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Marc/> - <br /> - <sub style="font-size:14px"><b>Marc</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mhameed> - <img src=https://avatars.githubusercontent.com/u/447017?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mesar Hameed/> - <br /> - <sub style="font-size:14px"><b>Mesar Hameed</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mikejsavage> - <img src=https://avatars.githubusercontent.com/u/579299?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael Savage/> - <br /> - <sub style="font-size:14px"><b>Michael Savage</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/pkrivanec> - <img src=https://avatars.githubusercontent.com/u/25530641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Philipp Krivanec/> - <br /> - <sub style="font-size:14px"><b>Philipp Krivanec</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/piec> - <img src=https://avatars.githubusercontent.com/u/781471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pierre Carru/> - <br /> - <sub style="font-size:14px"><b>Pierre Carru</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/donran> - <img src=https://avatars.githubusercontent.com/u/4838348?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pontus N/> - <br /> - <sub style="font-size:14px"><b>Pontus N</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/nnsee> - <img src=https://avatars.githubusercontent.com/u/36747857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Rasmus Moorats/> - <br /> - <sub style="font-size:14px"><b>Rasmus Moorats</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/rcursaru> - <img src=https://avatars.githubusercontent.com/u/16259641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=rcursaru/> - <br /> - <sub style="font-size:14px"><b>rcursaru</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/renovate-bot> - <img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mend Renovate/> - <br /> - <sub style="font-size:14px"><b>Mend Renovate</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ryanfowler> - <img src=https://avatars.githubusercontent.com/u/2668821?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ryan Fowler/> - <br /> - <sub style="font-size:14px"><b>Ryan Fowler</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/muzy> - <img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian/> - <br /> - <sub style="font-size:14px"><b>Sebastian</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/shaananc> - <img src=https://avatars.githubusercontent.com/u/2287839?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Shaanan Cohney/> - <br /> - <sub style="font-size:14px"><b>Shaanan Cohney</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/6ixfalls> - <img src=https://avatars.githubusercontent.com/u/23470032?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Six/> - <br /> - <sub style="font-size:14px"><b>Six</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/stefanvanburen> - <img src=https://avatars.githubusercontent.com/u/622527?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan VanBuren/> - <br /> - <sub style="font-size:14px"><b>Stefan VanBuren</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/sophware> - <img src=https://avatars.githubusercontent.com/u/41669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sophware/> - <br /> - <sub style="font-size:14px"><b>sophware</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/m-tanner-dev0> - <img src=https://avatars.githubusercontent.com/u/97977342?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tanner/> - <br /> - <sub style="font-size:14px"><b>Tanner</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Teteros> - <img src=https://avatars.githubusercontent.com/u/5067989?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Teteros/> - <br /> - <sub style="font-size:14px"><b>Teteros</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/gitter-badger> - <img src=https://avatars.githubusercontent.com/u/8518239?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=The Gitter Badger/> - <br /> - <sub style="font-size:14px"><b>The Gitter Badger</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/tianon> - <img src=https://avatars.githubusercontent.com/u/161631?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tianon Gravi/> - <br /> - <sub style="font-size:14px"><b>Tianon Gravi</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/thetillhoff> - <img src=https://avatars.githubusercontent.com/u/25052289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Till Hoffmann/> - <br /> - <sub style="font-size:14px"><b>Till Hoffmann</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/woudsma> - <img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/> - <br /> - <sub style="font-size:14px"><b>Tjerk Woudsma</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/y0ngb1n> - <img src=https://avatars.githubusercontent.com/u/25719408?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=杨斌 Aben/> - <br /> - <sub style="font-size:14px"><b>杨斌 Aben</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/sleepymole> - <img src=https://avatars.githubusercontent.com/u/17199941?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Yujie Xia/> - <br /> - <sub style="font-size:14px"><b>Yujie Xia</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/newellz2> - <img src=https://avatars.githubusercontent.com/u/52436542?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zachary Newell/> - <br /> - <sub style="font-size:14px"><b>Zachary Newell</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/zekker6> - <img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/> - <br /> - <sub style="font-size:14px"><b>Zakhar Bessarab</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/zhzy0077> - <img src=https://avatars.githubusercontent.com/u/8717471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zhiyuan Zheng/> - <br /> - <sub style="font-size:14px"><b>Zhiyuan Zheng</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Bpazy> - <img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/> - <br /> - <sub style="font-size:14px"><b>Ziyuan Han</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/caelansar> - <img src=https://avatars.githubusercontent.com/u/31852257?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=caelansar/> - <br /> - <sub style="font-size:14px"><b>caelansar</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/derelm> - <img src=https://avatars.githubusercontent.com/u/465155?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=derelm/> - <br /> - <sub style="font-size:14px"><b>derelm</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/dnaq> - <img src=https://avatars.githubusercontent.com/u/1299717?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=dnaq/> - <br /> - <sub style="font-size:14px"><b>dnaq</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/nning> - <img 
src=https://avatars.githubusercontent.com/u/557430?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=henning mueller/> - <br /> - <sub style="font-size:14px"><b>henning mueller</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ignoramous> - <img src=https://avatars.githubusercontent.com/u/852289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ignoramous/> - <br /> - <sub style="font-size:14px"><b>ignoramous</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/jimyag> - <img src=https://avatars.githubusercontent.com/u/69233189?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=jimyag/> - <br /> - <sub style="font-size:14px"><b>jimyag</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/magichuihui> - <img src=https://avatars.githubusercontent.com/u/10866198?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=suhelen/> - <br /> - <sub style="font-size:14px"><b>suhelen</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/lion24> - <img src=https://avatars.githubusercontent.com/u/1382102?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sharkonet/> - <br /> - <sub style="font-size:14px"><b>sharkonet</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ma6174> - <img 
src=https://avatars.githubusercontent.com/u/1449133?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ma6174/> - <br /> - <sub style="font-size:14px"><b>ma6174</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/manju-rn> - <img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/> - <br /> - <sub style="font-size:14px"><b>manju-rn</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/nicholas-yap> - <img src=https://avatars.githubusercontent.com/u/38109533?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=nicholas-yap/> - <br /> - <sub style="font-size:14px"><b>nicholas-yap</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/pernila> - <img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tommi Pernila/> - <br /> - <sub style="font-size:14px"><b>Tommi Pernila</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/phpmalik> - <img src=https://avatars.githubusercontent.com/u/26834645?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=phpmalik/> - <br /> - <sub style="font-size:14px"><b>phpmalik</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Wakeful-Cloud> - <img 
src=https://avatars.githubusercontent.com/u/38930607?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Wakeful Cloud/> - <br /> - <sub style="font-size:14px"><b>Wakeful Cloud</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/xpzouying> - <img src=https://avatars.githubusercontent.com/u/3946563?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=zy/> - <br /> - <sub style="font-size:14px"><b>zy</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/atorregrosa-smd> - <img src=https://avatars.githubusercontent.com/u/78434679?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Àlex Torregrosa/> - <br /> - <sub style="font-size:14px"><b>Àlex Torregrosa</b></sub> - </a> - </td> -</tr> -</table> +<a href="https://github.com/juanfont/headscale/graphs/contributors"> + <img src="https://contrib.rocks/image?repo=juanfont/headscale" /> +</a> + +Made with [contrib.rocks](https://contrib.rocks). 
diff --git a/cmd/headscale/cli/api_key.go b/cmd/headscale/cli/api_key.go index 372ec390..bd839b7b 100644 --- a/cmd/headscale/cli/api_key.go +++ b/cmd/headscale/cli/api_key.go @@ -54,7 +54,7 @@ var listAPIKeys = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { output, _ := cmd.Flags().GetString("output") - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -67,14 +67,10 @@ var listAPIKeys = &cobra.Command{ fmt.Sprintf("Error getting the list of keys: %s", err), output, ) - - return } if output != "" { SuccessOutput(response.GetApiKeys(), "", output) - - return } tableData := pterm.TableData{ @@ -102,8 +98,6 @@ var listAPIKeys = &cobra.Command{ fmt.Sprintf("Failed to render pterm table: %s", err), output, ) - - return } }, } @@ -119,9 +113,6 @@ If you loose a key, create a new one and revoke (expire) the old one.`, Run: func(cmd *cobra.Command, args []string) { output, _ := cmd.Flags().GetString("output") - log.Trace(). - Msg("Preparing to create ApiKey") - request := &v1.CreateApiKeyRequest{} durationStr, _ := cmd.Flags().GetString("expiration") @@ -133,19 +124,13 @@ If you loose a key, create a new one and revoke (expire) the old one.`, fmt.Sprintf("Could not parse duration: %s\n", err), output, ) - - return } expiration := time.Now().UTC().Add(time.Duration(duration)) - log.Trace(). - Dur("expiration", time.Duration(duration)). 
- Msg("expiration has been set") - request.Expiration = timestamppb.New(expiration) - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -156,8 +141,6 @@ If you loose a key, create a new one and revoke (expire) the old one.`, fmt.Sprintf("Cannot create Api Key: %s\n", err), output, ) - - return } SuccessOutput(response.GetApiKey(), response.GetApiKey(), output) @@ -178,11 +161,9 @@ var expireAPIKeyCmd = &cobra.Command{ fmt.Sprintf("Error getting prefix from CLI flag: %s", err), output, ) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -197,8 +178,6 @@ var expireAPIKeyCmd = &cobra.Command{ fmt.Sprintf("Cannot expire Api Key: %s\n", err), output, ) - - return } SuccessOutput(response, "Key expired", output) @@ -219,11 +198,9 @@ var deleteAPIKeyCmd = &cobra.Command{ fmt.Sprintf("Error getting prefix from CLI flag: %s", err), output, ) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -238,8 +215,6 @@ var deleteAPIKeyCmd = &cobra.Command{ fmt.Sprintf("Cannot delete Api Key: %s\n", err), output, ) - - return } SuccessOutput(response, "Key deleted", output) diff --git a/cmd/headscale/cli/configtest.go b/cmd/headscale/cli/configtest.go index 72744a7b..d469885b 100644 --- a/cmd/headscale/cli/configtest.go +++ b/cmd/headscale/cli/configtest.go @@ -14,7 +14,7 @@ var configTestCmd = &cobra.Command{ Short: "Test the configuration.", Long: "Run a test of the configuration and exit.", Run: func(cmd *cobra.Command, args []string) { - _, err := getHeadscaleApp() + _, err := newHeadscaleServerWithConfig() if err != nil { log.Fatal().Caller().Err(err).Msg("Error initializing") } diff --git a/cmd/headscale/cli/debug.go b/cmd/headscale/cli/debug.go index 
054fc07f..72cde32d 100644 --- a/cmd/headscale/cli/debug.go +++ b/cmd/headscale/cli/debug.go @@ -64,11 +64,9 @@ var createNodeCmd = &cobra.Command{ user, err := cmd.Flags().GetString("user") if err != nil { ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -79,8 +77,6 @@ var createNodeCmd = &cobra.Command{ fmt.Sprintf("Error getting node from flag: %s", err), output, ) - - return } machineKey, err := cmd.Flags().GetString("key") @@ -90,8 +86,6 @@ var createNodeCmd = &cobra.Command{ fmt.Sprintf("Error getting key from flag: %s", err), output, ) - - return } var mkey key.MachinePublic @@ -102,8 +96,6 @@ var createNodeCmd = &cobra.Command{ fmt.Sprintf("Failed to parse machine key from flag: %s", err), output, ) - - return } routes, err := cmd.Flags().GetStringSlice("route") @@ -113,8 +105,6 @@ var createNodeCmd = &cobra.Command{ fmt.Sprintf("Error getting routes from flag: %s", err), output, ) - - return } request := &v1.DebugCreateNodeRequest{ @@ -131,8 +121,6 @@ var createNodeCmd = &cobra.Command{ fmt.Sprintf("Cannot create node: %s", status.Convert(err).Message()), output, ) - - return } SuccessOutput(response.GetNode(), "Node created", output) diff --git a/cmd/headscale/cli/nodes.go b/cmd/headscale/cli/nodes.go index ac996245..b9e97a33 100644 --- a/cmd/headscale/cli/nodes.go +++ b/cmd/headscale/cli/nodes.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "net/netip" + "slices" "strconv" "strings" "time" @@ -97,6 +98,8 @@ func init() { tagCmd.Flags(). 
StringSliceP("tags", "t", []string{}, "List of tags to add to the node") nodeCmd.AddCommand(tagCmd) + + nodeCmd.AddCommand(backfillNodeIPsCmd) } var nodeCmd = &cobra.Command{ @@ -113,11 +116,9 @@ var registerNodeCmd = &cobra.Command{ user, err := cmd.Flags().GetString("user") if err != nil { ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -128,8 +129,6 @@ var registerNodeCmd = &cobra.Command{ fmt.Sprintf("Error getting node key from flag: %s", err), output, ) - - return } request := &v1.RegisterNodeRequest{ @@ -147,8 +146,6 @@ var registerNodeCmd = &cobra.Command{ ), output, ) - - return } SuccessOutput( @@ -166,17 +163,13 @@ var listNodesCmd = &cobra.Command{ user, err := cmd.Flags().GetString("user") if err != nil { ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output) - - return } showTags, err := cmd.Flags().GetBool("tags") if err != nil { ErrorOutput(err, fmt.Sprintf("Error getting tags flag: %s", err), output) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -191,21 +184,15 @@ var listNodesCmd = &cobra.Command{ fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()), output, ) - - return } if output != "" { SuccessOutput(response.GetNodes(), "", output) - - return } tableData, err := nodesToPtables(user, showTags, response.GetNodes()) if err != nil { ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output) - - return } err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render() @@ -215,8 +202,6 @@ var listNodesCmd = &cobra.Command{ fmt.Sprintf("Failed to render pterm table: %s", err), output, ) - - return } }, } @@ -240,7 +225,7 @@ var expireNodeCmd = &cobra.Command{ return } - ctx, client, conn, cancel := 
getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -283,7 +268,7 @@ var renameNodeCmd = &cobra.Command{ return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -332,7 +317,7 @@ var deleteNodeCmd = &cobra.Command{ return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -432,7 +417,7 @@ var moveNodeCmd = &cobra.Command{ return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -477,6 +462,57 @@ var moveNodeCmd = &cobra.Command{ }, } +var backfillNodeIPsCmd = &cobra.Command{ + Use: "backfillips", + Short: "Backfill IPs missing from nodes", + Long: ` +Backfill IPs can be used to add/remove IPs from nodes +based on the current configuration of Headscale. + +If there are nodes that do not have IPv4 or IPv6 +even if prefixes for both are configured in the config, +this command can be used to assign IPs of the sort to +all nodes that are missing. 
+ +If you remove IPv4 or IPv6 prefixes from the config, +it can be run to remove the IPs that should no longer +be assigned to nodes.`, + Run: func(cmd *cobra.Command, args []string) { + var err error + output, _ := cmd.Flags().GetString("output") + + confirm := false + prompt := &survey.Confirm{ + Message: "Are you sure that you want to assign/remove IPs to/from nodes?", + } + err = survey.AskOne(prompt, &confirm) + if err != nil { + return + } + if confirm { + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() + defer cancel() + defer conn.Close() + + changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm}) + if err != nil { + ErrorOutput( + err, + fmt.Sprintf( + "Error backfilling IPs: %s", + status.Convert(err).Message(), + ), + output, + ) + + return + } + + SuccessOutput(changes, "Node IPs backfilled successfully", output) + } + }, +} + func nodesToPtables( currentUser string, showTags bool, @@ -564,14 +600,14 @@ func nodesToPtables( forcedTags = strings.TrimLeft(forcedTags, ",") var invalidTags string for _, tag := range node.GetInvalidTags() { - if !contains(node.GetForcedTags(), tag) { + if !slices.Contains(node.GetForcedTags(), tag) { invalidTags += "," + pterm.LightRed(tag) } } invalidTags = strings.TrimLeft(invalidTags, ",") var validTags string for _, tag := range node.GetValidTags() { - if !contains(node.GetForcedTags(), tag) { + if !slices.Contains(node.GetForcedTags(), tag) { validTags += "," + pterm.LightGreen(tag) } } @@ -627,7 +663,7 @@ var tagCmd = &cobra.Command{ Aliases: []string{"tags", "t"}, Run: func(cmd *cobra.Command, args []string) { output, _ := cmd.Flags().GetString("output") - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() diff --git a/cmd/headscale/cli/policy.go b/cmd/headscale/cli/policy.go new file mode 100644 index 00000000..d1349b5a --- /dev/null +++ b/cmd/headscale/cli/policy.go @@ -0,0 
+1,87 @@ +package cli + +import ( + "fmt" + "io" + "os" + + v1 "github.com/juanfont/headscale/gen/go/headscale/v1" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(policyCmd) + policyCmd.AddCommand(getPolicy) + + setPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format") + if err := setPolicy.MarkFlagRequired("file"); err != nil { + log.Fatal().Err(err).Msg("") + } + policyCmd.AddCommand(setPolicy) +} + +var policyCmd = &cobra.Command{ + Use: "policy", + Short: "Manage the Headscale ACL Policy", +} + +var getPolicy = &cobra.Command{ + Use: "get", + Short: "Print the current ACL Policy", + Aliases: []string{"show", "view", "fetch"}, + Run: func(cmd *cobra.Command, args []string) { + output, _ := cmd.Flags().GetString("output") + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() + defer cancel() + defer conn.Close() + + request := &v1.GetPolicyRequest{} + + response, err := client.GetPolicy(ctx, request) + if err != nil { + ErrorOutput(err, fmt.Sprintf("Failed loading ACL Policy: %s", err), output) + } + + // TODO(pallabpain): Maybe print this better? + // This does not pass output as we dont support yaml, json or json-line + // output for this command. It is HuJSON already. + SuccessOutput("", response.GetPolicy(), "") + }, +} + +var setPolicy = &cobra.Command{ + Use: "set", + Short: "Updates the ACL Policy", + Long: ` + Updates the existing ACL Policy with the provided policy. The policy must be a valid HuJSON object. 
+ This command only works when the acl.policy_mode is set to "db", and the policy will be stored in the database.`, + Aliases: []string{"put", "update"}, + Run: func(cmd *cobra.Command, args []string) { + output, _ := cmd.Flags().GetString("output") + policyPath, _ := cmd.Flags().GetString("file") + + f, err := os.Open(policyPath) + if err != nil { + ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output) + } + defer f.Close() + + policyBytes, err := io.ReadAll(f) + if err != nil { + ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output) + } + + request := &v1.SetPolicyRequest{Policy: string(policyBytes)} + + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() + defer cancel() + defer conn.Close() + + if _, err := client.SetPolicy(ctx, request); err != nil { + ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output) + } + + SuccessOutput(nil, "Policy updated.", "") + }, +} diff --git a/cmd/headscale/cli/preauthkeys.go b/cmd/headscale/cli/preauthkeys.go index cc3b1b76..0074e029 100644 --- a/cmd/headscale/cli/preauthkeys.go +++ b/cmd/headscale/cli/preauthkeys.go @@ -60,11 +60,9 @@ var listPreAuthKeys = &cobra.Command{ user, err := cmd.Flags().GetString("user") if err != nil { ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -85,8 +83,6 @@ var listPreAuthKeys = &cobra.Command{ if output != "" { SuccessOutput(response.GetPreAuthKeys(), "", output) - - return } tableData := pterm.TableData{ @@ -134,8 +130,6 @@ var listPreAuthKeys = &cobra.Command{ fmt.Sprintf("Failed to render pterm table: %s", err), output, ) - - return } }, } @@ -150,20 +144,12 @@ var createPreAuthKeyCmd = &cobra.Command{ user, err := cmd.Flags().GetString("user") if err != nil { ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output) - 
- return } reusable, _ := cmd.Flags().GetBool("reusable") ephemeral, _ := cmd.Flags().GetBool("ephemeral") tags, _ := cmd.Flags().GetStringSlice("tags") - log.Trace(). - Bool("reusable", reusable). - Bool("ephemeral", ephemeral). - Str("user", user). - Msg("Preparing to create preauthkey") - request := &v1.CreatePreAuthKeyRequest{ User: user, Reusable: reusable, @@ -180,8 +166,6 @@ var createPreAuthKeyCmd = &cobra.Command{ fmt.Sprintf("Could not parse duration: %s\n", err), output, ) - - return } expiration := time.Now().UTC().Add(time.Duration(duration)) @@ -192,7 +176,7 @@ var createPreAuthKeyCmd = &cobra.Command{ request.Expiration = timestamppb.New(expiration) - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -203,8 +187,6 @@ var createPreAuthKeyCmd = &cobra.Command{ fmt.Sprintf("Cannot create Pre Auth Key: %s\n", err), output, ) - - return } SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output) @@ -227,11 +209,9 @@ var expirePreAuthKeyCmd = &cobra.Command{ user, err := cmd.Flags().GetString("user") if err != nil { ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -247,8 +227,6 @@ var expirePreAuthKeyCmd = &cobra.Command{ fmt.Sprintf("Cannot expire Pre Auth Key: %s\n", err), output, ) - - return } SuccessOutput(response, "Key expired", output) diff --git a/cmd/headscale/cli/root.go b/cmd/headscale/cli/root.go index 40a9b18a..7bac79ce 100644 --- a/cmd/headscale/cli/root.go +++ b/cmd/headscale/cli/root.go @@ -9,6 +9,7 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" + "github.com/spf13/viper" "github.com/tcnksm/go-latest" ) @@ -49,26 +50,21 @@ func initConfig() { } } - cfg, err := types.GetHeadscaleConfig() - if err 
!= nil { - log.Fatal().Caller().Err(err).Msg("Failed to get headscale configuration") - } - machineOutput := HasMachineOutputFlag() - zerolog.SetGlobalLevel(cfg.Log.Level) - // If the user has requested a "node" readable format, // then disable login so the output remains valid. if machineOutput { zerolog.SetGlobalLevel(zerolog.Disabled) } - if cfg.Log.Format == types.JSONLogFormat { - log.Logger = log.Output(os.Stdout) - } + // logFormat := viper.GetString("log.format") + // if logFormat == types.JSONLogFormat { + // log.Logger = log.Output(os.Stdout) + // } - if !cfg.DisableUpdateCheck && !machineOutput { + disableUpdateCheck := viper.GetBool("disable_check_updates") + if !disableUpdateCheck && !machineOutput { if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") && Version != "dev" { githubTag := &latest.GithubTag{ @@ -78,7 +74,7 @@ func initConfig() { res, err := latest.Check(githubTag, Version) if err == nil && res.Outdated { //nolint - fmt.Printf( + log.Warn().Msgf( "An updated version of Headscale has been found (%s vs. your current %s). 
Check it out https://github.com/juanfont/headscale/releases\n", res.Current, Version, diff --git a/cmd/headscale/cli/routes.go b/cmd/headscale/cli/routes.go index 86ef295c..96227b31 100644 --- a/cmd/headscale/cli/routes.go +++ b/cmd/headscale/cli/routes.go @@ -64,11 +64,9 @@ var listRoutesCmd = &cobra.Command{ fmt.Sprintf("Error getting machine id from flag: %s", err), output, ) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -82,14 +80,10 @@ var listRoutesCmd = &cobra.Command{ fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()), output, ) - - return } if output != "" { SuccessOutput(response.GetRoutes(), "", output) - - return } routes = response.GetRoutes() @@ -103,14 +97,10 @@ var listRoutesCmd = &cobra.Command{ fmt.Sprintf("Cannot get routes for node %d: %s", machineID, status.Convert(err).Message()), output, ) - - return } if output != "" { SuccessOutput(response.GetRoutes(), "", output) - - return } routes = response.GetRoutes() @@ -119,8 +109,6 @@ var listRoutesCmd = &cobra.Command{ tableData := routesToPtables(routes) if err != nil { ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output) - - return } err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render() @@ -130,8 +118,6 @@ var listRoutesCmd = &cobra.Command{ fmt.Sprintf("Failed to render pterm table: %s", err), output, ) - - return } }, } @@ -150,11 +136,9 @@ var enableRouteCmd = &cobra.Command{ fmt.Sprintf("Error getting machine id from flag: %s", err), output, ) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -167,14 +151,10 @@ var enableRouteCmd = &cobra.Command{ fmt.Sprintf("Cannot enable route %d: %s", routeID, status.Convert(err).Message()), output, ) - - return } if output != "" { SuccessOutput(response, "", output) - - 
return } }, } @@ -193,11 +173,9 @@ var disableRouteCmd = &cobra.Command{ fmt.Sprintf("Error getting machine id from flag: %s", err), output, ) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -210,14 +188,10 @@ var disableRouteCmd = &cobra.Command{ fmt.Sprintf("Cannot disable route %d: %s", routeID, status.Convert(err).Message()), output, ) - - return } if output != "" { SuccessOutput(response, "", output) - - return } }, } @@ -236,11 +210,9 @@ var deleteRouteCmd = &cobra.Command{ fmt.Sprintf("Error getting machine id from flag: %s", err), output, ) - - return } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -253,14 +225,10 @@ var deleteRouteCmd = &cobra.Command{ fmt.Sprintf("Cannot delete route %d: %s", routeID, status.Convert(err).Message()), output, ) - - return } if output != "" { SuccessOutput(response, "", output) - - return } }, } diff --git a/cmd/headscale/cli/server.go b/cmd/headscale/cli/serve.go similarity index 67% rename from cmd/headscale/cli/server.go rename to cmd/headscale/cli/serve.go index a1d19600..91597400 100644 --- a/cmd/headscale/cli/server.go +++ b/cmd/headscale/cli/serve.go @@ -1,6 +1,9 @@ package cli import ( + "errors" + "net/http" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -16,14 +19,14 @@ var serveCmd = &cobra.Command{ return nil }, Run: func(cmd *cobra.Command, args []string) { - app, err := getHeadscaleApp() + app, err := newHeadscaleServerWithConfig() if err != nil { log.Fatal().Caller().Err(err).Msg("Error initializing") } err = app.Serve() - if err != nil { - log.Fatal().Caller().Err(err).Msg("Error starting server") + if err != nil && !errors.Is(err, http.ErrServerClosed) { + log.Fatal().Caller().Err(err).Msg("Headscale ran into an error and had to shut down.") } }, } diff --git 
a/cmd/headscale/cli/users.go b/cmd/headscale/cli/users.go index e6463d6f..d04d7568 100644 --- a/cmd/headscale/cli/users.go +++ b/cmd/headscale/cli/users.go @@ -44,7 +44,7 @@ var createUserCmd = &cobra.Command{ userName := args[0] - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -63,8 +63,6 @@ var createUserCmd = &cobra.Command{ ), output, ) - - return } SuccessOutput(response.GetUser(), "User created", output) @@ -91,7 +89,7 @@ var destroyUserCmd = &cobra.Command{ Name: userName, } - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -102,8 +100,6 @@ var destroyUserCmd = &cobra.Command{ fmt.Sprintf("Error: %s", status.Convert(err).Message()), output, ) - - return } confirm := false @@ -134,8 +130,6 @@ var destroyUserCmd = &cobra.Command{ ), output, ) - - return } SuccessOutput(response, "User destroyed", output) } else { @@ -151,7 +145,7 @@ var listUsersCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { output, _ := cmd.Flags().GetString("output") - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -164,14 +158,10 @@ var listUsersCmd = &cobra.Command{ fmt.Sprintf("Cannot get users: %s", status.Convert(err).Message()), output, ) - - return } if output != "" { SuccessOutput(response.GetUsers(), "", output) - - return } tableData := pterm.TableData{{"ID", "Name", "Created"}} @@ -192,8 +182,6 @@ var listUsersCmd = &cobra.Command{ fmt.Sprintf("Failed to render pterm table: %s", err), output, ) - - return } }, } @@ -213,7 +201,7 @@ var renameUserCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { output, _ := cmd.Flags().GetString("output") - ctx, client, conn, cancel := getHeadscaleCLIClient() + ctx, client, conn, cancel := 
newHeadscaleCLIWithConfig() defer cancel() defer conn.Close() @@ -232,8 +220,6 @@ var renameUserCmd = &cobra.Command{ ), output, ) - - return } SuccessOutput(response.GetUser(), "User renamed", output) diff --git a/cmd/headscale/cli/utils.go b/cmd/headscale/cli/utils.go index a193d17d..ff1137be 100644 --- a/cmd/headscale/cli/utils.go +++ b/cmd/headscale/cli/utils.go @@ -6,11 +6,9 @@ import ( "encoding/json" "fmt" "os" - "reflect" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol" - "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" @@ -25,8 +23,8 @@ const ( SocketWritePermissions = 0o666 ) -func getHeadscaleApp() (*hscontrol.Headscale, error) { - cfg, err := types.GetHeadscaleConfig() +func newHeadscaleServerWithConfig() (*hscontrol.Headscale, error) { + cfg, err := types.LoadServerConfig() if err != nil { return nil, fmt.Errorf( "failed to load configuration while creating headscale instance: %w", @@ -39,26 +37,11 @@ func getHeadscaleApp() (*hscontrol.Headscale, error) { return nil, err } - // We are doing this here, as in the future could be cool to have it also hot-reload - - if cfg.ACL.PolicyPath != "" { - aclPath := util.AbsolutePathFromConfigPath(cfg.ACL.PolicyPath) - pol, err := policy.LoadACLPolicyFromPath(aclPath) - if err != nil { - log.Fatal(). - Str("path", aclPath). - Err(err). - Msg("Could not load the ACL policy") - } - - app.ACLPolicy = pol - } - return app, nil } -func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) { - cfg, err := types.GetHeadscaleConfig() +func newHeadscaleCLIWithConfig() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) { + cfg, err := types.LoadCLIConfig() if err != nil { log.Fatal(). Err(err). 
@@ -89,7 +72,7 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc. // Try to give the user better feedback if we cannot write to the headscale // socket. - socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) //nolint + socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) // nolint if err != nil { if os.IsPermission(err) { log.Fatal(). @@ -147,7 +130,7 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc. return ctx, client, conn, cancel } -func SuccessOutput(result interface{}, override string, outputFormat string) { +func output(result interface{}, override string, outputFormat string) string { var jsonBytes []byte var err error switch outputFormat { @@ -167,22 +150,27 @@ func SuccessOutput(result interface{}, override string, outputFormat string) { log.Fatal().Err(err).Msg("failed to unmarshal output") } default: - //nolint - fmt.Println(override) - - return + // nolint + return override } - //nolint - fmt.Println(string(jsonBytes)) + return string(jsonBytes) } +// SuccessOutput prints the result to stdout and exits with status code 0. +func SuccessOutput(result interface{}, override string, outputFormat string) { + fmt.Println(output(result, override, outputFormat)) + os.Exit(0) +} + +// ErrorOutput prints an error message to stderr and exits with status code 1. 
func ErrorOutput(errResult error, override string, outputFormat string) { type errOutput struct { Error string `json:"error"` } - SuccessOutput(errOutput{errResult.Error()}, override, outputFormat) + fmt.Fprintf(os.Stderr, "%s\n", output(errOutput{errResult.Error()}, override, outputFormat)) + os.Exit(1) } func HasMachineOutputFlag() bool { @@ -212,13 +200,3 @@ func (t tokenAuth) GetRequestMetadata( func (tokenAuth) RequireTransportSecurity() bool { return true } - -func contains[T string](ts []T, t T) bool { - for _, v := range ts { - if reflect.DeepEqual(v, t) { - return true - } - } - - return false -} diff --git a/cmd/headscale/headscale.go b/cmd/headscale/headscale.go index 3f3322e2..fa17bf6d 100644 --- a/cmd/headscale/headscale.go +++ b/cmd/headscale/headscale.go @@ -4,7 +4,7 @@ import ( "os" "time" - "github.com/efekarakus/termcolor" + "github.com/jagottsicher/termcolor" "github.com/juanfont/headscale/cmd/headscale/cli" "github.com/rs/zerolog" "github.com/rs/zerolog/log" diff --git a/cmd/headscale/headscale_test.go b/cmd/headscale/headscale_test.go index c27fa20a..00c4a276 100644 --- a/cmd/headscale/headscale_test.go +++ b/cmd/headscale/headscale_test.go @@ -4,7 +4,6 @@ import ( "io/fs" "os" "path/filepath" - "strings" "testing" "github.com/juanfont/headscale/hscontrol/types" @@ -63,7 +62,6 @@ func (*Suite) TestConfigFileLoading(c *check.C) { c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "") c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http") c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01") - c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1") c.Assert( util.GetFileMode("unix_socket_permission"), check.Equals, @@ -106,7 +104,6 @@ func (*Suite) TestConfigLoading(c *check.C) { c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "") c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http") 
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01") - c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1") c.Assert( util.GetFileMode("unix_socket_permission"), check.Equals, @@ -115,93 +112,3 @@ func (*Suite) TestConfigLoading(c *check.C) { c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false) c.Assert(viper.GetBool("randomize_client_port"), check.Equals, false) } - -func (*Suite) TestDNSConfigLoading(c *check.C) { - tmpDir, err := os.MkdirTemp("", "headscale") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - path, err := os.Getwd() - if err != nil { - c.Fatal(err) - } - - // Symlink the example config file - err = os.Symlink( - filepath.Clean(path+"/../../config-example.yaml"), - filepath.Join(tmpDir, "config.yaml"), - ) - if err != nil { - c.Fatal(err) - } - - // Load example config, it should load without validation errors - err = types.LoadConfig(tmpDir, false) - c.Assert(err, check.IsNil) - - dnsConfig, baseDomain := types.GetDNSConfig() - - c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1") - c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1") - c.Assert(dnsConfig.Proxied, check.Equals, true) - c.Assert(baseDomain, check.Equals, "example.com") -} - -func writeConfig(c *check.C, tmpDir string, configYaml []byte) { - // Populate a custom config file - configFile := filepath.Join(tmpDir, "config.yaml") - err := os.WriteFile(configFile, configYaml, 0o600) - if err != nil { - c.Fatalf("Couldn't write file %s", configFile) - } -} - -func (*Suite) TestTLSConfigValidation(c *check.C) { - tmpDir, err := os.MkdirTemp("", "headscale") - if err != nil { - c.Fatal(err) - } - // defer os.RemoveAll(tmpDir) - configYaml := []byte(`--- -tls_letsencrypt_hostname: example.com -tls_letsencrypt_challenge_type: "" -tls_cert_path: abc.pem -noise: - private_key_path: noise_private.key`) - writeConfig(c, tmpDir, configYaml) - - // Check configuration 
validation errors (1) - err = types.LoadConfig(tmpDir, false) - c.Assert(err, check.NotNil) - // check.Matches can not handle multiline strings - tmp := strings.ReplaceAll(err.Error(), "\n", "***") - c.Assert( - tmp, - check.Matches, - ".*Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both.*", - ) - c.Assert( - tmp, - check.Matches, - ".*Fatal config error: the only supported values for tls_letsencrypt_challenge_type are.*", - ) - c.Assert( - tmp, - check.Matches, - ".*Fatal config error: server_url must start with https:// or http://.*", - ) - - // Check configuration validation errors (2) - configYaml = []byte(`--- -noise: - private_key_path: noise_private.key -server_url: http://127.0.0.1:8080 -tls_letsencrypt_hostname: example.com -tls_letsencrypt_challenge_type: TLS-ALPN-01 -`) - writeConfig(c, tmpDir, configYaml) - err = types.LoadConfig(tmpDir, false) - c.Assert(err, check.IsNil) -} diff --git a/config-example.yaml b/config-example.yaml index ba81ba5d..04a2f342 100644 --- a/config-example.yaml +++ b/config-example.yaml @@ -61,6 +61,11 @@ prefixes: v6: fd7a:115c:a1e0::/48 v4: 100.64.0.0/10 + # Strategy used for allocation of IPs to nodes, available options: + # - sequential (default): assigns the next free IP from the previous given IP. + # - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand). + allocation: sequential + # DERP is a relay system that Tailscale uses when a direct # connection cannot be established. 
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp @@ -100,7 +105,7 @@ derp: automatically_add_embedded_derp_region: true # For better connection stability (especially when using an Exit-Node and DNS is not working), - # it is possible to optionall add the public IPv4 and IPv6 address to the Derp-Map using: + # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using: ipv4: 1.2.3.4 ipv6: 2001:db8::1 @@ -132,20 +137,40 @@ disable_check_updates: false # Time before an inactive ephemeral node is deleted? ephemeral_node_inactivity_timeout: 30m -# Period to check for node updates within the tailnet. A value too low will severely affect -# CPU consumption of Headscale. A value too high (over 60s) will cause problems -# for the nodes, as they won't get updates or keep alive messages frequently enough. -# In case of doubts, do not touch the default 10s. -node_update_check_interval: 10s - database: + # Database type. Available options: sqlite, postgres + # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons. + # All new development, testing and optimisations are done with SQLite in mind. type: sqlite + # Enable debug mode. This setting requires the log.level to be set to "debug" or "trace". + debug: false + + # GORM configuration settings. + gorm: + # Enable prepared statements. + prepare_stmt: true + + # Enable parameterized queries. + parameterized_queries: true + + # Skip logging "record not found" errors. + skip_err_record_not_found: true + + # Threshold for slow queries in milliseconds. + slow_threshold: 1000 + # SQLite config sqlite: path: /var/lib/headscale/db.sqlite + # Enable WAL mode for SQLite. This is recommended for production environments. + # https://www.sqlite.org/wal.html + write_ahead_log: true + # # Postgres config + # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons. 
+ # See database.type for more information. # postgres: # # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank. # host: localhost @@ -200,10 +225,17 @@ log: format: text level: info -# Path to a file containg ACL policies. -# ACLs can be defined as YAML or HUJSON. -# https://tailscale.com/kb/1018/acls/ -acl_policy_path: "" +## Policy +# headscale supports Tailscale's ACL policies. +# Please have a look at their KB to better +# understand the concepts: https://tailscale.com/kb/1018/acls/ +policy: + # The mode can be "file" or "database" that defines + # where the ACL policies are stored and read from. + mode: file + # If the mode is set to "file", the path to a + # HuJSON file containing ACL policies. + path: "" ## DNS # @@ -214,43 +246,60 @@ acl_policy_path: "" # - https://tailscale.com/kb/1081/magicdns/ # - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/ # -dns_config: - # Whether to prefer using Headscale provided DNS or use local. - override_local_dns: true +# Please note that for the DNS configuration to have any effect, +# clients must have the `--accept-dns=true` option enabled. This is the +# default for the Tailscale client. This option is enabled by default +# in the Tailscale client. +# +# Setting _any_ of the configuration and `--accept-dns=true` on the +# clients will integrate with the DNS manager on the client or +# overwrite /etc/resolv.conf. +# https://tailscale.com/kb/1235/resolv-conf +# +# If you want to stop Headscale from managing the DNS configuration, +# all the fields under `dns` should be set to empty values. +dns: + # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/). + # Only works if there is at least a nameserver defined. + magic_dns: true + + # Defines the base domain to create the hostnames for MagicDNS. + # This domain _must_ be different from the server_url domain. + # `base_domain` must be a FQDN, without the trailing dot.
+ # The FQDN of the hosts will be + # `hostname.base_domain` (e.g., _myhost.example.com_). + base_domain: example.com # List of DNS servers to expose to clients. nameservers: - - 1.1.1.1 + global: + - 1.1.1.1 + - 1.0.0.1 + - 2606:4700:4700::1111 + - 2606:4700:4700::1001 - # NextDNS (see https://tailscale.com/kb/1218/nextdns/). - # "abc123" is example NextDNS ID, replace with yours. - # - # With metadata sharing: - # nameservers: - # - https://dns.nextdns.io/abc123 - # - # Without metadata sharing: - # nameservers: - # - 2a07:a8c0::ab:c123 - # - 2a07:a8c1::ab:c123 + # NextDNS (see https://tailscale.com/kb/1218/nextdns/). + # "abc123" is example NextDNS ID, replace with yours. + # - https://dns.nextdns.io/abc123 - # Split DNS (see https://tailscale.com/kb/1054/dns/), - # list of search domains and the DNS to query for each one. - # - # restricted_nameservers: - # foo.bar.com: - # - 1.1.1.1 - # darp.headscale.net: - # - 1.1.1.1 - # - 8.8.8.8 + # Split DNS (see https://tailscale.com/kb/1054/dns/), + # a map of domains and which DNS server to use for each. + split: + {} + # foo.bar.com: + # - 1.1.1.1 + # darp.headscale.net: + # - 1.1.1.1 + # - 8.8.8.8 - # Search domains to inject. - domains: [] + # Set custom DNS search domains. With MagicDNS enabled, + # your tailnet base_domain is always the first search domain. + search_domains: [] # Extra DNS records # so far only A-records are supported (on the tailscale side) # See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations - # extra_records: + extra_records: [] # - name: "grafana.myvpn.example.com" # type: "A" # value: "100.64.0.3" @@ -258,15 +307,14 @@ dns_config: # # you can also put it in one line # - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" } - # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/). - # Only works if there is at least a nameserver defined. - magic_dns: true - - # Defines the base domain to create the hostnames for MagicDNS. 
- # `base_domain` must be a FQDNs, without the trailing dot. - # The FQDN of the hosts will be - # `hostname.user.base_domain` (e.g., _myhost.myuser.example.com_). - base_domain: example.com + # DEPRECATED + # Use the username as part of the DNS name for nodes, with this option enabled: + # node1.username.example.com + # while when this is disabled: + # node1.example.com + # This is a legacy option as Headscale has had this wrongly implemented + # while in upstream Tailscale, the username is not included. + use_username_in_magic_dns: false # Unix socket used for the CLI to connect without authentication # Note: for production you will want to set this to something like: diff --git a/docs/acls.md b/docs/acls.md index 096dbea0..4ab8fb46 100644 --- a/docs/acls.md +++ b/docs/acls.md @@ -3,7 +3,7 @@ Headscale implements the same policy ACLs as Tailscale.com, adapted to the self- For instance, instead of referring to users when defining groups you must use users (which are the equivalent to user/logins in Tailscale.com). -Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples. +Please check https://tailscale.com/kb/1018/acls/ for further information. When using ACL's the User borders are no longer applied. All machines whichever the User have the ability to communicate with other hosts as @@ -43,8 +43,7 @@ servers. Note: Users will be created automatically when users authenticate with the Headscale server. -ACLs could be written either on [huJSON](https://github.com/tailscale/hujson) -or YAML. Check the [test ACLs](../tests/acls) for further information. +ACLs have to be written in [huJSON](https://github.com/tailscale/hujson). When registering the servers we will need to add the flag `--advertise-tags=tag:<tag1>,tag:<tag2>`, and the user that is @@ -53,7 +52,7 @@ a server they can register, the check of the tags is done on headscale server and only valid tags are applied.
A tag is valid if the user that is registering it is allowed to do it. -To use ACLs in headscale, you must edit your config.yaml file. In there you will find a `acl_policy_path: ""` parameter. This will need to point to your ACL file. More info on how these policies are written can be found [here](https://tailscale.com/kb/1018/acls/). +To use ACLs in headscale, you must edit your `config.yaml` file. In there you will find a `policy.path` parameter. This will need to point to your ACL file. More info on how these policies are written can be found [here](https://tailscale.com/kb/1018/acls/). Here are the ACL's to implement the same permissions as above: diff --git a/docs/android-client.md b/docs/android-client.md index d4f8129c..044b9fcf 100644 --- a/docs/android-client.md +++ b/docs/android-client.md @@ -8,12 +8,9 @@ This documentation has the goal of showing how a user can use the official Andro Install the official Tailscale Android client from the [Google Play Store](https://play.google.com/store/apps/details?id=com.tailscale.ipn) or [F-Droid](https://f-droid.org/packages/com.tailscale.ipn/). -Ensure that the installed version is at least 1.30.0, as that is the first release to support custom URLs. - ## Configuring the headscale URL -After opening the app, the kebab menu icon (three dots) on the top bar on the right must be repeatedly opened and closed until the _Change server_ option appears in the menu. This is where you can enter your headscale URL. - -A screen recording of this process can be seen in the `tailscale-android` PR which implemented this functionality: <https://github.com/tailscale/tailscale-android/pull/55> - -After saving and restarting the app, selecting the regular _Sign in_ option (non-SSO) should open up the headscale authentication page. 
+- Open the app and select the settings menu in the upper-right corner +- Tap on `Accounts` +- In the kebab menu icon (three dots) in the upper-right corner select `Use an alternate server` +- Enter your server URL (e.g `https://headscale.example.com`) and follow the instructions diff --git a/docs/apple-client.md b/docs/apple-client.md new file mode 100644 index 00000000..29ad4b45 --- /dev/null +++ b/docs/apple-client.md @@ -0,0 +1,51 @@ +# Connecting an Apple client + +## Goal + +This documentation has the goal of showing how a user can use the official iOS and macOS [Tailscale](https://tailscale.com) clients with `headscale`. + +!!! info "Instructions on your headscale instance" + + An endpoint with information on how to connect your Apple device + is also available at `/apple` on your running instance. + +## iOS + +### Installation + +Install the official Tailscale iOS client from the [App Store](https://apps.apple.com/app/tailscale/id1470499037). + +### Configuring the headscale URL + +- Open Tailscale and make sure you are _not_ logged in to any account +- Open Settings on the iOS device +- Scroll down to the `third party apps` section, under `Game Center` or `TV Provider` +- Find Tailscale and select it + - If the iOS device was previously logged into Tailscale, switch the `Reset Keychain` toggle to `on` +- Enter the URL of your headscale instance (e.g `https://headscale.example.com`) under `Alternate Coordination Server URL` +- Restart the app by closing it from the iOS app switcher, open the app and select the regular sign in option + _(non-SSO)_. It should open up to the headscale authentication page. +- Enter your credentials and log in. Headscale should now be working on your iOS device. + +## macOS + +### Installation + +Choose one of the available [Tailscale clients for macOS](https://tailscale.com/kb/1065/macos-variants) and install it. 
+ +### Configuring the headscale URL + +#### Command line + +Use Tailscale's login command to connect with your headscale instance (e.g `https://headscale.example.com`): + +``` +tailscale login --login-server <YOUR_HEADSCALE_URL> +``` + +#### GUI + +- ALT + Click the Tailscale icon in the menu and hover over the Debug menu +- Under `Custom Login Server`, select `Add Account...` +- Enter the URL of your headscale instance (e.g `https://headscale.example.com`) and press `Add Account` +- Follow the login procedure in the browser diff --git a/docs/dns-records.md b/docs/dns-records.md index c5a07fe9..6c8fc42a 100644 --- a/docs/dns-records.md +++ b/docs/dns-records.md @@ -18,23 +18,25 @@ An example use case is to serve apps on the same host via a reverse proxy like N 1. Change the `config.yaml` to contain the desired records like so: -```yaml -dns_config: - ... - extra_records: - - name: "prometheus.myvpn.example.com" - type: "A" - value: "100.64.0.3" + ```yaml + dns: + ... + extra_records: + - name: "prometheus.myvpn.example.com" + type: "A" + value: "100.64.0.3" - - name: "grafana.myvpn.example.com" - type: "A" - value: "100.64.0.3" - ... -``` + - name: "grafana.myvpn.example.com" + type: "A" + value: "100.64.0.3" + ... + ``` -2. Restart your headscale instance. +1. Restart your headscale instance. -Beware of the limitations listed later on! + !!! warning + + Beware of the limitations listed later on! ### 2. 
Verify that the records are set diff --git a/docs/exit-node.md b/docs/exit-node.md index 898b7811..1acd20a3 100644 --- a/docs/exit-node.md +++ b/docs/exit-node.md @@ -5,7 +5,7 @@ Register the node and make it advertise itself as an exit node: ```console -$ sudo tailscale up --login-server https://my-server.com --advertise-exit-node +$ sudo tailscale up --login-server https://headscale.example.com --advertise-exit-node ``` If the node is already registered, it can advertise exit capabilities like this: @@ -14,28 +14,30 @@ If the node is already registered, it can advertise exit capabilities like this: $ sudo tailscale set --advertise-exit-node ``` -To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP fowarding. +To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP forwarding. ## On the control server ```console $ # list nodes $ headscale routes list -ID | Machine | Prefix | Advertised | Enabled | Primary -1 | | 0.0.0.0/0 | false | false | - -2 | | ::/0 | false | false | - -3 | phobos | 0.0.0.0/0 | true | false | - -4 | phobos | ::/0 | true | false | - +ID | Node | Prefix | Advertised | Enabled | Primary +1 | | 0.0.0.0/0 | false | false | - +2 | | ::/0 | false | false | - +3 | phobos | 0.0.0.0/0 | true | false | - +4 | phobos | ::/0 | true | false | - + $ # enable routes for phobos $ headscale routes enable -r 3 $ headscale routes enable -r 4 + $ # Check node list again. The routes are now enabled. 
$ headscale routes list -ID | Machine | Prefix | Advertised | Enabled | Primary -1 | | 0.0.0.0/0 | false | false | - -2 | | ::/0 | false | false | - -3 | phobos | 0.0.0.0/0 | true | true | - -4 | phobos | ::/0 | true | true | - +ID | Node | Prefix | Advertised | Enabled | Primary +1 | | 0.0.0.0/0 | false | false | - +2 | | ::/0 | false | false | - +3 | phobos | 0.0.0.0/0 | true | true | - +4 | phobos | ::/0 | true | true | - ``` ## On the client @@ -46,4 +48,4 @@ The exit node can now be used with: $ sudo tailscale set --exit-node phobos ``` -Check the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes/?q=exit#step-3-use-the-exit-node) for how to do it on your device. +Check the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes#use-the-exit-node) for how to do it on your device. diff --git a/docs/faq.md b/docs/faq.md index 6331c54a..2a459967 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -31,12 +31,12 @@ We are more than happy to exchange emails, or to have dedicated calls before a P ## When/Why is Feature X going to be implemented? -We don't know. We might be working on it. If you want to help, please send us a PR. +We don't know. We might be working on it. If you're interested in contributing, please post a feature request about it. Please be aware that there are a number of reasons why we might not accept specific contributions: - It is not possible to implement the feature in a way that makes sense in a self-hosted environment. -- Given that we are reverse-engineering Tailscale to satify our own curiosity, we might be interested in implementing the feature ourselves. +- Given that we are reverse-engineering Tailscale to satisfy our own curiosity, we might be interested in implementing the feature ourselves. - You are not sending unit and integration tests with it. ## Do you support Y method of deploying Headscale? @@ -51,3 +51,7 @@ For convenience, we also build Docker images with `headscale`. 
But **please be a ## Why is my reverse proxy not working with Headscale? We don't know. We don't use reverse proxies with `headscale` ourselves, so we don't have any experience with them. We have [community documentation](https://headscale.net/reverse-proxy/) on how to configure various reverse proxies, and a dedicated [Discord channel](https://discord.com/channels/896711691637780480/1070619818346164324) where you can ask for help to the community. + +## Can I use headscale and tailscale on the same machine? + +Running headscale on a machine that is also in the tailnet can cause problems with subnet routers, traffic relay nodes, and MagicDNS. It might work, but it is not supported. diff --git a/docs/glossary.md b/docs/glossary.md deleted file mode 100644 index f42941a6..00000000 --- a/docs/glossary.md +++ /dev/null @@ -1,6 +0,0 @@ -# Glossary - -| Term | Description | -| --------- | ------------------------------------------------------------------------------------------------------------------------------------------- | -| Machine | A machine is a single entity connected to `headscale`, typically an installation of Tailscale. Also known as **Node** | -| Namespace | A namespace was a logical grouping of machines "owned" by the same entity, in Tailscale, this is typically a User (This is now called user) | diff --git a/docs/iOS-client.md b/docs/iOS-client.md deleted file mode 100644 index 761dfcf0..00000000 --- a/docs/iOS-client.md +++ /dev/null @@ -1,30 +0,0 @@ -# Connecting an iOS client - -## Goal - -This documentation has the goal of showing how a user can use the official iOS [Tailscale](https://tailscale.com) client with `headscale`. - -## Installation - -Install the official Tailscale iOS client from the [App Store](https://apps.apple.com/app/tailscale/id1470499037). - -Ensure that the installed version is at least 1.38.1, as that is the first release to support alternate control servers. - -## Configuring the headscale URL - -!!! 
info "Apple devices" - - An endpoint with information on how to connect your Apple devices - (currently macOS only) is available at `/apple` on your running instance. - -Ensure that the tailscale app is logged out before proceeding. - -Go to iOS settings, scroll down past game center and tv provider to the tailscale app and select it. The headscale URL can be entered into the _"ALTERNATE COORDINATION SERVER URL"_ box. - -> **Note** -> -> If the app was previously logged into tailscale, toggle on the _Reset Keychain_ switch. - -Restart the app by closing it from the iOS app switcher, open the app and select the regular _Sign in_ option (non-SSO), and it should open up to the headscale authentication page. - -Enter your credentials and log in. Headscale should now be working on your iOS device. diff --git a/docs/images/headscale-sealos-grpc-url.png b/docs/images/headscale-sealos-grpc-url.png new file mode 100644 index 00000000..1b0df4f3 Binary files /dev/null and b/docs/images/headscale-sealos-grpc-url.png differ diff --git a/docs/images/headscale-sealos-url.png b/docs/images/headscale-sealos-url.png new file mode 100644 index 00000000..66233698 Binary files /dev/null and b/docs/images/headscale-sealos-url.png differ diff --git a/docs/images/windows-registry.png b/docs/images/windows-registry.png deleted file mode 100644 index 1324ca6c..00000000 Binary files a/docs/images/windows-registry.png and /dev/null differ diff --git a/docs/index.md b/docs/index.md index d13339d8..f1b6e1b1 100644 --- a/docs/index.md +++ b/docs/index.md @@ -8,7 +8,7 @@ hide: `headscale` is an open source, self-hosted implementation of the Tailscale control server. -This page contains the documentation for the latest version of headscale. Please also check our [FAQ](/faq/). +This page contains the documentation for the latest version of headscale. Please also check our [FAQ](faq.md). Join our [Discord](https://discord.gg/c84AZQhmpx) server for a chat and community support. 
@@ -31,12 +31,7 @@ buttons available in the repo. Headscale is "Open Source, acknowledged contribution", this means that any contribution will have to be discussed with the Maintainers before being submitted. -This model has been chosen to reduce the risk of burnout by limiting the -maintenance overhead of reviewing and validating third-party code. - -Headscale is open to code contributions for bug fixes without discussion. - -If you find mistakes in the documentation, please submit a fix to the documentation. +Please see [CONTRIBUTING.md](https://github.com/juanfont/headscale/blob/main/CONTRIBUTING.md) for more information. ## About diff --git a/docs/packaging/headscale.systemd.service b/docs/packaging/headscale.systemd.service index 14e31618..37d5f5d3 100644 --- a/docs/packaging/headscale.systemd.service +++ b/docs/packaging/headscale.systemd.service @@ -9,6 +9,7 @@ Type=simple User=headscale Group=headscale ExecStart=/usr/bin/headscale serve +ExecReload=/usr/bin/kill -HUP $MAINPID Restart=always RestartSec=5 diff --git a/docs/proposals/001-acls.md b/docs/proposals/001-acls.md deleted file mode 100644 index 8a02e836..00000000 --- a/docs/proposals/001-acls.md +++ /dev/null @@ -1,362 +0,0 @@ -# ACLs - -A key component of tailscale is the notion of Tailnet. This notion is hidden -but the implications that it have on how to use tailscale are not. - -For tailscale an [tailnet](https://tailscale.com/kb/1136/tailnet/) is the -following: - -> For personal users, you are a tailnet of many devices and one person. Each -> device gets a private Tailscale IP address in the CGNAT range and every -> device can talk directly to every other device, wherever they are on the -> internet. -> -> For businesses and organizations, a tailnet is many devices and many users. -> It can be based on your Microsoft Active Directory, your Google Workspace, a -> GitHub organization, Okta tenancy, or other identity provider namespace. 
All -> of the devices and users in your tailnet can be seen by the tailnet -> administrators in the Tailscale admin console. There you can apply -> tailnet-wide configuration, such as ACLs that affect visibility of devices -> inside your tailnet, DNS settings, and more. - -## Current implementation and issues - -Currently in headscale, the namespaces are used both as tailnet and users. The -issue is that if we want to use the ACL's we can't use both at the same time. - -Tailnet's cannot communicate with each others. So we can't have an ACL that -authorize tailnet (namespace) A to talk to tailnet (namespace) B. - -We also can't write ACLs based on the users (namespaces in headscale) since all -devices belong to the same user. - -With the current implementation the only ACL that we can user is to associate -each headscale IP to a host manually then write the ACLs according to this -manual mapping. - -```json -{ - "hosts": { - "host1": "100.64.0.1", - "server": "100.64.0.2" - }, - "acls": [ - { "action": "accept", "users": ["host1"], "ports": ["host2:80,443"] } - ] -} -``` - -While this works, it requires a lot of manual editing on the configuration and -to keep track of all devices IP address. - -## Proposition for a next implementation - -In order to ease the use of ACL's we need to split the tailnet and users -notion. - -A solution could be to consider a headscale server (in it's entirety) as a -tailnet. - -For personal users the default behavior could either allow all communications -between all namespaces (like tailscale) or dissallow all communications between -namespaces (current behavior). - -For businesses and organisations, viewing a headscale instance a single tailnet -would allow users (namespace) to talk to each other with the ACLs. As described -in tailscale's documentation [[1]], a server should be tagged and personnal -devices should be tied to a user. 
Translated in headscale's terms each user can -have multiple devices and all those devices should be in the same namespace. -The servers should be tagged and used as such. - -This implementation would render useless the sharing feature that is currently -implemented since an ACL could do the same. Simplifying to only one user -interface to do one thing is easier and less confusing for the users. - -To better suit the ACLs in this proposition, it's advised to consider that each -namespaces belong to one person. This person can have multiple devices, they -will all be considered as the same user in the ACLs. OIDC feature wouldn't need -to map people to namespace, just create a namespace if the person isn't -registered yet. - -As a sidenote, users would like to write ACLs as YAML. We should offer users -the ability to rules in either format (HuJSON or YAML). - -[1]: https://tailscale.com/kb/1068/acl-tags/ - -## Example - -Let's build an example use case for a small business (It may be the place where -ACL's are the most useful). - -We have a small company with a boss, an admin, two developper and an intern. - -The boss should have access to all servers but not to the users hosts. Admin -should also have access to all hosts except that their permissions should be -limited to maintaining the hosts (for example purposes). The developers can do -anything they want on dev hosts, but only watch on productions hosts. Intern -can only interact with the development servers. - -Each user have at least a device connected to the network and we have some -servers. 
- -- database.prod -- database.dev -- app-server1.prod -- app-server1.dev -- billing.internal - -### Current headscale implementation - -Let's create some namespaces - -```bash -headscale namespaces create prod -headscale namespaces create dev -headscale namespaces create internal -headscale namespaces create users - -headscale nodes register -n users boss-computer -headscale nodes register -n users admin1-computer -headscale nodes register -n users dev1-computer -headscale nodes register -n users dev1-phone -headscale nodes register -n users dev2-computer -headscale nodes register -n users intern1-computer - -headscale nodes register -n prod database -headscale nodes register -n prod app-server1 - -headscale nodes register -n dev database -headscale nodes register -n dev app-server1 - -headscale nodes register -n internal billing - -headscale nodes list -ID | Name | Namespace | IP address -1 | boss-computer | users | 100.64.0.1 -2 | admin1-computer | users | 100.64.0.2 -3 | dev1-computer | users | 100.64.0.3 -4 | dev1-phone | users | 100.64.0.4 -5 | dev2-computer | users | 100.64.0.5 -6 | intern1-computer | users | 100.64.0.6 -7 | database | prod | 100.64.0.7 -8 | app-server1 | prod | 100.64.0.8 -9 | database | dev | 100.64.0.9 -10 | app-server1 | dev | 100.64.0.10 -11 | internal | internal | 100.64.0.11 -``` - -In order to only allow the communications related to our description above we -need to add the following ACLs - -```json -{ - "hosts": { - "boss-computer": "100.64.0.1", - "admin1-computer": "100.64.0.2", - "dev1-computer": "100.64.0.3", - "dev1-phone": "100.64.0.4", - "dev2-computer": "100.64.0.5", - "intern1-computer": "100.64.0.6", - "prod-app-server1": "100.64.0.8" - }, - "groups": { - "group:dev": ["dev1-computer", "dev1-phone", "dev2-computer"], - "group:admin": ["admin1-computer"], - "group:boss": ["boss-computer"], - "group:intern": ["intern1-computer"] - }, - "acls": [ - // boss have access to all servers but no users hosts - { - "action": 
"accept", - "users": ["group:boss"], - "ports": ["prod:*", "dev:*", "internal:*"] - }, - - // admin have access to adminstration port (lets only consider port 22 here) - { - "action": "accept", - "users": ["group:admin"], - "ports": ["prod:22", "dev:22", "internal:22"] - }, - - // dev can do anything on dev servers and check access on prod servers - { - "action": "accept", - "users": ["group:dev"], - "ports": ["dev:*", "prod-app-server1:80,443"] - }, - - // interns only have access to port 80 and 443 on dev servers (lame internship) - { "action": "accept", "users": ["group:intern"], "ports": ["dev:80,443"] }, - - // users can access their own devices - { - "action": "accept", - "users": ["dev1-computer"], - "ports": ["dev1-phone:*"] - }, - { - "action": "accept", - "users": ["dev1-phone"], - "ports": ["dev1-computer:*"] - }, - - // internal namespace communications should still be allowed within the namespace - { "action": "accept", "users": ["dev"], "ports": ["dev:*"] }, - { "action": "accept", "users": ["prod"], "ports": ["prod:*"] }, - { "action": "accept", "users": ["internal"], "ports": ["internal:*"] } - ] -} -``` - -Since communications between namespace isn't possible we also have to share the -devices between the namespaces. - -```bash - -// add boss host to prod, dev and internal network -headscale nodes share -i 1 -n prod -headscale nodes share -i 1 -n dev -headscale nodes share -i 1 -n internal - -// add admin computer to prod, dev and internal network -headscale nodes share -i 2 -n prod -headscale nodes share -i 2 -n dev -headscale nodes share -i 2 -n internal - -// add all dev to prod and dev network -headscale nodes share -i 3 -n dev -headscale nodes share -i 4 -n dev -headscale nodes share -i 3 -n prod -headscale nodes share -i 4 -n prod -headscale nodes share -i 5 -n dev -headscale nodes share -i 5 -n prod - -headscale nodes share -i 6 -n dev -``` - -This fake network have not been tested but it should work. 
Operating it could -be quite tedious if the company grows. Each time a new user join we have to add -it to a group, and share it to the correct namespaces. If the user want -multiple devices we have to allow communication to each of them one by one. If -business conduct a change in the organisations we may have to rewrite all acls -and reorganise all namespaces. - -If we add servers in production we should also update the ACLs to allow dev -access to certain category of them (only app servers for example). - -### example based on the proposition in this document - -Let's create the namespaces - -```bash -headscale namespaces create boss -headscale namespaces create admin1 -headscale namespaces create dev1 -headscale namespaces create dev2 -headscale namespaces create intern1 -``` - -We don't need to create namespaces for the servers because the servers will be -tagged. When registering the servers we will need to add the flag -`--advertised-tags=tag:<tag1>,tag:<tag2>`, and the user (namespace) that is -registering the server should be allowed to do it. Since anyone can add tags to -a server they can register, the check of the tags is done on headscale server -and only valid tags are applied. A tag is valid if the namespace that is -registering it is allowed to do it. 
- -Here are the ACL's to implement the same permissions as above: - -```json -{ - // groups are simpler and only list the namespaces name - "groups": { - "group:boss": ["boss"], - "group:dev": ["dev1", "dev2"], - "group:admin": ["admin1"], - "group:intern": ["intern1"] - }, - "tagOwners": { - // the administrators can add servers in production - "tag:prod-databases": ["group:admin"], - "tag:prod-app-servers": ["group:admin"], - - // the boss can tag any server as internal - "tag:internal": ["group:boss"], - - // dev can add servers for dev purposes as well as admins - "tag:dev-databases": ["group:admin", "group:dev"], - "tag:dev-app-servers": ["group:admin", "group:dev"] - - // interns cannot add servers - }, - "acls": [ - // boss have access to all servers - { - "action": "accept", - "users": ["group:boss"], - "ports": [ - "tag:prod-databases:*", - "tag:prod-app-servers:*", - "tag:internal:*", - "tag:dev-databases:*", - "tag:dev-app-servers:*" - ] - }, - - // admin have only access to administrative ports of the servers - { - "action": "accept", - "users": ["group:admin"], - "ports": [ - "tag:prod-databases:22", - "tag:prod-app-servers:22", - "tag:internal:22", - "tag:dev-databases:22", - "tag:dev-app-servers:22" - ] - }, - - { - "action": "accept", - "users": ["group:dev"], - "ports": [ - "tag:dev-databases:*", - "tag:dev-app-servers:*", - "tag:prod-app-servers:80,443" - ] - }, - - // servers should be able to talk to database. 
Database should not be able to initiate connections to server - { - "action": "accept", - "users": ["tag:dev-app-servers"], - "ports": ["tag:dev-databases:5432"] - }, - { - "action": "accept", - "users": ["tag:prod-app-servers"], - "ports": ["tag:prod-databases:5432"] - }, - - // interns have access to dev-app-servers only in reading mode - { - "action": "accept", - "users": ["group:intern"], - "ports": ["tag:dev-app-servers:80,443"] - }, - - // we still have to allow internal namespaces communications since nothing guarantees that each user have their own namespaces. This could be talked over. - { "action": "accept", "users": ["boss"], "ports": ["boss:*"] }, - { "action": "accept", "users": ["dev1"], "ports": ["dev1:*"] }, - { "action": "accept", "users": ["dev2"], "ports": ["dev2:*"] }, - { "action": "accept", "users": ["admin1"], "ports": ["admin1:*"] }, - { "action": "accept", "users": ["intern1"], "ports": ["intern1:*"] } - ] -} -``` - -With this implementation, the sharing step is not necessary. Maintenance cost -of the ACL file is lower and less tedious (no need to map hostname and IP's -into it). diff --git a/docs/proposals/002-better-routing.md b/docs/proposals/002-better-routing.md deleted file mode 100644 index c56a38ff..00000000 --- a/docs/proposals/002-better-routing.md +++ /dev/null @@ -1,48 +0,0 @@ -# Better route management - -As of today, route management in Headscale is very basic and does not allow for much flexibility, including implementing subnet HA, 4via6 or more advanced features. We also have a number of bugs (e.g., routes exposed by ephemeral nodes) - -This proposal aims to improve the route management. - -## Current situation - -Routes advertised by the nodes are read from the Hostinfo struct. If approved from the the CLI or via autoApprovers, the route is added to the EnabledRoutes field in `Machine`. - -This means that the advertised routes are not persisted in the database, as Hostinfo is always replaced. 
In the same way, EnabledRoutes can get out of sync with the actual routes in the node. - -In case of colliding routes (i.e., subnets that are exposed from multiple nodes), we are currently just sending all of them in `PrimaryRoutes`... and hope for the best. (`PrimaryRoutes` is the field in `Node` used for subnet failover). - -## Proposal - -The core part is to create a new `Route` struct (and DB table), with the following fields: - -```go -type Route struct { - ID uint64 `gorm:"primary_key"` - - Machine *Machine - Prefix IPPrefix - - Advertised bool - Enabled bool - IsPrimary bool - - - CreatedAt *time.Time - UpdatedAt *time.Time - DeletedAt *time.Time -} -``` - -- The `Advertised` field is set to true if the route is being advertised by the node. It is set to false if the route is removed. This way we can indicate if a later enabled route has stopped being advertised. A similar behaviour happens in the Tailscale.com control panel. - -- The `Enabled` field is set to true if the route is enabled - via CLI or autoApprovers. - -- `IsPrimary` indicates if Headscale has selected this route as the primary route for that particular subnet. This allows us to implement subnet failover. This would be fully automatic if there is more than subnet routers advertising the same network - which is the behaviour of Tailscale.com. - -## Stuff to bear in mind - -- We need to make sure to migrate the current `EnabledRoutes` of `Machine` into the new table. -- When a node stops sharing a subnet, I reckon we should mark it both as not `Advertised` and not `Enabled`. Users should re-enable it if the node advertises it again. -- If only one subnet router is advertising a subnet, we should mark it as primary. -- Regarding subnet failover, the current behaviour of Tailscale.com is to perform the failover after 15 seconds from the node disconnecting from their control panel. I reckon we cannot do the same currently. Our maximum granularity is the keep alive period. 
diff --git a/docs/remote-cli.md b/docs/remote-cli.md index 96a6333a..c641b789 100644 --- a/docs/remote-cli.md +++ b/docs/remote-cli.md @@ -1,13 +1,13 @@ # Controlling `headscale` with remote CLI -## Prerequisit +## Prerequisite - A workstation to run `headscale` (could be Linux, macOS, other supported platforms) - A `headscale` server (version `0.13.0` or newer) - Access to create API keys (local access to the `headscale` server) - `headscale` _must_ be served over TLS/HTTPS - Remote access does _not_ support unencrypted traffic. -- Port `50443` must be open in the firewall (or port overriden by `grpc_listen_addr` option) +- Port `50443` must be open in the firewall (or port overridden by `grpc_listen_addr` option) ## Goal @@ -47,40 +47,40 @@ headscale apikeys expire --prefix "<PREFIX>" 3. Make `headscale` executable: -```shell -chmod +x /usr/local/bin/headscale -``` + ```shell + chmod +x /usr/local/bin/headscale + ``` -4. Configure the CLI through Environment Variables +4. Configure the CLI through environment variables -```shell -export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:<PORT>" -export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>" -``` + ```shell + export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:<PORT>" + export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>" + ``` -for example: + for example: -```shell -export HEADSCALE_CLI_ADDRESS="headscale.example.com:50443" -export HEADSCALE_CLI_API_KEY="abcde12345" -``` + ```shell + export HEADSCALE_CLI_ADDRESS="headscale.example.com:50443" + export HEADSCALE_CLI_API_KEY="abcde12345" + ``` -This will tell the `headscale` binary to connect to a remote instance, instead of looking -for a local instance (which is what it does on the server). + This will tell the `headscale` binary to connect to a remote instance, instead of looking + for a local instance (which is what it does on the server). -The API key is needed to make sure that your are allowed to access the server. 
The key is _not_ -needed when running directly on the server, as the connection is local. + The API key is needed to make sure that you are allowed to access the server. The key is _not_ + needed when running directly on the server, as the connection is local. 5. Test the connection -Let us run the headscale command to verify that we can connect by listing our nodes: + Let us run the headscale command to verify that we can connect by listing our nodes: -```shell -headscale nodes list -``` + ```shell + headscale nodes list + ``` -You should now be able to see a list of your nodes from your workstation, and you can -now control the `headscale` server from your workstation. + You should now be able to see a list of your nodes from your workstation, and you can + now control the `headscale` server from your workstation. ## Behind a proxy @@ -97,4 +97,4 @@ Checklist: - Make sure you use version `0.13.0` or newer. - Verify that your TLS certificate is valid and trusted - If you do not have access to a trusted certificate (e.g. from Let's Encrypt), add your self signed certificate to the trust store of your OS or - - Set `HEADSCALE_CLI_INSECURE` to 0 in your environement + - Set `HEADSCALE_CLI_INSECURE` to 0 in your environment diff --git a/docs/requirements.txt b/docs/requirements.txt index 32bd08c1..bcbf7c0e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,4 @@ cairosvg~=2.7.1 -mkdocs-material~=9.4.14 +mkdocs-material~=9.5.18 mkdocs-minify-plugin~=0.7.1 pillow~=10.1.0 - diff --git a/docs/reverse-proxy.md b/docs/reverse-proxy.md index aab9f848..b042b348 100644 --- a/docs/reverse-proxy.md +++ b/docs/reverse-proxy.md @@ -11,9 +11,13 @@ Running headscale behind a reverse proxy is useful when running multiple applica ### WebSockets -The reverse proxy MUST be configured to support WebSockets, as it is needed for clients running Tailscale v1.30+. +The reverse proxy MUST be configured to support WebSockets to communicate with Tailscale clients. 
-WebSockets support is required when using the headscale embedded DERP server. In this case, you will also need to expose the UDP port used for STUN (by default, udp/3478). Please check our [config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml). +WebSockets support is also required when using the headscale embedded DERP server. In this case, you will also need to expose the UDP port used for STUN (by default, udp/3478). Please check our [config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml). + +### Cloudflare + +Running headscale behind a Cloudflare proxy or Cloudflare Tunnel is not supported and will not work as Cloudflare does not support WebSocket POSTs as required by the Tailscale protocol. See [this issue](https://github.com/juanfont/headscale/issues/1468). ### TLS @@ -33,8 +37,7 @@ The following example configuration can be used in your nginx setup, substitutin ```Nginx map $http_upgrade $connection_upgrade { - default keep-alive; - 'websocket' upgrade; + default upgrade; '' close; } @@ -61,7 +64,7 @@ server { proxy_buffering off; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header X-Forwarded-Proto $scheme; add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always; } } @@ -77,7 +80,7 @@ Sending local reply with details upgrade_failed ### Envoy -You need add a new upgrade_type named `tailscale-control-protocol`. [see detail](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-upgradeconfig) +You need to add a new upgrade_type named `tailscale-control-protocol`. 
[see details](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-upgradeconfig) ### Istio @@ -116,7 +119,7 @@ The following Caddyfile is all that is necessary to use Caddy as a reverse proxy } ``` -Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certficate for your domain/subdomain, force HTTPS, and proxy websockets - no further configuration is necessary. +Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certificate for your domain/subdomain, force HTTPS, and proxy websockets - no further configuration is necessary. For a slightly more complex configuration which utilizes Docker containers to manage Caddy, Headscale, and Headscale-UI, [Guru Computing's guide](https://blog.gurucomputing.com.au/smart-vpns-with-headscale/) is an excellent reference. diff --git a/docs/running-headscale-container.md b/docs/running-headscale-container.md index 862ba032..087dae30 100644 --- a/docs/running-headscale-container.md +++ b/docs/running-headscale-container.md @@ -17,107 +17,93 @@ not work with alternatives like [Podman](https://podman.io). The Docker image ca 1. Prepare a directory on the host Docker node in your directory of choice, used to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database: -```shell -mkdir -p ./headscale/config -cd ./headscale -``` + ```shell + mkdir -p ./headscale/config + cd ./headscale + ``` -2. Create an empty SQlite datebase in the headscale directory: +1. **(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository. -```shell -touch ./config/db.sqlite -``` + - Using `wget`: -3. 
**(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository. + ```shell + wget -O ./config/config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml + ``` -Using wget: + - Using `curl`: -```shell -wget -O ./config/config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml -``` + ```shell + curl https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml -o ./config/config.yaml + ``` -Using curl: + Modify the config file to your preferences before launching Docker container. -```shell -curl https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml -o ./config/config.yaml -``` + Alternatively, you can mount `/var/lib` and `/var/run` from your host system by adding + `--volume $(pwd)/lib:/var/lib/headscale` and `--volume $(pwd)/run:/var/run/headscale` + in the next step. -**(Advanced)** If you would like to hand craft a config file **instead** of downloading the example config file, create a blank `headscale` configuration in the headscale directory to edit: +1. Start the headscale server while working in the host headscale directory: -```shell -touch ./config/config.yaml -``` + ```shell + docker run \ + --name headscale \ + --detach \ + --volume $(pwd)/config:/etc/headscale/ \ + --publish 127.0.0.1:8080:8080 \ + --publish 127.0.0.1:9090:9090 \ + headscale/headscale:<VERSION> \ + serve + ``` -Modify the config file to your preferences before launching Docker container. -Here are some settings that you likely want: + Note: use `0.0.0.0:8080:8080` instead of `127.0.0.1:8080:8080` if you want to expose the container externally. 
-```yaml -# Change to your hostname or host IP -server_url: http://your-host-name:8080 -# Listen to 0.0.0.0 so it's accessible outside the container -metrics_listen_addr: 0.0.0.0:9090 -# The default /var/lib/headscale path is not writable in the container -noise: - private_key_path: /etc/headscale/noise_private.key -# The default /var/lib/headscale path is not writable in the container -derp: - private_key_path: /etc/headscale/private.key -# The default /var/run/headscale path is not writable in the container -unix_socket: /etc/headscale/headscale.sock -# The default /var/lib/headscale path is not writable in the container -database.type: sqlite3 -database.sqlite.path: /etc/headscale/db.sqlite -``` + This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the + `headscale` instance becomes available and then detach so headscale runs in the background. -Alternatively, you can mount `/var/lib` and `/var/run` from your host system by adding -`--volume $(pwd)/lib:/var/lib/headscale` and `--volume $(pwd)/run:/var/run/headscale` -in the next step. + Example `docker-compose.yaml` -4. Start the headscale server while working in the host headscale directory: + ```yaml + version: "3.7" -```shell -docker run \ - --name headscale \ - --detach \ - --volume $(pwd)/config:/etc/headscale/ \ - --publish 127.0.0.1:8080:8080 \ - --publish 127.0.0.1:9090:9090 \ - headscale/headscale:<VERSION> \ - headscale serve + services: + headscale: + image: headscale/headscale:<VERSION> + restart: unless-stopped + container_name: headscale + ports: + - "127.0.0.1:8080:8080" + - "127.0.0.1:9090:9090" + volumes: + # Please change <CONFIG_PATH> to the fullpath of the config folder just created + - <CONFIG_PATH>:/etc/headscale + command: serve + ``` -``` +1. Verify `headscale` is running: + Follow the container logs: -Note: use `0.0.0.0:8080:8080` instead of `127.0.0.1:8080:8080` if you want to expose the container externally. 
+ ```shell + docker logs --follow headscale + ``` -This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the -`headscale` instance becomes available and then detach so headscale runs in the background. + Verify running containers: -5. Verify `headscale` is running: + ```shell + docker ps + ``` -Follow the container logs: + Verify `headscale` is available: -```shell -docker logs --follow headscale -``` + ```shell + curl http://127.0.0.1:9090/metrics + ``` -Verify running containers: +1. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)): -```shell -docker ps -``` - -Verify `headscale` is available: - -```shell -curl http://127.0.0.1:9090/metrics -``` - -6. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)): - -```shell -docker exec headscale \ - headscale users create myfirstuser -``` + ```shell + docker exec headscale \ + headscale users create myfirstuser + ``` ### Register a machine (normal login) @@ -131,7 +117,7 @@ To register a machine when running `headscale` in a container, take the headscal ```shell docker exec headscale \ - headscale --user myfirstuser nodes register --key <YOU_+MACHINE_KEY> + headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY> ``` ### Register machine using a pre authenticated key @@ -140,7 +126,7 @@ Generate a key using the command line: ```shell docker exec headscale \ - headscale --user myfirstuser preauthkeys create --reusable --expiration 24h + headscale preauthkeys create --user myfirstuser --reusable --expiration 24h ``` This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command: @@ -159,7 +145,7 @@ To run the debug Docker container, use the exact same commands as above, but rep ### Executing commands in the debug container -The default command in the debug container is to run `headscale`, which is located at `/bin/headscale` inside the container. 
+The default command in the debug container is to run `headscale`, which is located at `/ko-app/headscale` inside the container. Additionally, the debug container includes a minimalist Busybox shell. @@ -169,10 +155,10 @@ To launch a shell in the container, use: docker run -it headscale/headscale:x.x.x-debug sh ``` -You can also execute commands directly, such as `ls /bin` in this example: +You can also execute commands directly, such as `ls /ko-app` in this example: ``` -docker run headscale/headscale:x.x.x-debug ls /bin +docker run headscale/headscale:x.x.x-debug ls /ko-app ``` Using `docker exec` allows you to run commands in an existing container. diff --git a/docs/running-headscale-linux-manual.md b/docs/running-headscale-linux-manual.md index 20e88d54..3a0d91e0 100644 --- a/docs/running-headscale-linux-manual.md +++ b/docs/running-headscale-linux-manual.md @@ -1,92 +1,85 @@ # Running headscale on Linux -## Note: Outdated and "advanced" +!!! warning "Outdated and advanced" -This documentation is considered the "legacy"/advanced/manual version of the documentation, you most likely do not -want to use this documentation and rather look at the distro specific documentation (TODO LINK)[]. + This documentation is considered the "legacy"/advanced/manual version of the documentation, you most likely do not + want to use this documentation and should rather look at the [distro specific documentation](./running-headscale-linux.md). ## Goal This documentation has the goal of showing a user how-to set up and run `headscale` on Linux. -In additional to the "get up and running section", there is an optional [SystemD section](#running-headscale-in-the-background-with-systemd) +In addition to the "get up and running section", there is an optional [systemd section](#running-headscale-in-the-background-with-systemd) describing how to make `headscale` run properly in a server environment. ## Configure and run `headscale` 1. 
Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases): -```shell -wget --output-document=/usr/local/bin/headscale \ - https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH> -``` + ```shell + wget --output-document=/usr/local/bin/headscale \ + https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH> + ``` -2. Make `headscale` executable: +1. Make `headscale` executable: -```shell -chmod +x /usr/local/bin/headscale -``` + ```shell + chmod +x /usr/local/bin/headscale + ``` -3. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database: +1. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database: -```shell -# Directory for configuration + ```shell + # Directory for configuration -mkdir -p /etc/headscale + mkdir -p /etc/headscale -# Directory for Database, and other variable data (like certificates) -mkdir -p /var/lib/headscale -# or if you create a headscale user: -useradd \ - --create-home \ - --home-dir /var/lib/headscale/ \ - --system \ - --user-group \ - --shell /usr/sbin/nologin \ - headscale -``` + # Directory for Database, and other variable data (like certificates) + mkdir -p /var/lib/headscale + # or if you create a headscale user: + useradd \ + --create-home \ + --home-dir /var/lib/headscale/ \ + --system \ + --user-group \ + --shell /usr/sbin/nologin \ + headscale + ``` -4. Create an empty SQLite database: +1. Create a `headscale` configuration: -```shell -touch /var/lib/headscale/db.sqlite -``` + ```shell + touch /etc/headscale/config.yaml + ``` -5. Create a `headscale` configuration: + **(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository. 
-```shell -touch /etc/headscale/config.yaml -``` +1. Start the headscale server: -**(Strongly Recommended)** Download a copy of the [example configuration][config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository. + ```shell + headscale serve + ``` -6. Start the headscale server: + This command will start `headscale` in the current terminal session. -```shell -headscale serve -``` + --- -This command will start `headscale` in the current terminal session. + To continue the tutorial, open a new terminal and let it run in the background. + Alternatively use terminal emulators like [tmux](https://github.com/tmux/tmux) or [screen](https://www.gnu.org/software/screen/). ---- + To run `headscale` in the background, please follow the steps in the [systemd section](#running-headscale-in-the-background-with-systemd) before continuing. -To continue the tutorial, open a new terminal and let it run in the background. -Alternatively use terminal emulators like [tmux](https://github.com/tmux/tmux) or [screen](https://www.gnu.org/software/screen/). +1. Verify `headscale` is running: + Verify `headscale` is available: -To run `headscale` in the background, please follow the steps in the [SystemD section](#running-headscale-in-the-background-with-systemd) before continuing. + ```shell + curl http://127.0.0.1:9090/metrics + ``` -7. Verify `headscale` is running: +1. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)): -Verify `headscale` is available: - -```shell -curl http://127.0.0.1:9090/metrics -``` - -8. 
Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)): - -```shell -headscale users create myfirstuser -``` + ```shell + headscale users create myfirstuser + ``` ### Register a machine (normal login) @@ -99,7 +92,7 @@ tailscale up --login-server YOUR_HEADSCALE_URL Register the machine: ```shell -headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY> +headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY> ``` ### Register machine using a pre authenticated key @@ -107,7 +100,7 @@ headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY> Generate a key using the command line: ```shell -headscale --user myfirstuser preauthkeys create --reusable --expiration 24h +headscale preauthkeys create --user myfirstuser --reusable --expiration 24h ``` This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command: @@ -116,83 +109,55 @@ This will return a pre-authenticated key that can be used to connect a node to ` tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY> ``` -## Running `headscale` in the background with SystemD +## Running `headscale` in the background with systemd -:warning: **Deprecated**: This part is very outdated and you should use the [pre-packaged Headscale for this](./running-headscale-linux.md - -This section demonstrates how to run `headscale` as a service in the background with [SystemD](https://www.freedesktop.org/wiki/Software/systemd/). +This section demonstrates how to run `headscale` as a service in the background with [systemd](https://systemd.io/). This should work on most modern Linux distributions. -1. Create a SystemD service configuration at `/etc/systemd/system/headscale.service` containing: +1. Copy [headscale's systemd service file](./packaging/headscale.systemd.service) to + `/etc/systemd/system/headscale.service` and adjust it to suit your local setup. 
The following parameters likely need + to be modified: `ExecStart`, `WorkingDirectory`, `ReadWritePaths`. -```systemd -[Unit] -Description=headscale controller -After=syslog.target -After=network.target + Note that when running as the headscale user ensure that, either you add your current user to the headscale group: -[Service] -Type=simple -User=headscale -Group=headscale -ExecStart=/usr/local/bin/headscale serve -Restart=always -RestartSec=5 + ```shell + usermod -a -G headscale current_user + ``` -# Optional security enhancements -NoNewPrivileges=yes -PrivateTmp=yes -ProtectSystem=strict -ProtectHome=yes -WorkingDirectory=/var/lib/headscale -ReadWritePaths=/var/lib/headscale /var/run/headscale -AmbientCapabilities=CAP_NET_BIND_SERVICE -RuntimeDirectory=headscale + or run all headscale commands as the headscale user: -[Install] -WantedBy=multi-user.target -``` + ```shell + su - headscale + ``` -Note that when running as the headscale user ensure that, either you add your current user to the headscale group: +1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with path that is writable by the `headscale` user or group: -```shell -usermod -a -G headscale current_user -``` + ```yaml + unix_socket: /var/run/headscale/headscale.sock + ``` -or run all headscale commands as the headscale user: +1. Reload systemd to load the new configuration file: -```shell -su - headscale -``` + ```shell + systemctl daemon-reload + ``` -2. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with path that is writable by the `headscale` user or group: +1. Enable and start the new `headscale` service: -```yaml -unix_socket: /var/run/headscale/headscale.sock -``` + ```shell + systemctl enable --now headscale + ``` -3. Reload SystemD to load the new configuration file: +1. Verify the headscale service: -```shell -systemctl daemon-reload -``` + ```shell + systemctl status headscale + ``` -4. 
Enable and start the new `headscale` service: + Verify `headscale` is available: -```shell -systemctl enable --now headscale -``` - -5. Verify the headscale service: - -```shell -systemctl status headscale -``` - -Verify `headscale` is available: - -```shell -curl http://127.0.0.1:9090/metrics -``` + ```shell + curl http://127.0.0.1:9090/metrics + ``` `headscale` will now run in the background and start at boot. diff --git a/docs/running-headscale-linux.md b/docs/running-headscale-linux.md index 66ccc3d3..ffa510a6 100644 --- a/docs/running-headscale-linux.md +++ b/docs/running-headscale-linux.md @@ -8,54 +8,56 @@ Get Headscale up and running. -This includes running Headscale with SystemD. +This includes running Headscale with systemd. ## Migrating from manual install If you are migrating from the old manual install, the best thing would be to remove the files installed by following [the guide in reverse](./running-headscale-linux-manual.md). -You should _not_ delete the database (`/var/headscale/db.sqlite`) and the +You should _not_ delete the database (`/var/lib/headscale/db.sqlite`) and the configuration (`/etc/headscale/config.yaml`). ## Installation -1. Download the lastest Headscale package for your platform (`.deb` for Ubuntu and Debian) from [Headscale's releases page](https://github.com/juanfont/headscale/releases): +1. Download the [latest Headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian). -```shell -wget --output-document=headscale.deb \ - https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>.deb -``` + ```shell + HEADSCALE_VERSION="" # See above URL for latest version, e.g. "X.Y.Z" (NOTE: do not add the "v" prefix!) + HEADSCALE_ARCH="" # Your system architecture, e.g. 
"amd64" + wget --output-document=headscale.deb \ + "https://github.com/juanfont/headscale/releases/download/v${HEADSCALE_VERSION}/headscale_${HEADSCALE_VERSION}_linux_${HEADSCALE_ARCH}.deb" + ``` -2. Install Headscale: +1. Install Headscale: -```shell -sudo dpkg --install headscale.deb -``` + ```shell + sudo apt install ./headscale.deb + ``` -3. Enable Headscale service, this will start Headscale at boot: +1. Enable Headscale service, this will start Headscale at boot: -```shell -sudo systemctl enable headscale -``` + ```shell + sudo systemctl enable headscale + ``` -4. Configure Headscale by editing the configuration file: +1. Configure Headscale by editing the configuration file: -```shell -nano /etc/headscale/config.yaml -``` + ```shell + nano /etc/headscale/config.yaml + ``` -5. Start Headscale: +1. Start Headscale: -```shell -sudo systemctl start headscale -``` + ```shell + sudo systemctl start headscale + ``` -6. Check that Headscale is running as intended: +1. Check that Headscale is running as intended: -```shell -systemctl status headscale -``` + ```shell + systemctl status headscale + ``` ## Using Headscale @@ -76,7 +78,7 @@ tailscale up --login-server <YOUR_HEADSCALE_URL> Register the machine: ```shell -headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY> +headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY> ``` ### Register machine using a pre authenticated key @@ -84,7 +86,7 @@ headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY> Generate a key using the command line: ```shell -headscale --user myfirstuser preauthkeys create --reusable --expiration 24h +headscale preauthkeys create --user myfirstuser --reusable --expiration 24h ``` This will return a pre-authenticated key that is used to diff --git a/docs/running-headscale-openbsd.md b/docs/running-headscale-openbsd.md index b76c9135..449034ba 100644 --- a/docs/running-headscale-openbsd.md +++ b/docs/running-headscale-openbsd.md @@ -9,121 +9,114 @@ ## 
Goal -This documentation has the goal of showing a user how-to install and run `headscale` on OpenBSD 7.1. -In additional to the "get up and running section", there is an optional [rc.d section](#running-headscale-in-the-background-with-rcd) +This documentation has the goal of showing a user how-to install and run `headscale` on OpenBSD. +In addition to the "get up and running section", there is an optional [rc.d section](#running-headscale-in-the-background-with-rcd) describing how to make `headscale` run properly in a server environment. ## Install `headscale` -1. Install from ports (Not Recommend) +1. Install from ports - As of OpenBSD 7.2, there's a headscale in ports collection, however, it's severely outdated(v0.12.4). - You can install it via `pkg_add headscale`. + You can install headscale from ports by running `pkg_add headscale`. -2. Install from source on OpenBSD 7.2 +1. Install from source -```shell -# Install prerequistes -pkg_add go + ```shell + # Install prerequistes + pkg_add go -git clone https://github.com/juanfont/headscale.git + git clone https://github.com/juanfont/headscale.git -cd headscale + cd headscale -# optionally checkout a release -# option a. you can find offical relase at https://github.com/juanfont/headscale/releases/latest -# option b. get latest tag, this may be a beta release -latestTag=$(git describe --tags `git rev-list --tags --max-count=1`) + # optionally checkout a release + # option a. you can find official release at https://github.com/juanfont/headscale/releases/latest + # option b. 
get latest tag, this may be a beta release + latestTag=$(git describe --tags `git rev-list --tags --max-count=1`) -git checkout $latestTag + git checkout $latestTag -go build -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$latestTag" github.com/juanfont/headscale + go build -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$latestTag" github.com/juanfont/headscale -# make it executable -chmod a+x headscale + # make it executable + chmod a+x headscale -# copy it to /usr/local/sbin -cp headscale /usr/local/sbin -``` + # copy it to /usr/local/sbin + cp headscale /usr/local/sbin + ``` -3. Install from source via cross compile +1. Install from source via cross compile -```shell -# Install prerequistes -# 1. go v1.20+: headscale newer than 0.21 needs go 1.20+ to compile -# 2. gmake: Makefile in the headscale repo is written in GNU make syntax + ```shell + # Install prerequistes + # 1. go v1.20+: headscale newer than 0.21 needs go 1.20+ to compile + # 2. gmake: Makefile in the headscale repo is written in GNU make syntax -git clone https://github.com/juanfont/headscale.git + git clone https://github.com/juanfont/headscale.git -cd headscale + cd headscale -# optionally checkout a release -# option a. you can find offical relase at https://github.com/juanfont/headscale/releases/latest -# option b. get latest tag, this may be a beta release -latestTag=$(git describe --tags `git rev-list --tags --max-count=1`) + # optionally checkout a release + # option a. you can find official release at https://github.com/juanfont/headscale/releases/latest + # option b. 
get latest tag, this may be a beta release + latestTag=$(git describe --tags `git rev-list --tags --max-count=1`) -git checkout $latestTag + git checkout $latestTag -make build GOOS=openbsd + make build GOOS=openbsd -# copy headscale to openbsd machine and put it in /usr/local/sbin -``` + # copy headscale to openbsd machine and put it in /usr/local/sbin + ``` ## Configure and run `headscale` 1. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database: -```shell -# Directory for configuration + ```shell + # Directory for configuration -mkdir -p /etc/headscale + mkdir -p /etc/headscale -# Directory for Database, and other variable data (like certificates) -mkdir -p /var/lib/headscale -``` + # Directory for database, and other variable data (like certificates) + mkdir -p /var/lib/headscale + ``` -2. Create an empty SQLite database: +1. Create a `headscale` configuration: -```shell -touch /var/lib/headscale/db.sqlite -``` + ```shell + touch /etc/headscale/config.yaml + ``` -3. Create a `headscale` configuration: +**(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository. -```shell -touch /etc/headscale/config.yaml -``` +1. Start the headscale server: -**(Strongly Recommended)** Download a copy of the [example configuration][config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository. + ```shell + headscale serve + ``` -4. Start the headscale server: + This command will start `headscale` in the current terminal session. -```shell -headscale serve -``` + *** -This command will start `headscale` in the current terminal session. + To continue the tutorial, open a new terminal and let it run in the background. + Alternatively use terminal emulators like [tmux](https://github.com/tmux/tmux). 
---- + To run `headscale` in the background, please follow the steps in the [rc.d section](#running-headscale-in-the-background-with-rcd) before continuing. -To continue the tutorial, open a new terminal and let it run in the background. -Alternatively use terminal emulators like [tmux](https://github.com/tmux/tmux). +1. Verify `headscale` is running: -To run `headscale` in the background, please follow the steps in the [rc.d section](#running-headscale-in-the-background-with-rcd) before continuing. + Verify `headscale` is available: -5. Verify `headscale` is running: + ```shell + curl http://127.0.0.1:9090/metrics + ``` -Verify `headscale` is available: +1. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)): -```shell -curl http://127.0.0.1:9090/metrics -``` - -6. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)): - -```shell -headscale users create myfirstuser -``` + ```shell + headscale users create myfirstuser + ``` ### Register a machine (normal login) @@ -136,7 +129,7 @@ tailscale up --login-server YOUR_HEADSCALE_URL Register the machine: ```shell -headscale --user myfirstuser nodes register --key <YOU_+MACHINE_KEY> +headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY> ``` ### Register machine using a pre authenticated key @@ -144,7 +137,7 @@ headscale --user myfirstuser nodes register --key <YOU_+MACHINE_KEY> Generate a key using the command line: ```shell -headscale --user myfirstuser preauthkeys create --reusable --expiration 24h +headscale preauthkeys create --user myfirstuser --reusable --expiration 24h ``` This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command: @@ -159,51 +152,51 @@ This section demonstrates how to run `headscale` as a service in the background 1. 
Create a rc.d service at `/etc/rc.d/headscale` containing: -```shell -#!/bin/ksh + ```shell + #!/bin/ksh -daemon="/usr/local/sbin/headscale" -daemon_logger="daemon.info" -daemon_user="root" -daemon_flags="serve" -daemon_timeout=60 + daemon="/usr/local/sbin/headscale" + daemon_logger="daemon.info" + daemon_user="root" + daemon_flags="serve" + daemon_timeout=60 -. /etc/rc.d/rc.subr + . /etc/rc.d/rc.subr -rc_bg=YES -rc_reload=NO + rc_bg=YES + rc_reload=NO -rc_cmd $1 -``` + rc_cmd $1 + ``` -2. `/etc/rc.d/headscale` needs execute permission: +1. `/etc/rc.d/headscale` needs execute permission: -```shell -chmod a+x /etc/rc.d/headscale -``` + ```shell + chmod a+x /etc/rc.d/headscale + ``` -3. Start `headscale` service: +1. Start `headscale` service: -```shell -rcctl start headscale -``` + ```shell + rcctl start headscale + ``` -4. Make `headscale` service start at boot: +1. Make `headscale` service start at boot: -```shell -rcctl enable headscale -``` + ```shell + rcctl enable headscale + ``` -5. Verify the headscale service: +1. Verify the headscale service: -```shell -rcctl check headscale -``` + ```shell + rcctl check headscale + ``` -Verify `headscale` is available: + Verify `headscale` is available: -```shell -curl http://127.0.0.1:9090/metrics -``` + ```shell + curl http://127.0.0.1:9090/metrics + ``` -`headscale` will now run in the background and start at boot. + `headscale` will now run in the background and start at boot. diff --git a/docs/running-headscale-sealos.md b/docs/running-headscale-sealos.md new file mode 100644 index 00000000..52f5c7ec --- /dev/null +++ b/docs/running-headscale-sealos.md @@ -0,0 +1,136 @@ +# Running headscale on Sealos + +!!! warning "Community documentation" + + This page is not actively maintained by the headscale authors and is + written by community members. It is _not_ verified by `headscale` developers. + + **It might be outdated and it might miss necessary steps**. 
+ +## Goal + +This documentation has the goal of showing a user how-to run `headscale` on Sealos. + +## Running headscale server + +1. Click the following prebuilt template: + + [![](https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dheadscale) + +2. Click "Deploy Application" on the template page to start deployment. Upon completion, two applications appear: Headscale, and its [visual interface](https://github.com/GoodiesHQ/headscale-admin). +3. Once deployment concludes, click 'Details' on the Headscale application page to navigate to the application's details. +4. Wait for the application's status to switch to running. For accessing the headscale server, the Public Address associated with port 8080 is the address of the headscale server. To access the Headscale console, simply append `/admin/` to the Headscale public URL. + + ![](./images/headscale-sealos-url.png) + +5. Click on 'Terminal' button on the right side of the details to access the Terminal of the headscale application. 
then create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)):
+
+   ```bash
+   headscale users create myfirstuser
+   ```
+
+### Register a machine (normal login)
+
+On a client machine, execute the `tailscale` login command:
+
+```bash
+# replace <YOUR_HEADSCALE_URL> with the public domain provided by Sealos
+tailscale up --login-server YOUR_HEADSCALE_URL
+```
+
+To register a machine when running headscale in [Sealos](https://sealos.io), click on 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then run the headscale command:
+
+```bash
+headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY>
+```
+
+### Register machine using a pre authenticated key
+
+Click on the 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then generate a key using the command line:
+
+```bash
+headscale preauthkeys create --user myfirstuser --reusable --expiration 24h
+```
+
+This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:
+
+```bash
+tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
+```
+
+## Controlling headscale with remote CLI
+
+This documentation has the goal of showing a user how to control a headscale instance from a remote machine with the headscale command line binary.
+
+### Create an API key
+
+We need to create an API key to authenticate our remote headscale when using it from our workstation.
+
+To create an API key, click on 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then generate a key:
+
+```bash
+headscale apikeys create --expiration 90d
+```
+
+Copy the output of the command and save it for later.
Please note that you cannot retrieve a key again, if the key is lost, expire the old one, and create a new key.
+
+To list the keys currently associated with the server:
+
+```bash
+headscale apikeys list
+```
+
+and to expire a key:
+
+```bash
+headscale apikeys expire --prefix "<PREFIX>"
+```
+
+### Download and configure `headscale` client
+
+1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
+
+2. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
+
+3. Make `headscale` executable:
+
+```shell
+chmod +x /usr/local/bin/headscale
+```
+
+4. Configure the CLI through Environment Variables
+
+```shell
+export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:443"
+export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>"
+```
+
+In the headscale application's detail page, the Public Address corresponding to port 50443 corresponds to the value of `<HEADSCALE ADDRESS>`.
+
+![](./images/headscale-sealos-grpc-url.png)
+
+For example:
+
+```shell
+export HEADSCALE_CLI_ADDRESS="pwnjnnly.cloud.sealos.io:443"
+export HEADSCALE_CLI_API_KEY="abcde12345"
+```
+
+This will tell the `headscale` binary to connect to a remote instance, instead of looking
+for a local instance.
+
+The API key is needed to make sure that you are allowed to access the server. The key is _not_
+needed when running directly on the server, as the connection is local.
+
+5. Test the connection
+
+Let us run the headscale command to verify that we can connect by listing our nodes:
+
+```shell
+headscale nodes list
+```
+
+You should now be able to see a list of your nodes from your workstation, and you can
+now control the `headscale` server from your workstation.
+ +> Reference: [Headscale Deployment and Usage Guide: Mastering Tailscale's Self-Hosting Basics](https://icloudnative.io/en/posts/how-to-set-up-or-migrate-headscale/) diff --git a/docs/web-ui.md b/docs/web-ui.md index d018666e..fae71be1 100644 --- a/docs/web-ui.md +++ b/docs/web-ui.md @@ -5,10 +5,11 @@ This page contains community contributions. The projects listed here are not maintained by the Headscale authors and are written by community members. -| Name | Repository Link | Description | Status | -| --------------- | ------------------------------------------------------- | ------------------------------------------------------------------------- | ------ | -| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple Headscale web UI for small-scale deployments. | Alpha | -| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server | Alpha | -| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend enviroment required | Alpha | +| Name | Repository Link | Description | Status | +| --------------- | ------------------------------------------------------- | --------------------------------------------------------------------------- | ------ | +| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple Headscale web UI for small-scale deployments. 
| Alpha | +| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server | Alpha | +| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend enviroment required | Alpha | +| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for Headscale | Beta | You can ask for support on our dedicated [Discord channel](https://discord.com/channels/896711691637780480/1105842846386356294). diff --git a/docs/windows-client.md b/docs/windows-client.md index 38d330b0..66c47279 100644 --- a/docs/windows-client.md +++ b/docs/windows-client.md @@ -4,39 +4,41 @@ This documentation has the goal of showing how a user can use the official Windows [Tailscale](https://tailscale.com) client with `headscale`. -## Add registry keys +!!! info "Instructions on your headscale instance" -To make the Windows client behave as expected and to run well with `headscale`, two registry keys **must** be set: - -- `HKLM:\SOFTWARE\Tailscale IPN\UnattendedMode` must be set to `always` as a `string` type, to allow Tailscale to run properly in the background -- `HKLM:\SOFTWARE\Tailscale IPN\LoginURL` must be set to `<YOUR HEADSCALE URL>` as a `string` type, to ensure Tailscale contacts the correct control server. 
- -You can set these using the Windows Registry Editor: - -![windows-registry](./images/windows-registry.png) - -Or via the following Powershell commands (right click Powershell icon and select "Run as administrator"): - -``` -New-Item -Path "HKLM:\SOFTWARE\Tailscale IPN" -New-ItemProperty -Path 'HKLM:\Software\Tailscale IPN' -Name UnattendedMode -PropertyType String -Value always -New-ItemProperty -Path 'HKLM:\Software\Tailscale IPN' -Name LoginURL -PropertyType String -Value https://YOUR-HEADSCALE-URL -``` - -The Tailscale Windows client has been observed to reset its configuration on logout/reboot and these two keys [resolves that issue](https://github.com/tailscale/tailscale/issues/2798). - -For a guide on how to edit registry keys, [check out Computer Hope](https://www.computerhope.com/issues/ch001348.htm). + An endpoint with information on how to connect your Windows device + is also available at `/windows` on your running instance. ## Installation Download the [Official Windows Client](https://tailscale.com/download/windows) and install it. -When the installation has finished, start Tailscale and log in (you might have to click the icon in the system tray). +## Configuring the headscale URL -The log in should open a browser Window and direct you to your `headscale` instance. +Open a Command Prompt or Powershell and use Tailscale's login command to connect with your headscale instance (e.g +`https://headscale.example.com`): + +``` +tailscale login --login-server <YOUR_HEADSCALE_URL> +``` + +Follow the instructions in the opened browser window to finish the configuration. ## Troubleshooting +### Unattended mode + +By default, Tailscale's Windows client is only running when the user is logged in. 
If you want to keep Tailscale running +all the time, please enable "Unattended mode": + +- Click on the Tailscale tray icon and select `Preferences` +- Enable `Run unattended` +- Confirm the "Unattended mode" message + +See also [Keep Tailscale running when I'm not logged in to my computer](https://tailscale.com/kb/1088/run-unattended) + +### Failing node registration + If you are seeing repeated messages like: ``` @@ -53,8 +55,7 @@ This typically means that the registry keys above was not set appropriately. To reset and try again, it is important to do the following: -1. Ensure the registry keys from the previous guide is correctly set. -2. Shut down the Tailscale service (or the client running in the tray) -3. Delete Tailscale Application data folder, located at `C:\Users\<USERNAME>\AppData\Local\Tailscale` and try to connect again. -4. Ensure the Windows node is deleted from headscale (to ensure fresh setup) -5. Start Tailscale on the windows machine and retry the login. +1. Shut down the Tailscale service (or the client running in the tray) +2. Delete Tailscale Application data folder, located at `C:\Users\<USERNAME>\AppData\Local\Tailscale` and try to connect again. +3. Ensure the Windows node is deleted from headscale (to ensure fresh setup) +4. Start Tailscale on the Windows machine and retry the login. diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index f9e85ff3..00000000 --- a/examples/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Examples - -This directory contains examples on how to run `headscale` on different platforms. - -All examples are provided by the community and they are not verified by the `headscale` authors. 
diff --git a/examples/kustomize/.gitignore b/examples/kustomize/.gitignore deleted file mode 100644 index 229058d2..00000000 --- a/examples/kustomize/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/**/site -/**/secrets diff --git a/examples/kustomize/README.md b/examples/kustomize/README.md deleted file mode 100644 index cc57f147..00000000 --- a/examples/kustomize/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Deploying headscale on Kubernetes - -**Note:** This is contributed by the community and not verified by the headscale authors. - -This directory contains [Kustomize](https://kustomize.io) templates that deploy -headscale in various configurations. - -These templates currently support Rancher k3s. Other clusters may require -adaptation, especially around volume claims and ingress. - -Commands below assume this directory is your current working directory. - -# Generate secrets and site configuration - -Run `./init.bash` to generate keys, passwords, and site configuration files. - -Edit `base/site/public.env`, changing `public-hostname` to the public DNS name -that will be used for your headscale deployment. - -Set `public-proto` to "https" if you're planning to use TLS & Let's Encrypt. - -Configure DERP servers by editing `base/site/derp.yaml` if needed. - -# Add the image to the registry - -You'll somehow need to get `headscale:latest` into your cluster image registry. - -An easy way to do this with k3s: - -- Reconfigure k3s to use docker instead of containerd (`k3s server --docker`) -- `docker build -t headscale:latest ..` from here - -# Create the namespace - -If it doesn't already exist, `kubectl create ns headscale`. - -# Deploy headscale - -## sqlite - -`kubectl -n headscale apply -k ./sqlite` - -## postgres - -`kubectl -n headscale apply -k ./postgres` - -# TLS & Let's Encrypt - -Test a staging certificate with your configured DNS name and Let's Encrypt. - -`kubectl -n headscale apply -k ./staging-tls` - -Replace with a production certificate. 
- -`kubectl -n headscale apply -k ./production-tls` - -## Static / custom TLS certificates - -Only Let's Encrypt is supported. If you need other TLS settings, modify or patch the ingress. - -# Administration - -Use the wrapper script to remotely operate headscale to perform administrative -tasks like creating namespaces, authkeys, etc. - -``` -[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash - -headscale is an open source implementation of the Tailscale control server - -https://github.com/juanfont/headscale - -Usage: - headscale [command] - -Available Commands: - help Help about any command - namespace Manage the namespaces of headscale - node Manage the nodes of headscale - preauthkey Handle the preauthkeys in headscale - routes Manage the routes of headscale - serve Launches the headscale server - version Print the version. - -Flags: - -h, --help help for headscale - -o, --output string Output format. Empty for human-readable, 'json' or 'json-line' - -Use "headscale [command] --help" for more information about a command. - -``` - -# TODO / Ideas - -- Interpolate `email:` option to the ClusterIssuer from site configuration. - This probably needs to be done with a transformer, kustomize vars don't seem to work. 
-- Add kustomize examples for cloud-native ingress, load balancer -- CockroachDB for the backend -- DERP server deployment -- Tor hidden service diff --git a/examples/kustomize/base/configmap.yaml b/examples/kustomize/base/configmap.yaml deleted file mode 100644 index 0ac2d563..00000000 --- a/examples/kustomize/base/configmap.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: headscale-config -data: - server_url: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME) - listen_addr: "0.0.0.0:8080" - metrics_listen_addr: "127.0.0.1:9090" - ephemeral_node_inactivity_timeout: "30m" diff --git a/examples/kustomize/base/ingress.yaml b/examples/kustomize/base/ingress.yaml deleted file mode 100644 index 51da3427..00000000 --- a/examples/kustomize/base/ingress.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: headscale - annotations: - kubernetes.io/ingress.class: traefik -spec: - rules: - - host: $(PUBLIC_HOSTNAME) - http: - paths: - - backend: - service: - name: headscale - port: - number: 8080 - path: / - pathType: Prefix diff --git a/examples/kustomize/base/kustomization.yaml b/examples/kustomize/base/kustomization.yaml deleted file mode 100644 index 93278f7d..00000000 --- a/examples/kustomize/base/kustomization.yaml +++ /dev/null @@ -1,42 +0,0 @@ -namespace: headscale -resources: - - configmap.yaml - - ingress.yaml - - service.yaml -generatorOptions: - disableNameSuffixHash: true -configMapGenerator: - - name: headscale-site - files: - - derp.yaml=site/derp.yaml - envs: - - site/public.env - - name: headscale-etc - literals: - - config.json={} -secretGenerator: - - name: headscale - files: - - secrets/private-key -vars: - - name: PUBLIC_PROTO - objRef: - kind: ConfigMap - name: headscale-site - apiVersion: v1 - fieldRef: - fieldPath: data.public-proto - - name: PUBLIC_HOSTNAME - objRef: - kind: ConfigMap - name: headscale-site - apiVersion: v1 - fieldRef: - fieldPath: data.public-hostname - - name: 
CONTACT_EMAIL - objRef: - kind: ConfigMap - name: headscale-site - apiVersion: v1 - fieldRef: - fieldPath: data.contact-email diff --git a/examples/kustomize/base/service.yaml b/examples/kustomize/base/service.yaml deleted file mode 100644 index 39e67253..00000000 --- a/examples/kustomize/base/service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: headscale - labels: - app: headscale -spec: - selector: - app: headscale - ports: - - name: http - targetPort: http - port: 8080 diff --git a/examples/kustomize/headscale.bash b/examples/kustomize/headscale.bash deleted file mode 100755 index 66bfe92c..00000000 --- a/examples/kustomize/headscale.bash +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -set -eu -exec kubectl -n headscale exec -ti pod/headscale-0 -- /go/bin/headscale "$@" diff --git a/examples/kustomize/init.bash b/examples/kustomize/init.bash deleted file mode 100755 index e5b7965c..00000000 --- a/examples/kustomize/init.bash +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -eux -cd $(dirname $0) - -umask 022 -mkdir -p base/site/ -[ ! -e base/site/public.env ] && ( - cat >base/site/public.env <<EOF -public-hostname=localhost -public-proto=http -contact-email=headscale@example.com -EOF -) -[ ! -e base/site/derp.yaml ] && cp ../derp.yaml base/site/derp.yaml - -umask 077 -mkdir -p base/secrets/ -[ ! -e base/secrets/private-key ] && ( - wg genkey > base/secrets/private-key -) -mkdir -p postgres/secrets/ -[ ! 
-e postgres/secrets/password ] && (head -c 32 /dev/urandom | base64 -w0 > postgres/secrets/password) diff --git a/examples/kustomize/install-cert-manager.bash b/examples/kustomize/install-cert-manager.bash deleted file mode 100755 index 1a5ecacb..00000000 --- a/examples/kustomize/install-cert-manager.bash +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -set -eux -kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.4.0/cert-manager.yaml diff --git a/examples/kustomize/postgres/deployment.yaml b/examples/kustomize/postgres/deployment.yaml deleted file mode 100644 index 1dd88b41..00000000 --- a/examples/kustomize/postgres/deployment.yaml +++ /dev/null @@ -1,81 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: headscale -spec: - replicas: 2 - selector: - matchLabels: - app: headscale - template: - metadata: - labels: - app: headscale - spec: - containers: - - name: headscale - image: "headscale:latest" - imagePullPolicy: IfNotPresent - command: ["/go/bin/headscale", "serve"] - env: - - name: SERVER_URL - value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME) - - name: LISTEN_ADDR - valueFrom: - configMapKeyRef: - name: headscale-config - key: listen_addr - - name: METRICS_LISTEN_ADDR - valueFrom: - configMapKeyRef: - name: headscale-config - key: metrics_listen_addr - - name: DERP_MAP_PATH - value: /vol/config/derp.yaml - - name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT - valueFrom: - configMapKeyRef: - name: headscale-config - key: ephemeral_node_inactivity_timeout - - name: DB_TYPE - value: postgres - - name: DB_HOST - value: postgres.headscale.svc.cluster.local - - name: DB_PORT - value: "5432" - - name: DB_USER - value: headscale - - name: DB_PASS - valueFrom: - secretKeyRef: - name: postgresql - key: password - - name: DB_NAME - value: headscale - ports: - - name: http - protocol: TCP - containerPort: 8080 - livenessProbe: - tcpSocket: - port: http - initialDelaySeconds: 30 - timeoutSeconds: 5 - periodSeconds: 15 - volumeMounts: - - 
name: config - mountPath: /vol/config - - name: secret - mountPath: /vol/secret - - name: etc - mountPath: /etc/headscale - volumes: - - name: config - configMap: - name: headscale-site - - name: etc - configMap: - name: headscale-etc - - name: secret - secret: - secretName: headscale diff --git a/examples/kustomize/postgres/kustomization.yaml b/examples/kustomize/postgres/kustomization.yaml deleted file mode 100644 index e732e3b9..00000000 --- a/examples/kustomize/postgres/kustomization.yaml +++ /dev/null @@ -1,13 +0,0 @@ -namespace: headscale -bases: - - ../base -resources: - - deployment.yaml - - postgres-service.yaml - - postgres-statefulset.yaml -generatorOptions: - disableNameSuffixHash: true -secretGenerator: - - name: postgresql - files: - - secrets/password diff --git a/examples/kustomize/postgres/postgres-service.yaml b/examples/kustomize/postgres/postgres-service.yaml deleted file mode 100644 index 6252e7f9..00000000 --- a/examples/kustomize/postgres/postgres-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: postgres - labels: - app: postgres -spec: - selector: - app: postgres - ports: - - name: postgres - targetPort: postgres - port: 5432 diff --git a/examples/kustomize/postgres/postgres-statefulset.yaml b/examples/kustomize/postgres/postgres-statefulset.yaml deleted file mode 100644 index b81c9bf0..00000000 --- a/examples/kustomize/postgres/postgres-statefulset.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: postgres -spec: - serviceName: postgres - replicas: 1 - selector: - matchLabels: - app: postgres - template: - metadata: - labels: - app: postgres - spec: - containers: - - name: postgres - image: "postgres:13" - imagePullPolicy: IfNotPresent - env: - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: postgresql - key: password - - name: POSTGRES_USER - value: headscale - ports: - - name: postgres - protocol: TCP - containerPort: 5432 - 
livenessProbe: - tcpSocket: - port: 5432 - initialDelaySeconds: 30 - timeoutSeconds: 5 - periodSeconds: 15 - volumeMounts: - - name: pgdata - mountPath: /var/lib/postgresql/data - volumeClaimTemplates: - - metadata: - name: pgdata - spec: - storageClassName: local-path - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 1Gi diff --git a/examples/kustomize/production-tls/ingress-patch.yaml b/examples/kustomize/production-tls/ingress-patch.yaml deleted file mode 100644 index 9e6177fb..00000000 --- a/examples/kustomize/production-tls/ingress-patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: Ingress -metadata: - name: headscale - annotations: - cert-manager.io/cluster-issuer: letsencrypt-production - traefik.ingress.kubernetes.io/router.tls: "true" -spec: - tls: - - hosts: - - $(PUBLIC_HOSTNAME) - secretName: production-cert diff --git a/examples/kustomize/production-tls/kustomization.yaml b/examples/kustomize/production-tls/kustomization.yaml deleted file mode 100644 index d3147f5f..00000000 --- a/examples/kustomize/production-tls/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ -namespace: headscale -bases: - - ../base -resources: - - production-issuer.yaml -patches: - - path: ingress-patch.yaml - target: - kind: Ingress diff --git a/examples/kustomize/production-tls/production-issuer.yaml b/examples/kustomize/production-tls/production-issuer.yaml deleted file mode 100644 index f436090b..00000000 --- a/examples/kustomize/production-tls/production-issuer.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-production -spec: - acme: - # TODO: figure out how to get kustomize to interpolate this, or use a transformer - #email: $(CONTACT_EMAIL) - server: https://acme-v02.api.letsencrypt.org/directory - privateKeySecretRef: - # Secret resource used to store the account's private key. 
- name: letsencrypt-production-acc-key - solvers: - - http01: - ingress: - class: traefik diff --git a/examples/kustomize/sqlite/kustomization.yaml b/examples/kustomize/sqlite/kustomization.yaml deleted file mode 100644 index ca799419..00000000 --- a/examples/kustomize/sqlite/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -namespace: headscale -bases: - - ../base -resources: - - statefulset.yaml diff --git a/examples/kustomize/sqlite/statefulset.yaml b/examples/kustomize/sqlite/statefulset.yaml deleted file mode 100644 index 2321d39d..00000000 --- a/examples/kustomize/sqlite/statefulset.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: headscale -spec: - serviceName: headscale - replicas: 1 - selector: - matchLabels: - app: headscale - template: - metadata: - labels: - app: headscale - spec: - containers: - - name: headscale - image: "headscale:latest" - imagePullPolicy: IfNotPresent - command: ["/go/bin/headscale", "serve"] - env: - - name: SERVER_URL - value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME) - - name: LISTEN_ADDR - valueFrom: - configMapKeyRef: - name: headscale-config - key: listen_addr - - name: METRICS_LISTEN_ADDR - valueFrom: - configMapKeyRef: - name: headscale-config - key: metrics_listen_addr - - name: DERP_MAP_PATH - value: /vol/config/derp.yaml - - name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT - valueFrom: - configMapKeyRef: - name: headscale-config - key: ephemeral_node_inactivity_timeout - - name: DB_TYPE - value: sqlite3 - - name: DB_PATH - value: /vol/data/db.sqlite - ports: - - name: http - protocol: TCP - containerPort: 8080 - livenessProbe: - tcpSocket: - port: http - initialDelaySeconds: 30 - timeoutSeconds: 5 - periodSeconds: 15 - volumeMounts: - - name: config - mountPath: /vol/config - - name: data - mountPath: /vol/data - - name: secret - mountPath: /vol/secret - - name: etc - mountPath: /etc/headscale - volumes: - - name: config - configMap: - name: headscale-site - - name: etc - configMap: - 
name: headscale-etc - - name: secret - secret: - secretName: headscale - volumeClaimTemplates: - - metadata: - name: data - spec: - storageClassName: local-path - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 1Gi diff --git a/examples/kustomize/staging-tls/ingress-patch.yaml b/examples/kustomize/staging-tls/ingress-patch.yaml deleted file mode 100644 index 5a1daf0c..00000000 --- a/examples/kustomize/staging-tls/ingress-patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: Ingress -metadata: - name: headscale - annotations: - cert-manager.io/cluster-issuer: letsencrypt-staging - traefik.ingress.kubernetes.io/router.tls: "true" -spec: - tls: - - hosts: - - $(PUBLIC_HOSTNAME) - secretName: staging-cert diff --git a/examples/kustomize/staging-tls/kustomization.yaml b/examples/kustomize/staging-tls/kustomization.yaml deleted file mode 100644 index 0900c583..00000000 --- a/examples/kustomize/staging-tls/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ -namespace: headscale -bases: - - ../base -resources: - - staging-issuer.yaml -patches: - - path: ingress-patch.yaml - target: - kind: Ingress diff --git a/examples/kustomize/staging-tls/staging-issuer.yaml b/examples/kustomize/staging-tls/staging-issuer.yaml deleted file mode 100644 index cf290415..00000000 --- a/examples/kustomize/staging-tls/staging-issuer.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-staging -spec: - acme: - # TODO: figure out how to get kustomize to interpolate this, or use a transformer - #email: $(CONTACT_EMAIL) - server: https://acme-staging-v02.api.letsencrypt.org/directory - privateKeySecretRef: - # Secret resource used to store the account's private key. 
- name: letsencrypt-staging-acc-key - solvers: - - http01: - ingress: - class: traefik diff --git a/flake.lock b/flake.lock index 307d6339..9b66e4e0 100644 --- a/flake.lock +++ b/flake.lock @@ -5,11 +5,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1705309234, - "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -20,11 +20,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1707451808, - "narHash": "sha256-UwDBUNHNRsYKFJzyTMVMTF5qS4xeJlWoeyJf+6vvamU=", + "lastModified": 1725534445, + "narHash": "sha256-Yd0FK9SkWy+ZPuNqUgmVPXokxDgMJoGuNpMEtkfcf84=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "442d407992384ed9c0e6d352de75b69079904e4e", + "rev": "9bb1e7571aadf31ddb4af77fc64b2d59580f9a39", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 4eae1d52..79dd58e8 100644 --- a/flake.nix +++ b/flake.nix @@ -20,8 +20,9 @@ { overlay = _: prev: let pkgs = nixpkgs.legacyPackages.${prev.system}; + buildGo = pkgs.buildGo123Module; in rec { - headscale = pkgs.buildGo122Module rec { + headscale = buildGo rec { pname = "headscale"; version = headscaleVersion; src = pkgs.lib.cleanSource self; @@ -31,30 +32,52 @@ # When updating go.mod or go.sum, a new sha will need to be calculated, # update this if you have a mismatch after doing a change to thos files. 
- vendorHash = "sha256-Yb5WaN0abPLZ4mPnuJGZoj6EMfoZjaZZ0f344KWva3o="; + vendorHash = "sha256-+8dOxPG/Q+wuHgRwwWqdphHOuop0W9dVyClyQuh7aRc="; subPackages = ["cmd/headscale"]; ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"]; }; - protoc-gen-grpc-gateway = pkgs.buildGoModule rec { + protoc-gen-grpc-gateway = buildGo rec { pname = "grpc-gateway"; - version = "2.19.1"; + version = "2.22.0"; src = pkgs.fetchFromGitHub { owner = "grpc-ecosystem"; repo = "grpc-gateway"; rev = "v${version}"; - sha256 = "sha256-CdGQpQfOSimeio8v1lZ7xzE/oAS2qFyu+uN+H9i7vpo="; + sha256 = "sha256-I1w3gfV06J8xG1xJ+XuMIGkV2/Ofszo7SCC+z4Xb6l4="; }; - vendorHash = "sha256-no7kZGpf/VOuceC3J+izGFQp5aMS3b+Rn+x4BFZ2zgs="; + vendorHash = "sha256-S4hcD5/BSGxM2qdJHMxOkxsJ5+Ks6m4lKHSS9+yZ17c="; nativeBuildInputs = [pkgs.installShellFiles]; subPackages = ["protoc-gen-grpc-gateway" "protoc-gen-openapiv2"]; }; + + # Upstream does not override buildGoModule properly, + # importing a specific module, so comment out for now. + # golangci-lint = prev.golangci-lint.override { + # buildGoModule = buildGo; + # }; + + goreleaser = prev.goreleaser.override { + buildGoModule = buildGo; + }; + + gotestsum = prev.gotestsum.override { + buildGoModule = buildGo; + }; + + gotests = prev.gotests.override { + buildGoModule = buildGo; + }; + + gofumpt = prev.gofumpt.override { + buildGoModule = buildGo; + }; }; } // flake-utils.lib.eachDefaultSystem @@ -63,7 +86,7 @@ overlays = [self.overlay]; inherit system; }; - buildDeps = with pkgs; [git go_1_22 gnumake]; + buildDeps = with pkgs; [git go_1_23 gnumake]; devDeps = with pkgs; buildDeps ++ [ @@ -74,6 +97,7 @@ nfpm gotestsum gotests + gofumpt ksh ko yq-go diff --git a/gen/go/headscale/v1/apikey.pb.go b/gen/go/headscale/v1/apikey.pb.go index d1a5f555..e6263522 100644 --- a/gen/go/headscale/v1/apikey.pb.go +++ b/gen/go/headscale/v1/apikey.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: headscale/v1/apikey.proto @@ -512,7 +512,7 @@ func file_headscale_v1_apikey_proto_rawDescGZIP() []byte { } var file_headscale_v1_apikey_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_headscale_v1_apikey_proto_goTypes = []interface{}{ +var file_headscale_v1_apikey_proto_goTypes = []any{ (*ApiKey)(nil), // 0: headscale.v1.ApiKey (*CreateApiKeyRequest)(nil), // 1: headscale.v1.CreateApiKeyRequest (*CreateApiKeyResponse)(nil), // 2: headscale.v1.CreateApiKeyResponse @@ -543,7 +543,7 @@ func file_headscale_v1_apikey_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_headscale_v1_apikey_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ApiKey); i { case 0: return &v.state @@ -555,7 +555,7 @@ func file_headscale_v1_apikey_proto_init() { return nil } } - file_headscale_v1_apikey_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*CreateApiKeyRequest); i { case 0: return &v.state @@ -567,7 +567,7 @@ func file_headscale_v1_apikey_proto_init() { return nil } } - file_headscale_v1_apikey_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*CreateApiKeyResponse); i { case 0: return &v.state @@ -579,7 +579,7 @@ func file_headscale_v1_apikey_proto_init() { return nil } } - file_headscale_v1_apikey_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExpireApiKeyRequest); i { case 0: return &v.state @@ -591,7 +591,7 @@ func file_headscale_v1_apikey_proto_init() { return nil } } - 
file_headscale_v1_apikey_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ExpireApiKeyResponse); i { case 0: return &v.state @@ -603,7 +603,7 @@ func file_headscale_v1_apikey_proto_init() { return nil } } - file_headscale_v1_apikey_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListApiKeysRequest); i { case 0: return &v.state @@ -615,7 +615,7 @@ func file_headscale_v1_apikey_proto_init() { return nil } } - file_headscale_v1_apikey_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ListApiKeysResponse); i { case 0: return &v.state @@ -627,7 +627,7 @@ func file_headscale_v1_apikey_proto_init() { return nil } } - file_headscale_v1_apikey_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DeleteApiKeyRequest); i { case 0: return &v.state @@ -639,7 +639,7 @@ func file_headscale_v1_apikey_proto_init() { return nil } } - file_headscale_v1_apikey_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_apikey_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*DeleteApiKeyResponse); i { case 0: return &v.state diff --git a/gen/go/headscale/v1/device.pb.go b/gen/go/headscale/v1/device.pb.go index 40e2e24f..66c31441 100644 --- a/gen/go/headscale/v1/device.pb.go +++ b/gen/go/headscale/v1/device.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: headscale/v1/device.proto @@ -925,7 +925,7 @@ func file_headscale_v1_device_proto_rawDescGZIP() []byte { } var file_headscale_v1_device_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_headscale_v1_device_proto_goTypes = []interface{}{ +var file_headscale_v1_device_proto_goTypes = []any{ (*Latency)(nil), // 0: headscale.v1.Latency (*ClientSupports)(nil), // 1: headscale.v1.ClientSupports (*ClientConnectivity)(nil), // 2: headscale.v1.ClientConnectivity @@ -961,7 +961,7 @@ func file_headscale_v1_device_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_headscale_v1_device_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Latency); i { case 0: return &v.state @@ -973,7 +973,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientSupports); i { case 0: return &v.state @@ -985,7 +985,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ClientConnectivity); i { case 0: return &v.state @@ -997,7 +997,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetDeviceRequest); i { case 0: return &v.state @@ -1009,7 +1009,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[4].Exporter 
= func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetDeviceResponse); i { case 0: return &v.state @@ -1021,7 +1021,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DeleteDeviceRequest); i { case 0: return &v.state @@ -1033,7 +1033,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*DeleteDeviceResponse); i { case 0: return &v.state @@ -1045,7 +1045,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*GetDeviceRoutesRequest); i { case 0: return &v.state @@ -1057,7 +1057,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*GetDeviceRoutesResponse); i { case 0: return &v.state @@ -1069,7 +1069,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_device_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*EnableDeviceRoutesRequest); i { case 0: return &v.state @@ -1081,7 +1081,7 @@ func file_headscale_v1_device_proto_init() { return nil } } - file_headscale_v1_device_proto_msgTypes[10].Exporter = func(v interface{}, i int) 
interface{} { + file_headscale_v1_device_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*EnableDeviceRoutesResponse); i { case 0: return &v.state diff --git a/gen/go/headscale/v1/headscale.pb.go b/gen/go/headscale/v1/headscale.pb.go index b1af2fa5..d6751864 100644 --- a/gen/go/headscale/v1/headscale.pb.go +++ b/gen/go/headscale/v1/headscale.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: headscale/v1/headscale.proto @@ -36,205 +36,228 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{ 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x32, 0xfd, 0x17, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, - 0x72, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, - 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x68, 0x0a, 0x0a, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, - 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 
0x61, - 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x11, 0x3a, 0x01, 0x2a, 0x22, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, - 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x82, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xcf, 0x1a, 0x0a, + 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x63, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x68, + 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, + 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, + 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x15, 0x12, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x68, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x22, - 0x29, 0x2f, 0x61, 0x70, 0x69, 0x2f, 
0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6f, - 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f, - 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6c, 0x0a, 0x0a, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, - 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, - 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x15, 0x2a, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, - 0x72, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, - 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x12, 0x0c, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, - 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, - 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, - 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 
0x74, 0x65, 0x50, 0x72, - 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, - 0x87, 0x01, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, - 0x68, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, - 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, - 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, - 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a, 0x22, 0x19, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, - 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x7a, 0x0a, 0x0f, 0x4c, 0x69, 0x73, - 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24, 0x2e, 0x68, - 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, - 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x7d, 0x0a, 0x0f, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, - 0x65, 0x61, 
0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, - 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, - 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, - 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, - 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, - 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x66, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x12, - 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, - 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, - 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x6e, 0x0a, 0x07, - 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, - 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 
0x74, 0x61, 0x67, 0x73, 0x12, 0x74, 0x0a, 0x0c, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x2e, 0x68, - 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x15, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, - 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x2a, 0x16, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x69, 0x64, 0x7d, 0x12, 0x76, 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, - 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1d, 
0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, - 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x0a, - 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, - 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, - 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, - 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, - 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, - 0x62, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, - 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, - 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, - 0x6f, 0x64, 0x65, 0x12, 0x6e, 0x0a, 0x08, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, - 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, - 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, - 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, - 0x76, 0x65, 0x4e, 0x6f, 
0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, - 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x75, - 0x73, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x3a, + 0x01, 0x2a, 0x22, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, + 0x12, 0x82, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, + 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x22, 0x29, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6c, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, + 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 
0xe4, 0x93, 0x02, 0x15, 0x2a, 0x13, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, + 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, + 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, + 0x70, 
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x87, 0x01, 0x0a, 0x10, 0x45, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, + 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, + 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, + 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a, 0x22, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x12, 0x7a, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, + 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, + 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, + 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, + 0x12, 0x7d, 0x0a, 0x0f, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x64, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, + 0x66, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, + 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, + 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, + 0x16, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x6e, 0x0a, 0x07, 0x53, 0x65, 0x74, 0x54, 0x61, + 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, + 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67, 0x73, 0x12, 0x74, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 
0x65, 0x72, 0x4e, + 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, + 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, + 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x6f, 0x0a, + 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, + 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, + 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x2a, 0x16, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, + 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x76, + 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, + 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, + 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x4e, 0x6f, 
0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, + 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, + 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, + 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f, + 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69, + 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e, + 0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x6e, + 0x0a, 0x08, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1d, 0x2e, 0x68, 0x65, 0x61, + 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x1d, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, + 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 
0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, + 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, + 0x50, 0x73, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, + 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, + 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, + 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x69, 0x70, + 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, + 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, + 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 
- 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, + 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, + 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, - 0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, - 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, - 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 
0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, - 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, - 0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x7f, 0x0a, 0x0d, 0x47, 0x65, - 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x68, 0x65, - 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, - 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, - 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x0b, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, + 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x7f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4e, + 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, + 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, + 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x75, 0x0a, 
0x0b, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, + 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, - 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, - 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, - 0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, + 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, + 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, + 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, + 0x65, 0x79, 
0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, - 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, - 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, - 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x6a, 0x0a, - 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, + 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 
0x72, 0x65, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, + 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, + 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, - 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x76, 0x0a, 0x0c, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, - 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, - 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, - 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x2a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x7b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x7d, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, - 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, + 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x76, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, + 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x2a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, + 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x7b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x7d, 0x12, + 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1e, 0x2e, 0x68, + 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, + 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x67, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 
0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x1a, 0x0e, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x29, + 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, + 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, + 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -var file_headscale_v1_headscale_proto_goTypes = []interface{}{ +var file_headscale_v1_headscale_proto_goTypes = []any{ (*GetUserRequest)(nil), // 0: headscale.v1.GetUserRequest (*CreateUserRequest)(nil), // 1: headscale.v1.CreateUserRequest (*RenameUserRequest)(nil), // 2: headscale.v1.RenameUserRequest @@ -252,41 +275,47 @@ var file_headscale_v1_headscale_proto_goTypes = []interface{}{ (*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest (*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest (*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest - (*GetRoutesRequest)(nil), // 17: headscale.v1.GetRoutesRequest - (*EnableRouteRequest)(nil), // 18: headscale.v1.EnableRouteRequest - (*DisableRouteRequest)(nil), // 19: headscale.v1.DisableRouteRequest - (*GetNodeRoutesRequest)(nil), // 20: headscale.v1.GetNodeRoutesRequest - (*DeleteRouteRequest)(nil), // 21: headscale.v1.DeleteRouteRequest - (*CreateApiKeyRequest)(nil), // 22: headscale.v1.CreateApiKeyRequest - (*ExpireApiKeyRequest)(nil), // 23: headscale.v1.ExpireApiKeyRequest - (*ListApiKeysRequest)(nil), // 24: headscale.v1.ListApiKeysRequest - (*DeleteApiKeyRequest)(nil), // 25: headscale.v1.DeleteApiKeyRequest - (*GetUserResponse)(nil), // 26: headscale.v1.GetUserResponse - (*CreateUserResponse)(nil), // 27: headscale.v1.CreateUserResponse - (*RenameUserResponse)(nil), // 28: headscale.v1.RenameUserResponse - (*DeleteUserResponse)(nil), // 29: headscale.v1.DeleteUserResponse - (*ListUsersResponse)(nil), 
// 30: headscale.v1.ListUsersResponse - (*CreatePreAuthKeyResponse)(nil), // 31: headscale.v1.CreatePreAuthKeyResponse - (*ExpirePreAuthKeyResponse)(nil), // 32: headscale.v1.ExpirePreAuthKeyResponse - (*ListPreAuthKeysResponse)(nil), // 33: headscale.v1.ListPreAuthKeysResponse - (*DebugCreateNodeResponse)(nil), // 34: headscale.v1.DebugCreateNodeResponse - (*GetNodeResponse)(nil), // 35: headscale.v1.GetNodeResponse - (*SetTagsResponse)(nil), // 36: headscale.v1.SetTagsResponse - (*RegisterNodeResponse)(nil), // 37: headscale.v1.RegisterNodeResponse - (*DeleteNodeResponse)(nil), // 38: headscale.v1.DeleteNodeResponse - (*ExpireNodeResponse)(nil), // 39: headscale.v1.ExpireNodeResponse - (*RenameNodeResponse)(nil), // 40: headscale.v1.RenameNodeResponse - (*ListNodesResponse)(nil), // 41: headscale.v1.ListNodesResponse - (*MoveNodeResponse)(nil), // 42: headscale.v1.MoveNodeResponse - (*GetRoutesResponse)(nil), // 43: headscale.v1.GetRoutesResponse - (*EnableRouteResponse)(nil), // 44: headscale.v1.EnableRouteResponse - (*DisableRouteResponse)(nil), // 45: headscale.v1.DisableRouteResponse - (*GetNodeRoutesResponse)(nil), // 46: headscale.v1.GetNodeRoutesResponse - (*DeleteRouteResponse)(nil), // 47: headscale.v1.DeleteRouteResponse - (*CreateApiKeyResponse)(nil), // 48: headscale.v1.CreateApiKeyResponse - (*ExpireApiKeyResponse)(nil), // 49: headscale.v1.ExpireApiKeyResponse - (*ListApiKeysResponse)(nil), // 50: headscale.v1.ListApiKeysResponse - (*DeleteApiKeyResponse)(nil), // 51: headscale.v1.DeleteApiKeyResponse + (*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest + (*GetRoutesRequest)(nil), // 18: headscale.v1.GetRoutesRequest + (*EnableRouteRequest)(nil), // 19: headscale.v1.EnableRouteRequest + (*DisableRouteRequest)(nil), // 20: headscale.v1.DisableRouteRequest + (*GetNodeRoutesRequest)(nil), // 21: headscale.v1.GetNodeRoutesRequest + (*DeleteRouteRequest)(nil), // 22: headscale.v1.DeleteRouteRequest + (*CreateApiKeyRequest)(nil), 
// 23: headscale.v1.CreateApiKeyRequest + (*ExpireApiKeyRequest)(nil), // 24: headscale.v1.ExpireApiKeyRequest + (*ListApiKeysRequest)(nil), // 25: headscale.v1.ListApiKeysRequest + (*DeleteApiKeyRequest)(nil), // 26: headscale.v1.DeleteApiKeyRequest + (*GetPolicyRequest)(nil), // 27: headscale.v1.GetPolicyRequest + (*SetPolicyRequest)(nil), // 28: headscale.v1.SetPolicyRequest + (*GetUserResponse)(nil), // 29: headscale.v1.GetUserResponse + (*CreateUserResponse)(nil), // 30: headscale.v1.CreateUserResponse + (*RenameUserResponse)(nil), // 31: headscale.v1.RenameUserResponse + (*DeleteUserResponse)(nil), // 32: headscale.v1.DeleteUserResponse + (*ListUsersResponse)(nil), // 33: headscale.v1.ListUsersResponse + (*CreatePreAuthKeyResponse)(nil), // 34: headscale.v1.CreatePreAuthKeyResponse + (*ExpirePreAuthKeyResponse)(nil), // 35: headscale.v1.ExpirePreAuthKeyResponse + (*ListPreAuthKeysResponse)(nil), // 36: headscale.v1.ListPreAuthKeysResponse + (*DebugCreateNodeResponse)(nil), // 37: headscale.v1.DebugCreateNodeResponse + (*GetNodeResponse)(nil), // 38: headscale.v1.GetNodeResponse + (*SetTagsResponse)(nil), // 39: headscale.v1.SetTagsResponse + (*RegisterNodeResponse)(nil), // 40: headscale.v1.RegisterNodeResponse + (*DeleteNodeResponse)(nil), // 41: headscale.v1.DeleteNodeResponse + (*ExpireNodeResponse)(nil), // 42: headscale.v1.ExpireNodeResponse + (*RenameNodeResponse)(nil), // 43: headscale.v1.RenameNodeResponse + (*ListNodesResponse)(nil), // 44: headscale.v1.ListNodesResponse + (*MoveNodeResponse)(nil), // 45: headscale.v1.MoveNodeResponse + (*BackfillNodeIPsResponse)(nil), // 46: headscale.v1.BackfillNodeIPsResponse + (*GetRoutesResponse)(nil), // 47: headscale.v1.GetRoutesResponse + (*EnableRouteResponse)(nil), // 48: headscale.v1.EnableRouteResponse + (*DisableRouteResponse)(nil), // 49: headscale.v1.DisableRouteResponse + (*GetNodeRoutesResponse)(nil), // 50: headscale.v1.GetNodeRoutesResponse + (*DeleteRouteResponse)(nil), // 51: 
headscale.v1.DeleteRouteResponse + (*CreateApiKeyResponse)(nil), // 52: headscale.v1.CreateApiKeyResponse + (*ExpireApiKeyResponse)(nil), // 53: headscale.v1.ExpireApiKeyResponse + (*ListApiKeysResponse)(nil), // 54: headscale.v1.ListApiKeysResponse + (*DeleteApiKeyResponse)(nil), // 55: headscale.v1.DeleteApiKeyResponse + (*GetPolicyResponse)(nil), // 56: headscale.v1.GetPolicyResponse + (*SetPolicyResponse)(nil), // 57: headscale.v1.SetPolicyResponse } var file_headscale_v1_headscale_proto_depIdxs = []int32{ 0, // 0: headscale.v1.HeadscaleService.GetUser:input_type -> headscale.v1.GetUserRequest @@ -306,43 +335,49 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{ 14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest 15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest 16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest - 17, // 17: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest - 18, // 18: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest - 19, // 19: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest - 20, // 20: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest - 21, // 21: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest - 22, // 22: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest - 23, // 23: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest - 24, // 24: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest - 25, // 25: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest - 26, // 26: headscale.v1.HeadscaleService.GetUser:output_type -> headscale.v1.GetUserResponse - 27, // 27: 
headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse - 28, // 28: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse - 29, // 29: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse - 30, // 30: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse - 31, // 31: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse - 32, // 32: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse - 33, // 33: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse - 34, // 34: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse - 35, // 35: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse - 36, // 36: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse - 37, // 37: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse - 38, // 38: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse - 39, // 39: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse - 40, // 40: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse - 41, // 41: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse - 42, // 42: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse - 43, // 43: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse - 44, // 44: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse - 45, // 45: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse - 46, // 46: 
headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse - 47, // 47: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse - 48, // 48: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse - 49, // 49: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse - 50, // 50: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse - 51, // 51: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse - 26, // [26:52] is the sub-list for method output_type - 0, // [0:26] is the sub-list for method input_type + 17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest + 18, // 18: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest + 19, // 19: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest + 20, // 20: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest + 21, // 21: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest + 22, // 22: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest + 23, // 23: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest + 24, // 24: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest + 25, // 25: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest + 26, // 26: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest + 27, // 27: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest + 28, // 28: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest + 29, // 29: 
headscale.v1.HeadscaleService.GetUser:output_type -> headscale.v1.GetUserResponse + 30, // 30: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse + 31, // 31: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse + 32, // 32: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse + 33, // 33: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse + 34, // 34: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse + 35, // 35: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse + 36, // 36: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse + 37, // 37: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse + 38, // 38: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse + 39, // 39: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse + 40, // 40: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse + 41, // 41: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse + 42, // 42: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse + 43, // 43: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse + 44, // 44: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse + 45, // 45: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse + 46, // 46: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse + 47, // 47: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse + 48, // 48: 
headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse + 49, // 49: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse + 50, // 50: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse + 51, // 51: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse + 52, // 52: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse + 53, // 53: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse + 54, // 54: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse + 55, // 55: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse + 56, // 56: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse + 57, // 57: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse + 29, // [29:58] is the sub-list for method output_type + 0, // [0:29] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -358,6 +393,7 @@ func file_headscale_v1_headscale_proto_init() { file_headscale_v1_node_proto_init() file_headscale_v1_routes_proto_init() file_headscale_v1_apikey_proto_init() + file_headscale_v1_policy_proto_init() type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/gen/go/headscale/v1/headscale.pb.gw.go b/gen/go/headscale/v1/headscale.pb.gw.go index b46f383b..59a98ce3 100644 --- a/gen/go/headscale/v1/headscale.pb.gw.go +++ b/gen/go/headscale/v1/headscale.pb.gw.go @@ -795,6 +795,42 @@ func local_request_HeadscaleService_MoveNode_0(ctx context.Context, marshaler ru } +var ( + filter_HeadscaleService_BackfillNodeIPs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, 
Base: []int(nil), Check: []int(nil)} +) + +func request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BackfillNodeIPsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BackfillNodeIPs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BackfillNodeIPsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BackfillNodeIPs(ctx, &protoReq) + return msg, metadata, err + +} + func request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetRoutesRequest var metadata runtime.ServerMetadata @@ -1143,10 +1179,55 @@ func local_request_HeadscaleService_DeleteApiKey_0(ctx context.Context, marshale } +func request_HeadscaleService_GetPolicy_0(ctx context.Context, 
marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPolicyRequest + var metadata runtime.ServerMetadata + + msg, err := client.GetPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_HeadscaleService_GetPolicy_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetPolicyRequest + var metadata runtime.ServerMetadata + + msg, err := server.GetPolicy(ctx, &protoReq) + return msg, metadata, err + +} + +func request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetPolicyRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.SetPolicy(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_HeadscaleService_SetPolicy_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetPolicyRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.SetPolicy(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterHeadscaleServiceHandlerServer registers the http handlers 
for service HeadscaleService to "mux". // UnaryRPC :call HeadscaleServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterHeadscaleServiceHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server HeadscaleServiceServer) error { mux.Handle("GET", pattern_HeadscaleService_GetUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -1574,6 +1655,31 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser }) + mux.Handle("POST", pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/BackfillNodeIPs", runtime.WithHTTPPathPattern("/api/v1/node/backfillips")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_HeadscaleService_BackfillNodeIPs_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, 
outboundMarshaler, w, req, err) + return + } + + forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -1799,27 +1905,77 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser }) + mux.Handle("GET", pattern_HeadscaleService_GetPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetPolicy", runtime.WithHTTPPathPattern("/api/v1/policy")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_HeadscaleService_GetPolicy_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_HeadscaleService_GetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_HeadscaleService_SetPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/SetPolicy", runtime.WithHTTPPathPattern("/api/v1/policy")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_HeadscaleService_SetPolicy_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } // RegisterHeadscaleServiceHandlerFromEndpoint is same as RegisterHeadscaleServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterHeadscaleServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.DialContext(ctx, endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) 
if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -1837,7 +1993,7 @@ func RegisterHeadscaleServiceHandler(ctx context.Context, mux *runtime.ServeMux, // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "HeadscaleServiceClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "HeadscaleServiceClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "HeadscaleServiceClient" to call the correct interceptors. +// "HeadscaleServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares. 
func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client HeadscaleServiceClient) error { mux.Handle("GET", pattern_HeadscaleService_GetUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2214,6 +2370,28 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser }) + mux.Handle("POST", pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/BackfillNodeIPs", runtime.WithHTTPPathPattern("/api/v1/node/backfillips")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_HeadscaleService_BackfillNodeIPs_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -2412,6 +2590,50 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser }) + mux.Handle("GET", pattern_HeadscaleService_GetPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetPolicy", runtime.WithHTTPPathPattern("/api/v1/policy")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_HeadscaleService_GetPolicy_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_HeadscaleService_GetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_HeadscaleService_SetPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/SetPolicy", runtime.WithHTTPPathPattern("/api/v1/policy")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_HeadscaleService_SetPolicy_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_HeadscaleService_SetPolicy_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -2450,6 +2672,8 @@ var ( pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, "")) + pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, "")) + pattern_HeadscaleService_GetRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "routes"}, "")) pattern_HeadscaleService_EnableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "enable"}, "")) @@ -2467,6 +2691,10 @@ var ( pattern_HeadscaleService_ListApiKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, "")) pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, "")) + + pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, "")) + + pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, "")) ) var ( @@ -2504,6 +2732,8 @@ var ( forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage + forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage + forward_HeadscaleService_GetRoutes_0 = runtime.ForwardResponseMessage forward_HeadscaleService_EnableRoute_0 = runtime.ForwardResponseMessage @@ -2521,4 +2751,8 @@ var ( forward_HeadscaleService_ListApiKeys_0 = runtime.ForwardResponseMessage forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage + + forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage + + 
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage ) diff --git a/gen/go/headscale/v1/headscale_grpc.pb.go b/gen/go/headscale/v1/headscale_grpc.pb.go index 0d731adc..d57aa92e 100644 --- a/gen/go/headscale/v1/headscale_grpc.pb.go +++ b/gen/go/headscale/v1/headscale_grpc.pb.go @@ -36,6 +36,7 @@ const ( HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode" HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes" HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode" + HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs" HeadscaleService_GetRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetRoutes" HeadscaleService_EnableRoute_FullMethodName = "/headscale.v1.HeadscaleService/EnableRoute" HeadscaleService_DisableRoute_FullMethodName = "/headscale.v1.HeadscaleService/DisableRoute" @@ -45,6 +46,8 @@ const ( HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey" HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys" HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey" + HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy" + HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy" ) // HeadscaleServiceClient is the client API for HeadscaleService service. 
@@ -71,6 +74,7 @@ type HeadscaleServiceClient interface { RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error) + BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) // --- Route start --- GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error) @@ -82,6 +86,9 @@ type HeadscaleServiceClient interface { ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error) ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error) DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error) + // --- Policy start --- + GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error) + SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error) } type headscaleServiceClient struct { @@ -245,6 +252,15 @@ func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeReque return out, nil } +func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) { + out := new(BackfillNodeIPsResponse) + err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) { out := new(GetRoutesResponse) err := c.cc.Invoke(ctx, HeadscaleService_GetRoutes_FullMethodName, in, out, opts...) @@ -326,6 +342,24 @@ func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApi return out, nil } +func (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error) { + out := new(GetPolicyResponse) + err := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error) { + out := new(SetPolicyResponse) + err := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // HeadscaleServiceServer is the server API for HeadscaleService service. 
// All implementations must embed UnimplementedHeadscaleServiceServer // for forward compatibility @@ -350,6 +384,7 @@ type HeadscaleServiceServer interface { RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error) ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error) + BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) // --- Route start --- GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error) @@ -361,6 +396,9 @@ type HeadscaleServiceServer interface { ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error) ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error) DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error) + // --- Policy start --- + GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error) + SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) mustEmbedUnimplementedHeadscaleServiceServer() } @@ -419,6 +457,9 @@ func (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodes func (UnimplementedHeadscaleServiceServer) MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method MoveNode not implemented") } +func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented") +} func (UnimplementedHeadscaleServiceServer) GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetRoutes not implemented") } @@ -446,6 +487,12 @@ func (UnimplementedHeadscaleServiceServer) 
ListApiKeys(context.Context, *ListApi func (UnimplementedHeadscaleServiceServer) DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteApiKey not implemented") } +func (UnimplementedHeadscaleServiceServer) GetPolicy(context.Context, *GetPolicyRequest) (*GetPolicyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPolicy not implemented") +} +func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicyRequest) (*SetPolicyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented") +} func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {} // UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service. @@ -765,6 +812,24 @@ func _HeadscaleService_MoveNode_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BackfillNodeIPsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HeadscaleService_BackfillNodeIPs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, req.(*BackfillNodeIPsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _HeadscaleService_GetRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetRoutesRequest) if err := dec(in); err != nil { @@ -927,6 +992,42 @@ func 
_HeadscaleService_DeleteApiKey_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _HeadscaleService_GetPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HeadscaleServiceServer).GetPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HeadscaleService_GetPolicy_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HeadscaleServiceServer).GetPolicy(ctx, req.(*GetPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HeadscaleService_SetPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HeadscaleServiceServer).SetPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HeadscaleService_SetPolicy_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HeadscaleServiceServer).SetPolicy(ctx, req.(*SetPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + // HeadscaleService_ServiceDesc is the grpc.ServiceDesc for HeadscaleService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1002,6 +1103,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{ MethodName: "MoveNode", Handler: _HeadscaleService_MoveNode_Handler, }, + { + MethodName: "BackfillNodeIPs", + Handler: _HeadscaleService_BackfillNodeIPs_Handler, + }, { MethodName: "GetRoutes", Handler: _HeadscaleService_GetRoutes_Handler, @@ -1038,6 +1143,14 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{ MethodName: "DeleteApiKey", Handler: _HeadscaleService_DeleteApiKey_Handler, }, + { + MethodName: "GetPolicy", + Handler: _HeadscaleService_GetPolicy_Handler, + }, + { + MethodName: "SetPolicy", + Handler: _HeadscaleService_SetPolicy_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "headscale/v1/headscale.proto", diff --git a/gen/go/headscale/v1/node.pb.go b/gen/go/headscale/v1/node.pb.go index ee031566..61ed4064 100644 --- a/gen/go/headscale/v1/node.pb.go +++ b/gen/go/headscale/v1/node.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: headscale/v1/node.proto @@ -1141,6 +1141,100 @@ func (x *DebugCreateNodeResponse) GetNode() *Node { return nil } +type BackfillNodeIPsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Confirmed bool `protobuf:"varint,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"` +} + +func (x *BackfillNodeIPsRequest) Reset() { + *x = BackfillNodeIPsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_headscale_v1_node_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackfillNodeIPsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackfillNodeIPsRequest) ProtoMessage() {} + +func (x *BackfillNodeIPsRequest) ProtoReflect() protoreflect.Message { + mi := &file_headscale_v1_node_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackfillNodeIPsRequest.ProtoReflect.Descriptor instead. 
+func (*BackfillNodeIPsRequest) Descriptor() ([]byte, []int) { + return file_headscale_v1_node_proto_rawDescGZIP(), []int{19} +} + +func (x *BackfillNodeIPsRequest) GetConfirmed() bool { + if x != nil { + return x.Confirmed + } + return false +} + +type BackfillNodeIPsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Changes []string `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"` +} + +func (x *BackfillNodeIPsResponse) Reset() { + *x = BackfillNodeIPsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_headscale_v1_node_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackfillNodeIPsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackfillNodeIPsResponse) ProtoMessage() {} + +func (x *BackfillNodeIPsResponse) ProtoReflect() protoreflect.Message { + mi := &file_headscale_v1_node_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackfillNodeIPsResponse.ProtoReflect.Descriptor instead. 
+func (*BackfillNodeIPsResponse) Descriptor() ([]byte, []int) { + return file_headscale_v1_node_proto_rawDescGZIP(), []int{20} +} + +func (x *BackfillNodeIPsResponse) GetChanges() []string { + if x != nil { + return x.Changes + } + return nil +} + var File_headscale_v1_node_proto protoreflect.FileDescriptor var file_headscale_v1_node_proto_rawDesc = []byte{ @@ -1260,18 +1354,25 @@ var file_headscale_v1_node_proto_rawDesc = []byte{ 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, - 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x52, - 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, - 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, - 0x41, 0x55, 0x54, 0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, - 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x43, 0x4c, - 0x49, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, - 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42, 0x29, 0x5a, - 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, - 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, - 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x36, 0x0a, 0x16, 0x42, 0x61, 0x63, 0x6b, + 
0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, + 0x22, 0x33, 0x0a, 0x17, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, + 0x49, 0x50, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x73, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x47, 0x49, + 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x47, + 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x41, 0x55, 0x54, + 0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x47, 0x49, 0x53, + 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x10, 0x02, + 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, + 0x48, 0x4f, 0x44, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, + 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1287,8 +1388,8 @@ func file_headscale_v1_node_proto_rawDescGZIP() []byte { } var file_headscale_v1_node_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 19) -var 
file_headscale_v1_node_proto_goTypes = []interface{}{ +var file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_headscale_v1_node_proto_goTypes = []any{ (RegisterMethod)(0), // 0: headscale.v1.RegisterMethod (*Node)(nil), // 1: headscale.v1.Node (*RegisterNodeRequest)(nil), // 2: headscale.v1.RegisterNodeRequest @@ -1309,16 +1410,18 @@ var file_headscale_v1_node_proto_goTypes = []interface{}{ (*MoveNodeResponse)(nil), // 17: headscale.v1.MoveNodeResponse (*DebugCreateNodeRequest)(nil), // 18: headscale.v1.DebugCreateNodeRequest (*DebugCreateNodeResponse)(nil), // 19: headscale.v1.DebugCreateNodeResponse - (*User)(nil), // 20: headscale.v1.User - (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp - (*PreAuthKey)(nil), // 22: headscale.v1.PreAuthKey + (*BackfillNodeIPsRequest)(nil), // 20: headscale.v1.BackfillNodeIPsRequest + (*BackfillNodeIPsResponse)(nil), // 21: headscale.v1.BackfillNodeIPsResponse + (*User)(nil), // 22: headscale.v1.User + (*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp + (*PreAuthKey)(nil), // 24: headscale.v1.PreAuthKey } var file_headscale_v1_node_proto_depIdxs = []int32{ - 20, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User - 21, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp - 21, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp - 22, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey - 21, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp + 22, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User + 23, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp + 23, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp + 24, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey + 23, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp 0, // 5: headscale.v1.Node.register_method:type_name -> 
headscale.v1.RegisterMethod 1, // 6: headscale.v1.RegisterNodeResponse.node:type_name -> headscale.v1.Node 1, // 7: headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node @@ -1343,7 +1446,7 @@ func file_headscale_v1_node_proto_init() { file_headscale_v1_preauthkey_proto_init() file_headscale_v1_user_proto_init() if !protoimpl.UnsafeEnabled { - file_headscale_v1_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Node); i { case 0: return &v.state @@ -1355,7 +1458,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RegisterNodeRequest); i { case 0: return &v.state @@ -1367,7 +1470,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*RegisterNodeResponse); i { case 0: return &v.state @@ -1379,7 +1482,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetNodeRequest); i { case 0: return &v.state @@ -1391,7 +1494,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetNodeResponse); i { case 0: return &v.state @@ -1403,7 +1506,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[5].Exporter = func(v 
interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*SetTagsRequest); i { case 0: return &v.state @@ -1415,7 +1518,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*SetTagsResponse); i { case 0: return &v.state @@ -1427,7 +1530,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DeleteNodeRequest); i { case 0: return &v.state @@ -1439,7 +1542,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*DeleteNodeResponse); i { case 0: return &v.state @@ -1451,7 +1554,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ExpireNodeRequest); i { case 0: return &v.state @@ -1463,7 +1566,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ExpireNodeResponse); i { case 0: return &v.state @@ -1475,7 +1578,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[11].Exporter = 
func(v any, i int) any { switch v := v.(*RenameNodeRequest); i { case 0: return &v.state @@ -1487,7 +1590,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*RenameNodeResponse); i { case 0: return &v.state @@ -1499,7 +1602,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*ListNodesRequest); i { case 0: return &v.state @@ -1511,7 +1614,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*ListNodesResponse); i { case 0: return &v.state @@ -1523,7 +1626,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*MoveNodeRequest); i { case 0: return &v.state @@ -1535,7 +1638,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*MoveNodeResponse); i { case 0: return &v.state @@ -1547,7 +1650,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*DebugCreateNodeRequest); i { case 0: 
return &v.state @@ -1559,7 +1662,7 @@ func file_headscale_v1_node_proto_init() { return nil } } - file_headscale_v1_node_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_node_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*DebugCreateNodeResponse); i { case 0: return &v.state @@ -1571,6 +1674,30 @@ func file_headscale_v1_node_proto_init() { return nil } } + file_headscale_v1_node_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*BackfillNodeIPsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_headscale_v1_node_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*BackfillNodeIPsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1578,7 +1705,7 @@ func file_headscale_v1_node_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_headscale_v1_node_proto_rawDesc, NumEnums: 1, - NumMessages: 19, + NumMessages: 21, NumExtensions: 0, NumServices: 0, }, diff --git a/gen/go/headscale/v1/policy.pb.go b/gen/go/headscale/v1/policy.pb.go new file mode 100644 index 00000000..62a079be --- /dev/null +++ b/gen/go/headscale/v1/policy.pb.go @@ -0,0 +1,352 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: headscale/v1/policy.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SetPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"` +} + +func (x *SetPolicyRequest) Reset() { + *x = SetPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_headscale_v1_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetPolicyRequest) ProtoMessage() {} + +func (x *SetPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_headscale_v1_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetPolicyRequest.ProtoReflect.Descriptor instead. 
+func (*SetPolicyRequest) Descriptor() ([]byte, []int) { + return file_headscale_v1_policy_proto_rawDescGZIP(), []int{0} +} + +func (x *SetPolicyRequest) GetPolicy() string { + if x != nil { + return x.Policy + } + return "" +} + +type SetPolicyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` +} + +func (x *SetPolicyResponse) Reset() { + *x = SetPolicyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_headscale_v1_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetPolicyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetPolicyResponse) ProtoMessage() {} + +func (x *SetPolicyResponse) ProtoReflect() protoreflect.Message { + mi := &file_headscale_v1_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetPolicyResponse.ProtoReflect.Descriptor instead. 
+func (*SetPolicyResponse) Descriptor() ([]byte, []int) { + return file_headscale_v1_policy_proto_rawDescGZIP(), []int{1} +} + +func (x *SetPolicyResponse) GetPolicy() string { + if x != nil { + return x.Policy + } + return "" +} + +func (x *SetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +type GetPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetPolicyRequest) Reset() { + *x = GetPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_headscale_v1_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPolicyRequest) ProtoMessage() {} + +func (x *GetPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_headscale_v1_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPolicyRequest.ProtoReflect.Descriptor instead. 
+func (*GetPolicyRequest) Descriptor() ([]byte, []int) { + return file_headscale_v1_policy_proto_rawDescGZIP(), []int{2} +} + +type GetPolicyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` +} + +func (x *GetPolicyResponse) Reset() { + *x = GetPolicyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_headscale_v1_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPolicyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPolicyResponse) ProtoMessage() {} + +func (x *GetPolicyResponse) ProtoReflect() protoreflect.Message { + mi := &file_headscale_v1_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPolicyResponse.ProtoReflect.Descriptor instead. 
+func (*GetPolicyResponse) Descriptor() ([]byte, []int) { + return file_headscale_v1_policy_proto_rawDescGZIP(), []int{3} +} + +func (x *GetPolicyResponse) GetPolicy() string { + if x != nil { + return x.Policy + } + return "" +} + +func (x *GetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +var File_headscale_v1_policy_proto protoreflect.FileDescriptor + +var file_headscale_v1_policy_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61, + 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2a, 0x0a, 0x10, 0x53, 0x65, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x66, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x12, + 0x0a, 0x10, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x66, 0x0a, 0x11, 0x47, 0x65, 
0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, + 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_headscale_v1_policy_proto_rawDescOnce sync.Once + file_headscale_v1_policy_proto_rawDescData = file_headscale_v1_policy_proto_rawDesc +) + +func file_headscale_v1_policy_proto_rawDescGZIP() []byte { + file_headscale_v1_policy_proto_rawDescOnce.Do(func() { + file_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_policy_proto_rawDescData) + }) + return file_headscale_v1_policy_proto_rawDescData +} + +var file_headscale_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_headscale_v1_policy_proto_goTypes = []any{ + (*SetPolicyRequest)(nil), // 0: headscale.v1.SetPolicyRequest + (*SetPolicyResponse)(nil), // 1: headscale.v1.SetPolicyResponse + (*GetPolicyRequest)(nil), // 2: headscale.v1.GetPolicyRequest + (*GetPolicyResponse)(nil), // 3: headscale.v1.GetPolicyResponse + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp +} +var file_headscale_v1_policy_proto_depIdxs = []int32{ + 4, // 0: headscale.v1.SetPolicyResponse.updated_at:type_name -> google.protobuf.Timestamp + 4, // 1: 
headscale.v1.GetPolicyResponse.updated_at:type_name -> google.protobuf.Timestamp + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_headscale_v1_policy_proto_init() } +func file_headscale_v1_policy_proto_init() { + if File_headscale_v1_policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_headscale_v1_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*SetPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_headscale_v1_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*SetPolicyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_headscale_v1_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_headscale_v1_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetPolicyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_headscale_v1_policy_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_headscale_v1_policy_proto_goTypes, + DependencyIndexes: file_headscale_v1_policy_proto_depIdxs, + MessageInfos: file_headscale_v1_policy_proto_msgTypes, + }.Build() + 
File_headscale_v1_policy_proto = out.File + file_headscale_v1_policy_proto_rawDesc = nil + file_headscale_v1_policy_proto_goTypes = nil + file_headscale_v1_policy_proto_depIdxs = nil +} diff --git a/gen/go/headscale/v1/preauthkey.pb.go b/gen/go/headscale/v1/preauthkey.pb.go index 35a0dfe0..ede617f2 100644 --- a/gen/go/headscale/v1/preauthkey.pb.go +++ b/gen/go/headscale/v1/preauthkey.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: headscale/v1/preauthkey.proto @@ -522,7 +522,7 @@ func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte { } var file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_headscale_v1_preauthkey_proto_goTypes = []interface{}{ +var file_headscale_v1_preauthkey_proto_goTypes = []any{ (*PreAuthKey)(nil), // 0: headscale.v1.PreAuthKey (*CreatePreAuthKeyRequest)(nil), // 1: headscale.v1.CreatePreAuthKeyRequest (*CreatePreAuthKeyResponse)(nil), // 2: headscale.v1.CreatePreAuthKeyResponse @@ -551,7 +551,7 @@ func file_headscale_v1_preauthkey_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_headscale_v1_preauthkey_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_preauthkey_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*PreAuthKey); i { case 0: return &v.state @@ -563,7 +563,7 @@ func file_headscale_v1_preauthkey_proto_init() { return nil } } - file_headscale_v1_preauthkey_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_preauthkey_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*CreatePreAuthKeyRequest); i { case 0: return &v.state @@ -575,7 +575,7 @@ func file_headscale_v1_preauthkey_proto_init() { return nil } } - file_headscale_v1_preauthkey_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + 
file_headscale_v1_preauthkey_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*CreatePreAuthKeyResponse); i { case 0: return &v.state @@ -587,7 +587,7 @@ func file_headscale_v1_preauthkey_proto_init() { return nil } } - file_headscale_v1_preauthkey_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_preauthkey_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExpirePreAuthKeyRequest); i { case 0: return &v.state @@ -599,7 +599,7 @@ func file_headscale_v1_preauthkey_proto_init() { return nil } } - file_headscale_v1_preauthkey_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_preauthkey_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ExpirePreAuthKeyResponse); i { case 0: return &v.state @@ -611,7 +611,7 @@ func file_headscale_v1_preauthkey_proto_init() { return nil } } - file_headscale_v1_preauthkey_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_preauthkey_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ListPreAuthKeysRequest); i { case 0: return &v.state @@ -623,7 +623,7 @@ func file_headscale_v1_preauthkey_proto_init() { return nil } } - file_headscale_v1_preauthkey_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_preauthkey_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ListPreAuthKeysResponse); i { case 0: return &v.state diff --git a/gen/go/headscale/v1/routes.pb.go b/gen/go/headscale/v1/routes.pb.go index d2273047..76806db8 100644 --- a/gen/go/headscale/v1/routes.pb.go +++ b/gen/go/headscale/v1/routes.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: headscale/v1/routes.proto @@ -643,7 +643,7 @@ func file_headscale_v1_routes_proto_rawDescGZIP() []byte { } var file_headscale_v1_routes_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_headscale_v1_routes_proto_goTypes = []interface{}{ +var file_headscale_v1_routes_proto_goTypes = []any{ (*Route)(nil), // 0: headscale.v1.Route (*GetRoutesRequest)(nil), // 1: headscale.v1.GetRoutesRequest (*GetRoutesResponse)(nil), // 2: headscale.v1.GetRoutesResponse @@ -679,7 +679,7 @@ func file_headscale_v1_routes_proto_init() { } file_headscale_v1_node_proto_init() if !protoimpl.UnsafeEnabled { - file_headscale_v1_routes_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Route); i { case 0: return &v.state @@ -691,7 +691,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetRoutesRequest); i { case 0: return &v.state @@ -703,7 +703,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetRoutesResponse); i { case 0: return &v.state @@ -715,7 +715,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*EnableRouteRequest); i { case 0: return &v.state @@ -727,7 +727,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - 
file_headscale_v1_routes_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*EnableRouteResponse); i { case 0: return &v.state @@ -739,7 +739,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*DisableRouteRequest); i { case 0: return &v.state @@ -751,7 +751,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*DisableRouteResponse); i { case 0: return &v.state @@ -763,7 +763,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*GetNodeRoutesRequest); i { case 0: return &v.state @@ -775,7 +775,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*GetNodeRoutesResponse); i { case 0: return &v.state @@ -787,7 +787,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*DeleteRouteRequest); i { case 0: return &v.state @@ -799,7 +799,7 @@ func file_headscale_v1_routes_proto_init() { return nil } } - file_headscale_v1_routes_proto_msgTypes[10].Exporter 
= func(v interface{}, i int) interface{} { + file_headscale_v1_routes_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*DeleteRouteResponse); i { case 0: return &v.state diff --git a/gen/go/headscale/v1/user.pb.go b/gen/go/headscale/v1/user.pb.go index 17cb4b54..ff1a5689 100644 --- a/gen/go/headscale/v1/user.pb.go +++ b/gen/go/headscale/v1/user.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.34.2 // protoc (unknown) // source: headscale/v1/user.proto @@ -607,7 +607,7 @@ func file_headscale_v1_user_proto_rawDescGZIP() []byte { } var file_headscale_v1_user_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_headscale_v1_user_proto_goTypes = []interface{}{ +var file_headscale_v1_user_proto_goTypes = []any{ (*User)(nil), // 0: headscale.v1.User (*GetUserRequest)(nil), // 1: headscale.v1.GetUserRequest (*GetUserResponse)(nil), // 2: headscale.v1.GetUserResponse @@ -640,7 +640,7 @@ func file_headscale_v1_user_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_headscale_v1_user_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*User); i { case 0: return &v.state @@ -652,7 +652,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GetUserRequest); i { case 0: return &v.state @@ -664,7 +664,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*GetUserResponse); i { case 0: return &v.state @@ -676,7 +676,7 @@ func file_headscale_v1_user_proto_init() { 
return nil } } - file_headscale_v1_user_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*CreateUserRequest); i { case 0: return &v.state @@ -688,7 +688,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*CreateUserResponse); i { case 0: return &v.state @@ -700,7 +700,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*RenameUserRequest); i { case 0: return &v.state @@ -712,7 +712,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*RenameUserResponse); i { case 0: return &v.state @@ -724,7 +724,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*DeleteUserRequest); i { case 0: return &v.state @@ -736,7 +736,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*DeleteUserResponse); i { case 0: return &v.state @@ -748,7 +748,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[9].Exporter = func(v interface{}, i int) 
interface{} { + file_headscale_v1_user_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ListUsersRequest); i { case 0: return &v.state @@ -760,7 +760,7 @@ func file_headscale_v1_user_proto_init() { return nil } } - file_headscale_v1_user_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_headscale_v1_user_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ListUsersResponse); i { case 0: return &v.state diff --git a/gen/openapiv2/headscale/v1/headscale.swagger.json b/gen/openapiv2/headscale/v1/headscale.swagger.json index 7fe0b696..9530ea4d 100644 --- a/gen/openapiv2/headscale/v1/headscale.swagger.json +++ b/gen/openapiv2/headscale/v1/headscale.swagger.json @@ -194,6 +194,36 @@ ] } }, + "/api/v1/node/backfillips": { + "post": { + "operationId": "HeadscaleService_BackfillNodeIPs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1BackfillNodeIPsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "confirmed", + "in": "query", + "required": false, + "type": "boolean" + } + ], + "tags": [ + "HeadscaleService" + ] + } + }, "/api/v1/node/register": { "post": { "operationId": "HeadscaleService_RegisterNode", @@ -465,6 +495,59 @@ ] } }, + "/api/v1/policy": { + "get": { + "summary": "--- Policy start ---", + "operationId": "HeadscaleService_GetPolicy", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1GetPolicyResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "tags": [ + "HeadscaleService" + ] + }, + "put": { + "operationId": "HeadscaleService_SetPolicy", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": 
"#/definitions/v1SetPolicyResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1SetPolicyRequest" + } + } + ], + "tags": [ + "HeadscaleService" + ] + } + }, "/api/v1/preauthkey": { "get": { "operationId": "HeadscaleService_ListPreAuthKeys", @@ -886,6 +969,17 @@ } } }, + "v1BackfillNodeIPsResponse": { + "type": "object", + "properties": { + "changes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "v1CreateApiKeyRequest": { "type": "object", "properties": { @@ -1050,6 +1144,18 @@ } } }, + "v1GetPolicyResponse": { + "type": "object", + "properties": { + "policy": { + "type": "string" + }, + "updatedAt": { + "type": "string", + "format": "date-time" + } + } + }, "v1GetRoutesResponse": { "type": "object", "properties": { @@ -1305,6 +1411,26 @@ } } }, + "v1SetPolicyRequest": { + "type": "object", + "properties": { + "policy": { + "type": "string" + } + } + }, + "v1SetPolicyResponse": { + "type": "object", + "properties": { + "policy": { + "type": "string" + }, + "updatedAt": { + "type": "string", + "format": "date-time" + } + } + }, "v1SetTagsResponse": { "type": "object", "properties": { diff --git a/gen/openapiv2/headscale/v1/policy.swagger.json b/gen/openapiv2/headscale/v1/policy.swagger.json new file mode 100644 index 00000000..63057ed0 --- /dev/null +++ b/gen/openapiv2/headscale/v1/policy.swagger.json @@ -0,0 +1,44 @@ +{ + "swagger": "2.0", + "info": { + "title": "headscale/v1/policy.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": 
{ + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + } + } +} diff --git a/go.mod b/go.mod index be7be536..18089bbd 100644 --- a/go.mod +++ b/go.mod @@ -1,64 +1,63 @@ module github.com/juanfont/headscale -go 1.22 - -toolchain go1.22.0 +go 1.23.0 require ( github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/coreos/go-oidc/v3 v3.9.0 + github.com/coreos/go-oidc/v3 v3.11.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/deckarep/golang-set/v2 v2.6.0 - github.com/efekarakus/termcolor v1.0.1 - github.com/glebarez/sqlite v1.10.0 - github.com/go-gormigrate/gormigrate/v2 v2.1.1 - github.com/gofrs/uuid/v5 v5.0.0 + github.com/glebarez/sqlite v1.11.0 + github.com/go-gormigrate/gormigrate/v2 v2.1.2 + github.com/gofrs/uuid/v5 v5.3.0 github.com/google/go-cmp v0.6.0 github.com/gorilla/mux v1.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 - github.com/klauspost/compress v1.17.6 - github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282 - github.com/ory/dockertest/v3 v3.10.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 + github.com/jagottsicher/termcolor v1.0.2 + github.com/klauspost/compress v1.17.9 + github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 + github.com/ory/dockertest/v3 v3.11.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/philip-bui/grpc-zerolog v1.0.1 github.com/pkg/profile v1.7.0 - github.com/prometheus/client_golang v1.18.0 - github.com/prometheus/common v0.46.0 - github.com/pterm/pterm v0.12.78 - github.com/puzpuzpuz/xsync/v3 v3.0.2 - github.com/rs/zerolog v1.32.0 - github.com/samber/lo v1.39.0 - github.com/spf13/cobra v1.8.0 - github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.8.4 + github.com/prometheus/client_golang v1.20.2 + github.com/prometheus/common v0.58.0 
+ github.com/pterm/pterm v0.12.79 + github.com/puzpuzpuz/xsync/v3 v3.4.0 + github.com/rs/zerolog v1.33.0 + github.com/samber/lo v1.47.0 + github.com/sasha-s/go-deadlock v0.3.5 + github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.20.0-alpha.6 + github.com/stretchr/testify v1.9.0 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a - github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b + github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1 github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.19.0 - golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 - golang.org/x/net v0.21.0 - golang.org/x/oauth2 v0.17.0 - golang.org/x/sync v0.6.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 - google.golang.org/grpc v1.61.0 - google.golang.org/protobuf v1.32.0 + golang.org/x/crypto v0.26.0 + golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 + golang.org/x/net v0.28.0 + golang.org/x/oauth2 v0.22.0 + golang.org/x/sync v0.8.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/grpc v1.66.0 + google.golang.org/protobuf v1.34.2 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/yaml.v3 v3.0.1 - gorm.io/driver/postgres v1.5.4 - gorm.io/gorm v1.25.5 - tailscale.com v1.58.2 + gorm.io/driver/postgres v1.5.9 + gorm.io/gorm v1.25.11 + tailscale.com v1.72.1 ) require ( atomicgo.dev/cursor v0.2.0 // indirect atomicgo.dev/keyboard v0.2.9 // indirect atomicgo.dev/schedule v0.1.0 // indirect - dario.cat/mergo v1.0.0 // indirect + dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/akutz/memconn v0.1.0 // 
indirect github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect @@ -77,49 +76,54 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect github.com/aws/smithy-go v1.19.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/coder/websocket v1.8.12 // indirect github.com/containerd/console v1.0.4 // indirect github.com/containerd/continuity v0.4.3 // indirect - github.com/coreos/go-iptables v0.7.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect + github.com/creachadair/mds v0.14.5 // indirect github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect - github.com/docker/cli v25.0.3+incompatible // indirect - github.com/docker/docker v25.0.3+incompatible // indirect + github.com/docker/cli v27.2.0+incompatible // indirect + github.com/docker/docker v27.2.0+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/felixge/fgprof v0.9.3 // indirect + github.com/felixge/fgprof v0.9.5 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.5.0 // indirect + github.com/fxamacker/cbor/v2 v2.6.0 // indirect + github.com/gaissmai/bart v0.11.1 // indirect github.com/glebarez/go-sqlite v1.22.0 // indirect - github.com/go-jose/go-jose/v3 v3.0.1 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/go-jose/go-jose/v4 v4.0.2 // indirect + github.com/go-json-experiment/json 
v0.0.0-20231102232822-2e55bd4e08b0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/go-github v17.0.0+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c // indirect - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect + github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect + github.com/google/pprof v0.0.0-20240829160300-da1f7e9f2b25 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/csrf v1.7.2 // indirect github.com/gorilla/securecookie v1.1.2 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect github.com/hdevalence/ed25519consensus v0.2.0 // indirect github.com/illarion/gonotify v1.0.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect - github.com/jackc/pgx/v5 v5.5.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx/v5 v5.6.0 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect 
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect @@ -130,12 +134,10 @@ require ( github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/lib/pq v1.10.7 // indirect github.com/lithammer/fuzzysearch v1.1.8 // indirect - github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/sdnotify v1.0.0 // indirect @@ -143,39 +145,42 @@ require ( github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/miekg/dns v1.1.58 // indirect github.com/mitchellh/go-ps v1.0.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc6 // indirect - github.com/opencontainers/runc v1.1.12 // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runc v1.1.14 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + 
github.com/prometheus-community/pro-bing v0.4.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/safchain/ethtool v0.3.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sagikazarmark/locafero v0.6.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect - github.com/tailscale/golang-x-crypto v0.0.0-20240108194725-7ce1f622c780 // indirect + github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 // indirect github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 // indirect - github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0 // indirect - github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7 // indirect - github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 // indirect + github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 // indirect + github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257 // indirect + github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185 // indirect + github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 // indirect + 
github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 // indirect github.com/tcnksm/go-httpstat v0.2.0 // indirect github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect @@ -187,25 +192,19 @@ require ( github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect go.uber.org/multierr v1.11.0 // indirect go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.24.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect golang.zx2c4.com/wireguard/windows v0.5.3 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c // indirect - inet.af/peercred v0.0.0-20210906144145-0893ea02156a // indirect - modernc.org/libc v1.41.0 // indirect + gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 // indirect + modernc.org/libc v1.60.1 // indirect modernc.org/mathutil v1.6.0 // indirect - modernc.org/memory v1.7.2 // indirect - modernc.org/sqlite v1.28.0 // indirect - nhooyr.io/websocket v1.8.10 // indirect + modernc.org/memory v1.8.0 // indirect + modernc.org/sqlite v1.32.0 // indirect ) diff --git a/go.sum b/go.sum 
index d654db11..2213f423 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,8 @@ atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtE atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs= atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= @@ -29,8 +29,8 @@ github.com/MarvinJWendt/testza v0.3.0/go.mod h1:eFcL4I0idjtIx8P9C6KkAuLgATNKpX4/ github.com/MarvinJWendt/testza v0.4.2/go.mod h1:mSdhXiKH8sg/gQehJ63bINcCKp7RtYewEjXsvsVUPbE= github.com/MarvinJWendt/testza v0.5.2 h1:53KDo64C1z/h/d/stCYCPY69bt/OSwjq5KpFNwi+zB4= github.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc4n3EjyIYvEY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 
h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -83,34 +83,45 @@ github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= -github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= +github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= +github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= -github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8= -github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= -github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-iptables 
v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= +github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= +github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/mds v0.14.5 h1:2amuO4yCbQkaAyDoLO5iCbwbTRQZz4EpRhOejQbf4+8= +github.com/creachadair/mds v0.14.5/go.mod h1:4vrFYUzTXMJpMBU+OA292I6IUxKWCCfZkgXg+/kBZMo= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= +github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -121,56 +132,70 @@ github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80N github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios 
v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= -github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284= -github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= -github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= +github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= +github.com/docker/cli v27.2.0+incompatible h1:yHD1QEB1/0vr5eBNpu8tncu8gWxg8EydFPOSKHzXSMM= +github.com/docker/cli v27.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= +github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= +github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/efekarakus/termcolor v1.0.1 h1:YAKFO3bnLrqZGTWyNLcYoSIAQFKVOmbqmDnwsU/znzg= -github.com/efekarakus/termcolor v1.0.1/go.mod h1:AitrZNrE4nPO538fRsqf+p0WgLdAsGN5pUNrHEPsEMM= github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= +github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= -github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= +github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo= github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= -github.com/glebarez/sqlite v1.10.0 
h1:u4gt8y7OND/cCei/NMHmfbLxF6xP2wgKcT/BJf2pYkc= -github.com/glebarez/sqlite v1.10.0/go.mod h1:IJ+lfSOmiekhQsFTJRx/lHtGYmCdtAiTaf5wI9u5uHA= -github.com/go-gormigrate/gormigrate/v2 v2.1.1 h1:eGS0WTFRV30r103lU8JNXY27KbviRnqqIDobW3EV3iY= -github.com/go-gormigrate/gormigrate/v2 v2.1.1/go.mod h1:L7nJ620PFDKei9QOhJzqA8kRCk+E3UbV2f5gv+1ndLc= -github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= -github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= +github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= +github.com/go-gormigrate/gormigrate/v2 v2.1.2 h1:F/d1hpHbRAvKezziV2CC5KUE82cVe9zTgHSBoOOZ4CY= +github.com/go-gormigrate/gormigrate/v2 v2.1.2/go.mod h1:9nHVX6z3FCMCQPA7PThGcA55t22yKQfK/Dnsf5i7hUo= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= +github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod 
h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M= -github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= +github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk= +github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -178,17 +203,13 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= @@ -197,11 +218,12 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c h1:06RMfw+TMMHtRuUOroMeatRCCgSMWXCJQeABvHU69YQ= -github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c/go.mod h1:BVIYo3cdnT4qSylnYqcd5YtmXhr51cJPGtnLBe/uLBU= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240829160300-da1f7e9f2b25 h1:sEDPKUw6iPjczdu33njxFjO6tYa9bfc0z/QyB/zSsBw= +github.com/google/pprof v0.0.0-20240829160300-da1f7e9f2b25/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -218,17 
+240,18 @@ github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kX github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/ianlancetaylor/demangle 
v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -237,12 +260,16 @@ github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 h1:kD8PseueGeYii github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= -github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.5.3 h1:Ces6/M3wbDXYpM8JyyPD57ivTtJACFZJd885pdIaV2s= -github.com/jackc/pgx/v5 v5.5.3/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM= +github.com/jagottsicher/termcolor v1.0.2/go.mod 
h1:RcH8uFwF/0wbEdQmi83rjmlJ+QOKdMSE9Rc1BEB7zFo= +github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= +github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= @@ -251,6 +278,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= @@ -260,8 +288,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.9 
h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= @@ -280,24 +308,25 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= -github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4= github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= @@ -313,29 +342,34 @@ github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 
h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= -github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282 h1:TQMyrpijtkFyXpNI3rY5hsZQZw+paiH+BfAlsb81HBY= -github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282/go.mod h1:rW25Kyd08Wdn3UVn0YBsDTSvReu0jqpmJKzxITPSjks= +github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3TcnjeqjPT2gSlha4asp8NvgcFRYExCaikCxk= +github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25/go.mod h1:eDjgYHYDJbPLBLsyZ6qRaugP0mX8vePOhZ5id1fdzJw= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= -github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= -github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= 
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runc v1.1.14 h1:rgSuzbmgz5DUJjeSnw337TxDbRuqjs6iqQck/2weR6w= +github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= -github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= +github.com/ory/dockertest/v3 v3.11.0 h1:OiHcxKAvSDUwsEVh2BjxQQc/5EHz9n0va9awCtNGuyA= +github.com/ory/dockertest/v3 v3.11.0/go.mod h1:VIPxS1gwT9NpPOrfD3rACs8Y9Z7yhzO4SB194iUDnUI= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= -github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA= github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ= github.com/pierrec/lz4/v4 v4.1.14/go.mod 
h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -352,15 +386,17 @@ github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Q github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4= +github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= -github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.58.0 
h1:N+N8vY4/23r6iYfD3UQZUoJPnUYAo7v6LG5XZxjZTXo= +github.com/prometheus/common v0.58.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI= github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg= github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE= @@ -368,10 +404,10 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE= github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8= github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s= -github.com/pterm/pterm v0.12.78 h1:QTWKaIAa4B32GKwqVXtu9m1DUMgWw3VRljMkMevX+b8= -github.com/pterm/pterm v0.12.78/go.mod h1:1v/gzOF1N0FsjbgTHZ1wVycRkKiatFvJSJC4IGaQAAo= -github.com/puzpuzpuz/xsync/v3 v3.0.2 h1:3yESHrRFYr6xzkz61LLkvNiPFXxJEAABanTQpKbAaew= -github.com/puzpuzpuz/xsync/v3 v3.0.2/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/pterm/pterm v0.12.79 h1:lH3yrYMhdpeqX9y5Ep1u7DejyHy7NSQg9qrBjF9dFT4= +github.com/pterm/pterm v0.12.79/go.mod h1:1v/gzOF1N0FsjbgTHZ1wVycRkKiatFvJSJC4IGaQAAo= +github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4= +github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -381,17 +417,17 @@ 
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= -github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= -github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= +github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= +github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= +github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod 
h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= @@ -402,20 +438,19 @@ github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9yS github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.20.0-alpha.6 h1:f65Cr/+2qk4GfHC0xqT/isoupQppwN5+VLRztUGTDbY= +github.com/spf13/viper v1.20.0-alpha.6/go.mod h1:CGBZzv0c9fOUASm6rfus4wdeIjR/04NOLq1P4KRhX3k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= -github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -423,31 +458,38 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ= github.com/tailscale/certstore 
v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= -github.com/tailscale/golang-x-crypto v0.0.0-20240108194725-7ce1f622c780 h1:U0J2CUrrTcc2wmr9tSLYEo+USfwNikRRsmxVLD4eZ7E= -github.com/tailscale/golang-x-crypto v0.0.0-20240108194725-7ce1f622c780/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= +github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= +github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0 h1:0bcWsoeSBbY3XWRS1F8yp/g343E5TQMakwy5cxJS+ZU= -github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0/go.mod h1:/8aqnX9aU8yubwQ2InR5mHi1OlfWQ8ei8Ea2eyLScOY= -github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b h1:FzqUT8XFn3OJTzTMteYMZlg3EUQMxoq7oJiaVj4SEBA= -github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b/go.mod h1:Nkao4BDbQqzxxg78ty4ejq+KgX/0Bxj00DxfxScuJoI= 
-github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7 h1:xAgOVncJuuxkFZ2oXXDKFTH4HDdFYSZRYdA6oMrCewg= -github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 h1:zwsem4CaamMdC3tFoTpzrsUSMDPV0K6rhnQdF7kXekQ= -github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= +github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257 h1:6WsbDYsikRNmmbfZoRoyIEA9tfl0aspPAE0t7nBj2B4= +github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257/go.mod h1:hrq01/0LUDZf4mMkcZ7Ovmy33jvCi4RpESpb9kPxV6E= +github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185 h1:zT+qB+2Ghulj50d5Wq6h6vQYqD2sPdhy4FF6+FHedVE= +github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185/go.mod h1:LoIjI6z/6efr9ebISQ5l2vjQmjc8QJrAYZdy3Ec3sVs= +github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1 h1:wmsnxEEuRlgK7Bhdkmm0JGrjjc0JoHZThLLo0WXXbLs= +github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1/go.mod h1:XN193fbz9RR/5stlWPMMIZR+TTa1BUkDJm5Azwzxwgw= +github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= +github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= +github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 
h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk= github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= @@ -456,8 +498,8 @@ github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2 github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM= github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok= github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI= -github.com/u-root/u-root v0.11.0 h1:6gCZLOeRyevw7gbTwMj3fKxnr9+yHFlgF3N7udUVNO8= -github.com/u-root/u-root v0.11.0/go.mod h1:DBkDtiZyONk9hzVEdB/PWI9B4TxDkElWlVTHseglrZY= +github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= +github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= @@ -491,20 +533,19 @@ go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wus go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod 
h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 h1:/RIbNt/Zr7rVhIkQhooTxCxFcdWLGIKnZA4IXNFSrvo= -golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/exp/typeparams v0.0.0-20230905200255-921286631fa9 h1:j3D9DvWRpUfIyFfDPws7LoIZ2MAI1OJHdQXtTnYtN+k= -golang.org/x/exp/typeparams v0.0.0-20230905200255-921286631fa9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/image v0.12.0 h1:w13vZbU4o5rKOFFR8y7M+c4A5jXDC0uXTdHYRP8X2DQ= -golang.org/x/image v0.12.0/go.mod h1:Lu90jvHG7GfemOIcldsh9A2hS01ocl6oNO7ype5mEnk= +golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA= +golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp/typeparams 
v0.0.0-20240119083558-1b970713d09a h1:8qmSSA8Gz/1kTrCe0nqR0R3Gb/NDhykzWw2q2mWZydM= +golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ= +golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -513,8 +554,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -524,14 +565,14 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -540,28 +581,25 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210301091718-77cc2087c03b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -573,26 +611,29 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -606,8 +647,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY 
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -618,37 +659,27 @@ golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= -google.golang.org/genproto 
v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= 
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -658,47 +689,47 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo= -gorm.io/driver/postgres 
v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0= -gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= -gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c h1:bYb98Ra11fJ8F2xFbZx0zg2VQ28lYqC1JxfaaF53xqY= -gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c/go.mod h1:AVgIgHMwK63XvmAzWG9vLQ41YnVHN0du0tEC46fI7yY= +gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8= +gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI= +gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg= +gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= -honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= +honnef.co/go/tools v0.4.7 h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs= +honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -inet.af/peercred v0.0.0-20210906144145-0893ea02156a 
h1:qdkS8Q5/i10xU2ArJMKYhVa1DORzBfYS/qA2UK2jheg= -inet.af/peercred v0.0.0-20210906144145-0893ea02156a/go.mod h1:FjawnflS/udxX+SvpsMgZfdqx2aykOlkISeAsADi5IU= -inet.af/wf v0.0.0-20221017222439-36129f591884 h1:zg9snq3Cpy50lWuVqDYM7AIRVTtU50y5WXETMFohW/Q= -inet.af/wf v0.0.0-20221017222439-36129f591884/go.mod h1:bSAQ38BYbY68uwpasXOTZo22dKGy9SNvI6PZFeKomZE= -lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= -lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= -modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= -modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0= -modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI= -modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk= -modernc.org/libc v1.41.0/go.mod h1:w0eszPsiXoOnoMJgrXjglgLuDy/bt5RR4y3QzUUeodY= +modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ= +modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= +modernc.org/ccgo/v4 v4.21.0 h1:kKPI3dF7RIag8YcToh5ZwDcVMIv6VGa0ED5cvh0LMW4= +modernc.org/ccgo/v4 v4.21.0/go.mod h1:h6kt6H/A2+ew/3MW/p6KEoQmrq/i3pr0J/SiwiaF/g0= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.5.0 h1:bJ9ChznK1L1mUtAQtxi0wi5AtAs5jQuw4PrPHO5pb6M= +modernc.org/gc/v2 v2.5.0/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.60.1 h1:at373l8IFRTkJIkAU85BIuUoBM4T1b51ds0E1ovPG2s= +modernc.org/libc v1.60.1/go.mod h1:xJuobKuNxKH3RUatS7GjR+suWj+5c2K7bi4m/S5arOY= 
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= -modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= -modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= -modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= +modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= +modernc.org/sqlite v1.32.0 h1:6BM4uGza7bWypsw4fdLRsLxut6bHe4c58VeqjRgST8s= +modernc.org/sqlite v1.32.0/go.mod h1:UqoylwmTb9F+IqXERT8bW9zzOWN8qwAIcLdzeBZs4hA= modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -software.sslmate.com/src/go-pkcs12 v0.2.1 h1:tbT1jjaeFOF230tzOIRJ6U5S1jNqpsSyNjzDd58H3J8= -software.sslmate.com/src/go-pkcs12 v0.2.1/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= -tailscale.com v1.58.2 h1:5trkhh/fpUn7f6TUcGUQYJ0GokdNNfNrjh9ONJhoc5A= -tailscale.com v1.58.2/go.mod h1:faWR8XaXemnSKCDjHC7SAQzaagkUjA5x4jlLWiwxtuk= +software.sslmate.com/src/go-pkcs12 v0.4.0 
h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +tailscale.com v1.72.1 h1:hk82jek36ph2S3Tfsh57NVWKEm/pZ9nfUonvlowpfaA= +tailscale.com v1.72.1/go.mod h1:v7OHtg0KLAnhOVf81Z8WrjNefj238QbFhgkWJQoKxbs= diff --git a/hscontrol/app.go b/hscontrol/app.go index a29e53dc..4a5b4679 100644 --- a/hscontrol/app.go +++ b/hscontrol/app.go @@ -8,8 +8,7 @@ import ( "io" "net" "net/http" - _ "net/http/pprof" //nolint - "net/netip" + _ "net/http/pprof" // nolint "os" "os/signal" "path/filepath" @@ -20,6 +19,7 @@ import ( "time" "github.com/coreos/go-oidc/v3/oidc" + "github.com/davecgh/go-spew/spew" "github.com/gorilla/mux" grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware" grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" @@ -28,6 +28,7 @@ import ( "github.com/juanfont/headscale/hscontrol/db" "github.com/juanfont/headscale/hscontrol/derp" derpServer "github.com/juanfont/headscale/hscontrol/derp/server" + "github.com/juanfont/headscale/hscontrol/mapper" "github.com/juanfont/headscale/hscontrol/notifier" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" @@ -55,6 +56,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/dnstype" "tailscale.com/types/key" + "tailscale.com/util/dnsname" ) var ( @@ -69,7 +71,7 @@ var ( const ( AuthPrefix = "Bearer " - updateInterval = 5000 + updateInterval = 5 * time.Second privateKeyFileMode = 0o600 headscaleDirPerm = 0o700 @@ -83,12 +85,14 @@ type Headscale struct { db *db.HSDatabase ipAlloc *db.IPAllocator noisePrivateKey *key.MachinePrivate + ephemeralGC *db.EphemeralGarbageCollector DERPMap *tailcfg.DERPMap DERPServer *derpServer.DERPServer ACLPolicy *policy.ACLPolicy + mapper *mapper.Mapper nodeNotifier *notifier.Notifier oidcProvider *oidc.Provider @@ -96,15 +100,16 @@ type Headscale struct { registrationCache *cache.Cache - shutdownChan chan struct{} pollNetMapStreamWG 
sync.WaitGroup } var ( - profilingEnabled = envknob.Bool("HEADSCALE_PROFILING_ENABLED") + profilingEnabled = envknob.Bool("HEADSCALE_DEBUG_PROFILING_ENABLED") + profilingPath = envknob.String("HEADSCALE_DEBUG_PROFILING_PATH") tailsqlEnabled = envknob.Bool("HEADSCALE_DEBUG_TAILSQL_ENABLED") tailsqlStateDir = envknob.String("HEADSCALE_DEBUG_TAILSQL_STATE_DIR") tailsqlTSKey = envknob.String("TS_AUTHKEY") + dumpConfig = envknob.Bool("HEADSCALE_DEBUG_DUMP_CONFIG") ) func NewHeadscale(cfg *types.Config) (*Headscale, error) { @@ -128,7 +133,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) { noisePrivateKey: noisePrivateKey, registrationCache: registrationCache, pollNetMapStreamWG: sync.WaitGroup{}, - nodeNotifier: notifier.NewNotifier(), + nodeNotifier: notifier.NewNotifier(cfg), } app.db, err = db.NewHeadscaleDatabase( @@ -138,11 +143,17 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) { return nil, err } - app.ipAlloc, err = db.NewIPAllocator(app.db, *cfg.PrefixV4, *cfg.PrefixV6) + app.ipAlloc, err = db.NewIPAllocator(app.db, cfg.PrefixV4, cfg.PrefixV6, cfg.IPAllocation) if err != nil { return nil, err } + app.ephemeralGC = db.NewEphemeralGarbageCollector(func(ni types.NodeID) { + if err := app.db.DeleteEphemeralNode(ni); err != nil { + log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to delete ephemeral node") + } + }) + if cfg.OIDC.Issuer != "" { err = app.initOIDC() if err != nil { @@ -156,7 +167,15 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) { if app.cfg.DNSConfig != nil && app.cfg.DNSConfig.Proxied { // if MagicDNS // TODO(kradalby): revisit why this takes a list. - magicDNSDomains := util.GenerateMagicDNSRootDomains([]netip.Prefix{*cfg.PrefixV4, *cfg.PrefixV6}) + + var magicDNSDomains []dnsname.FQDN + if cfg.PrefixV4 != nil { + magicDNSDomains = append(magicDNSDomains, util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...) 
+ } + if cfg.PrefixV6 != nil { + magicDNSDomains = append(magicDNSDomains, util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...) + } + // we might have routes already from Split DNS if app.cfg.DNSConfig.Routes == nil { app.cfg.DNSConfig.Routes = make(map[string][]*dnstype.Resolver) @@ -199,54 +218,36 @@ func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) { http.Redirect(w, req, target, http.StatusFound) } -// expireEphemeralNodes deletes ephemeral node records that have not been -// seen for longer than h.cfg.EphemeralNodeInactivityTimeout. -func (h *Headscale) expireEphemeralNodes(milliSeconds int64) { - ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond) - - var update types.StateUpdate - var changed bool - for range ticker.C { - if err := h.db.DB.Transaction(func(tx *gorm.DB) error { - update, changed = db.ExpireEphemeralNodes(tx, h.cfg.EphemeralNodeInactivityTimeout) - - return nil - }); err != nil { - log.Error().Err(err).Msg("database error while expiring ephemeral nodes") - continue - } - - if changed && update.Valid() { - ctx := types.NotifyCtx(context.Background(), "expire-ephemeral", "na") - h.nodeNotifier.NotifyAll(ctx, update) - } - } -} - -// expireExpiredMachines expires nodes that have an explicit expiry set +// expireExpiredNodes expires nodes that have an explicit expiry set // after that expiry time has passed. 
-func (h *Headscale) expireExpiredMachines(intervalMs int64) { - interval := time.Duration(intervalMs) * time.Millisecond - ticker := time.NewTicker(interval) +func (h *Headscale) expireExpiredNodes(ctx context.Context, every time.Duration) { + ticker := time.NewTicker(every) lastCheck := time.Unix(0, 0) var update types.StateUpdate var changed bool - for range ticker.C { - if err := h.db.DB.Transaction(func(tx *gorm.DB) error { - lastCheck, update, changed = db.ExpireExpiredNodes(tx, lastCheck) + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-ticker.C: + if err := h.db.Write(func(tx *gorm.DB) error { + lastCheck, update, changed = db.ExpireExpiredNodes(tx, lastCheck) - return nil - }); err != nil { - log.Error().Err(err).Msg("database error while expiring nodes") - continue - } + return nil + }); err != nil { + log.Error().Err(err).Msg("database error while expiring nodes") + continue + } - log.Trace().Str("nodes", update.ChangeNodes.String()).Msgf("expiring nodes") - if changed && update.Valid() { - ctx := types.NotifyCtx(context.Background(), "expire-expired", "na") - h.nodeNotifier.NotifyAll(ctx, update) + if changed { + log.Trace().Interface("nodes", update.ChangePatches).Msgf("expiring nodes") + + ctx := types.NotifyCtx(context.Background(), "expire-expired", "na") + h.nodeNotifier.NotifyAll(ctx, update) + } } } } @@ -272,14 +273,11 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) { h.DERPMap.Regions[region.RegionID] = ®ion } - stateUpdate := types.StateUpdate{ + ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na") + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ Type: types.StateDERPUpdated, DERPMap: h.DERPMap, - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na") - h.nodeNotifier.NotifyAll(ctx, stateUpdate) - } + }) } } } @@ -292,7 +290,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, // Check if the request is 
coming from the on-server client. // This is not secure, but it is to maintain maintainability // with the "legacy" database-based client - // It is also neede for grpc-gateway to be able to connect to + // It is also needed for grpc-gateway to be able to connect to // the server client, _ := peer.FromContext(ctx) @@ -303,11 +301,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, meta, ok := metadata.FromIncomingContext(ctx) if !ok { - log.Error(). - Caller(). - Str("client_address", client.Addr.String()). - Msg("Retrieving metadata is failed") - return ctx, status.Errorf( codes.InvalidArgument, "Retrieving metadata is failed", @@ -316,11 +309,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, authHeader, ok := meta["authorization"] if !ok { - log.Error(). - Caller(). - Str("client_address", client.Addr.String()). - Msg("Authorization token is not supplied") - return ctx, status.Errorf( codes.Unauthenticated, "Authorization token is not supplied", @@ -330,11 +318,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, token := authHeader[0] if !strings.HasPrefix(token, AuthPrefix) { - log.Error(). - Caller(). - Str("client_address", client.Addr.String()). - Msg(`missing "Bearer " prefix in "Authorization" header`) - return ctx, status.Error( codes.Unauthenticated, `missing "Bearer " prefix in "Authorization" header`, @@ -343,12 +326,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix)) if err != nil { - log.Error(). - Caller(). - Err(err). - Str("client_address", client.Addr.String()). 
- Msg("failed to validate token") - return ctx, status.Error(codes.Internal, "failed to validate token") } @@ -446,7 +423,7 @@ func (h *Headscale) ensureUnixSocketIsAbsent() error { func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router { router := mux.NewRouter() - router.PathPrefix("/debug/pprof/").Handler(http.DefaultServeMux) + router.Use(prometheusMiddleware) router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost) @@ -460,8 +437,6 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router { router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig). Methods(http.MethodGet) router.HandleFunc("/windows", h.WindowsConfigMessage).Methods(http.MethodGet) - router.HandleFunc("/windows/tailscale.reg", h.WindowsRegConfig). - Methods(http.MethodGet) // TODO(kristoffer): move swagger into a package router.HandleFunc("/swagger", headscale.SwaggerUI).Methods(http.MethodGet) @@ -483,16 +458,16 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router { return router } -// Serve launches a GIN server with the Headscale API. +// Serve launches the HTTP and gRPC server service Headscale and the API. 
func (h *Headscale) Serve() error { - if _, enableProfile := os.LookupEnv("HEADSCALE_PROFILING_ENABLED"); enableProfile { - if profilePath, ok := os.LookupEnv("HEADSCALE_PROFILING_PATH"); ok { - err := os.MkdirAll(profilePath, os.ModePerm) + if profilingEnabled { + if profilingPath != "" { + err := os.MkdirAll(profilingPath, os.ModePerm) if err != nil { log.Fatal().Err(err).Msg("failed to create profiling directory") } - defer profile.Start(profile.ProfilePath(profilePath)).Stop() + defer profile.Start(profile.ProfilePath(profilingPath)).Stop() } else { defer profile.Start().Stop() } @@ -500,8 +475,17 @@ func (h *Headscale) Serve() error { var err error + if err = h.loadACLPolicy(); err != nil { + return fmt.Errorf("failed to load ACL policy: %w", err) + } + + if dumpConfig { + spew.Dump(h.cfg) + } + // Fetch an initial DERP Map before we start serving h.DERPMap = derp.GetDERPMap(h.cfg.DERP) + h.mapper = mapper.NewMapper(h.db, h.cfg, h.DERPMap, h.nodeNotifier) if h.cfg.DERP.ServerEnabled { // When embedded DERP is enabled we always need a STUN server @@ -511,7 +495,7 @@ func (h *Headscale) Serve() error { region, err := h.DERPServer.GenerateRegion() if err != nil { - return err + return fmt.Errorf("generating DERP region for embedded server: %w", err) } if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion { @@ -531,10 +515,22 @@ func (h *Headscale) Serve() error { return errEmptyInitialDERPMap } - // TODO(kradalby): These should have cancel channels and be cleaned - // up on shutdown. - go h.expireEphemeralNodes(updateInterval) - go h.expireExpiredMachines(updateInterval) + // Start ephemeral node garbage collector and schedule all nodes + // that are already in the database and ephemeral. If they are still + // around between restarts, they will reconnect and the GC will + // be cancelled. 
+ go h.ephemeralGC.Start() + ephmNodes, err := h.db.ListEphemeralNodes() + if err != nil { + return fmt.Errorf("failed to list ephemeral nodes: %w", err) + } + for _, node := range ephmNodes { + h.ephemeralGC.Schedule(node.ID, h.cfg.EphemeralNodeInactivityTimeout) + } + + expireNodeCtx, expireNodeCancel := context.WithCancel(context.Background()) + defer expireNodeCancel() + go h.expireExpiredNodes(expireNodeCtx, updateInterval) if zl.GlobalLevel() == zl.TraceLevel { zerolog.RespLog = true @@ -586,14 +582,14 @@ func (h *Headscale) Serve() error { }..., ) if err != nil { - return err + return fmt.Errorf("setting up gRPC gateway via socket: %w", err) } // Connect to the gRPC server over localhost to skip // the authentication. err = v1.RegisterHeadscaleServiceHandler(ctx, grpcGatewayMux, grpcGatewayConn) if err != nil { - return err + return fmt.Errorf("registering Headscale API service to gRPC: %w", err) } // Start the local gRPC server without TLS and without authentication @@ -614,9 +610,7 @@ func (h *Headscale) Serve() error { tlsConfig, err := h.getTLSSettings() if err != nil { - log.Error().Err(err).Msg("Failed to set up TLS configuration") - - return err + return fmt.Errorf("configuring TLS settings: %w", err) } // @@ -675,18 +669,17 @@ func (h *Headscale) Serve() error { // HTTP setup // // This is the regular router that we expose - // over our main Addr. 
It also serves the legacy Tailcale API + // over our main Addr router := h.createRouter(grpcGatewayMux) httpServer := &http.Server{ Addr: h.cfg.Addr, Handler: router, - ReadTimeout: types.HTTPReadTimeout, - // Go does not handle timeouts in HTTP very well, and there is - // no good way to handle streaming timeouts, therefore we need to - // keep this at unlimited and be careful to clean up connections - // https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/#aboutstreaming - WriteTimeout: 0, + ReadTimeout: types.HTTPTimeout, + + // Long polling should not have any timeout, this is overridden + // further down the chain + WriteTimeout: types.HTTPTimeout, } var httpListener net.Listener @@ -705,27 +698,30 @@ func (h *Headscale) Serve() error { log.Info(). Msgf("listening and serving HTTP on: %s", h.cfg.Addr) - promMux := http.NewServeMux() - promMux.Handle("/metrics", promhttp.Handler()) + debugMux := http.NewServeMux() + debugMux.Handle("/debug/pprof/", http.DefaultServeMux) + debugMux.HandleFunc("/debug/notifier", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(h.nodeNotifier.String())) + }) + debugMux.Handle("/metrics", promhttp.Handler()) - promHTTPServer := &http.Server{ + debugHTTPServer := &http.Server{ Addr: h.cfg.MetricsAddr, - Handler: promMux, - ReadTimeout: types.HTTPReadTimeout, + Handler: debugMux, + ReadTimeout: types.HTTPTimeout, WriteTimeout: 0, } - var promHTTPListener net.Listener - promHTTPListener, err = net.Listen("tcp", h.cfg.MetricsAddr) - + debugHTTPListener, err := net.Listen("tcp", h.cfg.MetricsAddr) if err != nil { return fmt.Errorf("failed to bind to TCP address: %w", err) } - errorGroup.Go(func() error { return promHTTPServer.Serve(promHTTPListener) }) + errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) }) log.Info(). 
- Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr) + Msgf("listening and serving debug and metrics on: %s", h.cfg.MetricsAddr) var tailsqlContext context.Context if tailsqlEnabled { @@ -742,7 +738,6 @@ func (h *Headscale) Serve() error { } // Handle common process-killing signals so we can gracefully shut down: - h.shutdownChan = make(chan struct{}) sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGHUP, @@ -761,17 +756,12 @@ func (h *Headscale) Serve() error { Msg("Received SIGHUP, reloading ACL and Config") // TODO(kradalby): Reload config on SIGHUP + if err := h.loadACLPolicy(); err != nil { + log.Error().Err(err).Msg("failed to reload ACL policy") + } - if h.cfg.ACL.PolicyPath != "" { - aclPath := util.AbsolutePathFromConfigPath(h.cfg.ACL.PolicyPath) - pol, err := policy.LoadACLPolicyFromPath(aclPath) - if err != nil { - log.Error().Err(err).Msg("Failed to reload ACL policy") - } - - h.ACLPolicy = pol + if h.ACLPolicy != nil { log.Info(). - Str("path", aclPath). Msg("ACL policy successfully reloaded, notifying nodes of change") ctx := types.NotifyCtx(context.Background(), "acl-sighup", "na") @@ -779,50 +769,64 @@ func (h *Headscale) Serve() error { Type: types.StateFullUpdate, }) } - default: + info := func(msg string) { log.Info().Msg(msg) } log.Info(). Str("signal", sig.String()). 
Msg("Received signal to stop, shutting down gracefully") - close(h.shutdownChan) - - h.pollNetMapStreamWG.Wait() + expireNodeCancel() + h.ephemeralGC.Close() // Gracefully shut down servers ctx, cancel := context.WithTimeout( context.Background(), types.HTTPShutdownTimeout, ) - if err := promHTTPServer.Shutdown(ctx); err != nil { - log.Error().Err(err).Msg("Failed to shutdown prometheus http") + info("shutting down debug http server") + if err := debugHTTPServer.Shutdown(ctx); err != nil { + log.Error().Err(err).Msg("failed to shutdown prometheus http") } + info("shutting down main http server") if err := httpServer.Shutdown(ctx); err != nil { - log.Error().Err(err).Msg("Failed to shutdown http") + log.Error().Err(err).Msg("failed to shutdown http") } + + info("closing node notifier") + h.nodeNotifier.Close() + + info("waiting for netmap stream to close") + h.pollNetMapStreamWG.Wait() + + info("shutting down grpc server (socket)") grpcSocket.GracefulStop() if grpcServer != nil { + info("shutting down grpc server (external)") grpcServer.GracefulStop() grpcListener.Close() } if tailsqlContext != nil { + info("shutting down tailsql") tailsqlContext.Done() } // Close network listeners - promHTTPListener.Close() + info("closing network listeners") + debugHTTPListener.Close() httpListener.Close() grpcGatewayConn.Close() // Stop listening (and unlink the socket if unix type): + info("closing socket listener") socketListener.Close() // Close db connections + info("closing database connection") err = h.db.Close() if err != nil { - log.Error().Err(err).Msg("Failed to close db") + log.Error().Err(err).Msg("failed to close db") } log.Info(). 
@@ -877,7 +881,7 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) { server := &http.Server{ Addr: h.cfg.TLS.LetsEncrypt.Listen, Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)), - ReadTimeout: types.HTTPReadTimeout, + ReadTimeout: types.HTTPTimeout, } go func() { @@ -974,3 +978,74 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) { return &machineKey, nil } + +func (h *Headscale) loadACLPolicy() error { + var ( + pol *policy.ACLPolicy + err error + ) + + switch h.cfg.Policy.Mode { + case types.PolicyModeFile: + path := h.cfg.Policy.Path + + // It is fine to start headscale without a policy file. + if len(path) == 0 { + return nil + } + + absPath := util.AbsolutePathFromConfigPath(path) + pol, err = policy.LoadACLPolicyFromPath(absPath) + if err != nil { + return fmt.Errorf("failed to load ACL policy from file: %w", err) + } + + // Validate and reject configuration that would error when applied + // when creating a map response. This requires nodes, so there is still + // a scenario where they might be allowed if the server has no nodes + // yet, but it should help for the general case and for hot reloading + // configurations. + // Note that this check is only done for file-based policies in this function + // as the database-based policies are checked in the gRPC API where it is not + // allowed to be written to the database. 
+ nodes, err := h.db.ListNodes() + if err != nil { + return fmt.Errorf("loading nodes from database to validate policy: %w", err) + } + + _, err = pol.CompileFilterRules(nodes) + if err != nil { + return fmt.Errorf("verifying policy rules: %w", err) + } + + if len(nodes) > 0 { + _, err = pol.CompileSSHPolicy(nodes[0], nodes) + if err != nil { + return fmt.Errorf("verifying SSH rules: %w", err) + } + } + + case types.PolicyModeDB: + p, err := h.db.GetPolicy() + if err != nil { + if errors.Is(err, types.ErrPolicyNotFound) { + return nil + } + + return fmt.Errorf("failed to get policy from database: %w", err) + } + + pol, err = policy.LoadACLPolicyFromBytes([]byte(p.Data)) + if err != nil { + return fmt.Errorf("failed to parse policy: %w", err) + } + default: + log.Fatal(). + Str("mode", string(h.cfg.Policy.Mode)). + Msg("Unknown ACL policy mode") + } + + h.ACLPolicy = pol + + return nil +} diff --git a/hscontrol/auth.go b/hscontrol/auth.go index b199fa55..8b8557ba 100644 --- a/hscontrol/auth.go +++ b/hscontrol/auth.go @@ -16,6 +16,7 @@ import ( "gorm.io/gorm" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/types/ptr" ) func logAuthFunc( @@ -62,18 +63,18 @@ func logAuthFunc( func (h *Headscale) handleRegister( writer http.ResponseWriter, req *http.Request, - registerRequest tailcfg.RegisterRequest, + regReq tailcfg.RegisterRequest, machineKey key.MachinePublic, ) { - logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey) + logInfo, logTrace, _ := logAuthFunc(regReq, machineKey) now := time.Now().UTC() logTrace("handleRegister called, looking up machine in DB") - node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey) + node, err := h.db.GetNodeByAnyKey(machineKey, regReq.NodeKey, regReq.OldNodeKey) logTrace("handleRegister database lookup has returned") if errors.Is(err, gorm.ErrRecordNotFound) { // If the node has AuthKey set, handle registration via PreAuthKeys - if 
registerRequest.Auth.AuthKey != "" { - h.handleAuthKey(writer, registerRequest, machineKey) + if regReq.Auth != nil && regReq.Auth.AuthKey != "" { + h.handleAuthKey(writer, regReq, machineKey) return } @@ -86,7 +87,7 @@ func (h *Headscale) handleRegister( // This is not implemented yet, as it is no strictly required. The only side-effect // is that the client will hammer headscale with requests until it gets a // successful RegisterResponse. - if registerRequest.Followup != "" { + if regReq.Followup != "" { logTrace("register request is a followup") if _, ok := h.registrationCache.Get(machineKey.String()); ok { logTrace("Node is waiting for interactive login") @@ -95,7 +96,7 @@ func (h *Headscale) handleRegister( case <-req.Context().Done(): return case <-time.After(registrationHoldoff): - h.handleNewNode(writer, registerRequest, machineKey) + h.handleNewNode(writer, regReq, machineKey) return } @@ -104,32 +105,21 @@ func (h *Headscale) handleRegister( logInfo("Node not found in database, creating new") - givenName, err := h.db.GenerateGivenName( - machineKey, - registerRequest.Hostinfo.Hostname, - ) - if err != nil { - logErr(err, "Failed to generate given name for node") - - return - } - // The node did not have a key to authenticate, which means // that we rely on a method that calls back some how (OpenID or CLI) // We create the node and then keep it around until a callback // happens newNode := types.Node{ MachineKey: machineKey, - Hostname: registerRequest.Hostinfo.Hostname, - GivenName: givenName, - NodeKey: registerRequest.NodeKey, + Hostname: regReq.Hostinfo.Hostname, + NodeKey: regReq.NodeKey, LastSeen: &now, Expiry: &time.Time{}, } - if !registerRequest.Expiry.IsZero() { + if !regReq.Expiry.IsZero() { logTrace("Non-zero expiry time requested") - newNode.Expiry = ®isterRequest.Expiry + newNode.Expiry = ®Req.Expiry } h.registrationCache.Set( @@ -138,7 +128,7 @@ func (h *Headscale) handleRegister( registerCacheExpiration, ) - h.handleNewNode(writer, 
registerRequest, machineKey) + h.handleNewNode(writer, regReq, machineKey) return } @@ -169,11 +159,11 @@ func (h *Headscale) handleRegister( // - Trying to log out (sending a expiry in the past) // - A valid, registered node, looking for /map // - Expired node wanting to reauthenticate - if node.NodeKey.String() == registerRequest.NodeKey.String() { + if node.NodeKey.String() == regReq.NodeKey.String() { // The client sends an Expiry in the past if the client is requesting to expire the key (aka logout) // https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648 - if !registerRequest.Expiry.IsZero() && - registerRequest.Expiry.UTC().Before(now) { + if !regReq.Expiry.IsZero() && + regReq.Expiry.UTC().Before(now) { h.handleNodeLogOut(writer, *node, machineKey) return @@ -189,11 +179,11 @@ func (h *Headscale) handleRegister( } // The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration - if node.NodeKey.String() == registerRequest.OldNodeKey.String() && + if node.NodeKey.String() == regReq.OldNodeKey.String() && !node.IsExpired() { h.handleNodeKeyRefresh( writer, - registerRequest, + regReq, *node, machineKey, ) @@ -202,11 +192,11 @@ func (h *Headscale) handleRegister( } // When logged out and reauthenticating with OIDC, the OldNodeKey is not passed, but the NodeKey has changed - if node.NodeKey.String() != registerRequest.NodeKey.String() && - registerRequest.OldNodeKey.IsZero() && !node.IsExpired() { + if node.NodeKey.String() != regReq.NodeKey.String() && + regReq.OldNodeKey.IsZero() && !node.IsExpired() { h.handleNodeKeyRefresh( writer, - registerRequest, + regReq, *node, machineKey, ) @@ -214,7 +204,7 @@ func (h *Headscale) handleRegister( return } - if registerRequest.Followup != "" { + if regReq.Followup != "" { select { case <-req.Context().Done(): return @@ -223,7 +213,7 @@ func (h *Headscale) handleRegister( } // The node has expired or it is logged out - h.handleNodeExpiredOrLoggedOut(writer, 
registerRequest, *node, machineKey) + h.handleNodeExpiredOrLoggedOut(writer, regReq, *node, machineKey) // TODO(juan): RegisterRequest includes an Expiry time, that we could optionally use node.Expiry = &time.Time{} @@ -232,7 +222,7 @@ func (h *Headscale) handleRegister( // we need to make sure the NodeKey matches the one in the request // TODO(juan): What happens when using fast user switching between two // headscale-managed tailnets? - node.NodeKey = registerRequest.NodeKey + node.NodeKey = regReq.NodeKey h.registrationCache.Set( machineKey.String(), *node, @@ -273,8 +263,6 @@ func (h *Headscale) handleAuthKey( Err(err). Msg("Cannot encode message") http.Error(writer, "Internal server error", http.StatusInternalServerError) - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() return } @@ -294,13 +282,6 @@ func (h *Headscale) handleAuthKey( Str("node", registerRequest.Hostinfo.Hostname). Msg("Failed authentication via AuthKey") - if pak != nil { - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() - } else { - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", "unknown").Inc() - } - return } @@ -323,14 +304,20 @@ func (h *Headscale) handleAuthKey( Msg("node was already registered before, refreshing with new auth key") node.NodeKey = nodeKey - node.AuthKeyID = uint(pak.ID) - err := h.db.NodeSetExpiry(node.ID, registerRequest.Expiry) + if pak.ID != 0 { + node.AuthKeyID = ptr.To(pak.ID) + } + + node.Expiry = ®isterRequest.Expiry + node.User = pak.User + node.UserID = pak.UserID + err := h.db.DB.Save(node).Error if err != nil { log.Error(). Caller(). Str("node", node.Hostname). Err(err). 
- Msg("Failed to refresh node") + Msg("failed to save node after logging in with auth key") return } @@ -339,7 +326,6 @@ func (h *Headscale) handleAuthKey( if len(aclTags) > 0 { // This conditional preserves the existing behaviour, although SaaS would reset the tags on auth-key login err = h.db.SetTags(node.ID, aclTags) - if err != nil { log.Error(). Caller(). @@ -352,31 +338,13 @@ func (h *Headscale) handleAuthKey( } } - mkey := node.MachineKey - update := types.StateUpdateExpire(node.ID, registerRequest.Expiry) - - if update.Valid() { - ctx := types.NotifyCtx(context.Background(), "handle-authkey", "na") - h.nodeNotifier.NotifyWithIgnore(ctx, update, mkey.String()) - } + ctx := types.NotifyCtx(context.Background(), "handle-authkey", "na") + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{Type: types.StatePeerChanged, ChangeNodes: []types.NodeID{node.ID}}) } else { now := time.Now().UTC() - givenName, err := h.db.GenerateGivenName(machineKey, registerRequest.Hostinfo.Hostname) - if err != nil { - log.Error(). - Caller(). - Str("func", "RegistrationHandler"). - Str("hostinfo.name", registerRequest.Hostinfo.Hostname). - Err(err). - Msg("Failed to generate given name for node") - - return - } - nodeToRegister := types.Node{ Hostname: registerRequest.Hostinfo.Hostname, - GivenName: givenName, UserID: pak.User.ID, User: pak.User, MachineKey: machineKey, @@ -384,11 +352,10 @@ func (h *Headscale) handleAuthKey( Expiry: ®isterRequest.Expiry, NodeKey: nodeKey, LastSeen: &now, - AuthKeyID: uint(pak.ID), ForcedTags: pak.Proto().GetAclTags(), } - addrs, err := h.ipAlloc.Next() + ipv4, ipv6, err := h.ipAlloc.Next() if err != nil { log.Error(). Caller(). @@ -400,24 +367,26 @@ func (h *Headscale) handleAuthKey( return } + pakID := uint(pak.ID) + if pakID != 0 { + nodeToRegister.AuthKeyID = ptr.To(pak.ID) + } node, err = h.db.RegisterNode( nodeToRegister, - addrs, + ipv4, ipv6, ) if err != nil { log.Error(). Caller(). Err(err). 
Msg("could not register node") - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return } } - err = h.db.DB.Transaction(func(tx *gorm.DB) error { + h.db.Write(func(tx *gorm.DB) error { return db.UsePreAuthKey(tx, pak) }) if err != nil { @@ -425,8 +394,6 @@ func (h *Headscale) handleAuthKey( Caller(). Err(err). Msg("Failed to use pre-auth key") - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return @@ -445,14 +412,10 @@ func (h *Headscale) handleAuthKey( Str("node", registerRequest.Hostinfo.Hostname). Err(err). Msg("Cannot encode message") - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return } - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "success", pak.User.Name). - Inc() writer.Header().Set("Content-Type", "application/json; charset=utf-8") writer.WriteHeader(http.StatusOK) _, err = writer.Write(respBody) @@ -466,7 +429,6 @@ func (h *Headscale) handleAuthKey( log.Info(). Str("node", registerRequest.Hostinfo.Hostname). - Str("ips", strings.Join(node.IPAddresses.StringSlice(), ", ")). 
Msg("Successfully authenticated via AuthKey") } @@ -538,11 +500,8 @@ func (h *Headscale) handleNodeLogOut( return } - stateUpdate := types.StateUpdateExpire(node.ID, now) - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na") - h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na") + h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, now), node.ID) resp.AuthURL = "" resp.MachineAuthorized = false @@ -572,7 +531,7 @@ func (h *Headscale) handleNodeLogOut( } if node.IsEphemeral() { - err = h.db.DeleteNode(&node, h.nodeNotifier.ConnectedMap()) + changedNodes, err := h.db.DeleteNode(&node, h.nodeNotifier.LikelyConnectedMap()) if err != nil { log.Error(). Err(err). @@ -580,13 +539,16 @@ func (h *Headscale) handleNodeLogOut( Msg("Cannot delete ephemeral node from the database") } - stateUpdate := types.StateUpdate{ + ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na") + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ Type: types.StatePeerRemoved, - Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)}, - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na") - h.nodeNotifier.NotifyAll(ctx, stateUpdate) + Removed: []types.NodeID{node.ID}, + }) + if changedNodes != nil { + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: changedNodes, + }) } return @@ -622,14 +584,10 @@ func (h *Headscale) handleNodeWithValidRegistration( Caller(). Err(err). Msg("Cannot encode message") - nodeRegistrations.WithLabelValues("update", "web", "error", node.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return } - nodeRegistrations.WithLabelValues("update", "web", "success", node.User.Name). 
- Inc() writer.Header().Set("Content-Type", "application/json; charset=utf-8") writer.WriteHeader(http.StatusOK) @@ -660,7 +618,7 @@ func (h *Headscale) handleNodeKeyRefresh( Str("node", node.Hostname). Msg("We have the OldNodeKey in the database. This is a key refresh") - err := h.db.DB.Transaction(func(tx *gorm.DB) error { + err := h.db.Write(func(tx *gorm.DB) error { return db.NodeSetNodeKey(tx, &node, registerRequest.NodeKey) }) if err != nil { @@ -706,14 +664,14 @@ func (h *Headscale) handleNodeKeyRefresh( func (h *Headscale) handleNodeExpiredOrLoggedOut( writer http.ResponseWriter, - registerRequest tailcfg.RegisterRequest, + regReq tailcfg.RegisterRequest, node types.Node, machineKey key.MachinePublic, ) { resp := tailcfg.RegisterResponse{} - if registerRequest.Auth.AuthKey != "" { - h.handleAuthKey(writer, registerRequest, machineKey) + if regReq.Auth != nil && regReq.Auth.AuthKey != "" { + h.handleAuthKey(writer, regReq, machineKey) return } @@ -723,8 +681,8 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut( Caller(). Str("node", node.Hostname). Str("machine_key", machineKey.ShortString()). - Str("node_key", registerRequest.NodeKey.ShortString()). - Str("node_key_old", registerRequest.OldNodeKey.ShortString()). + Str("node_key", regReq.NodeKey.ShortString()). + Str("node_key_old", regReq.OldNodeKey.ShortString()). Msg("Node registration has expired or logged out. Sending a auth url to register") if h.oauth2Config != nil { @@ -743,14 +701,10 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut( Caller(). Err(err). Msg("Cannot encode message") - nodeRegistrations.WithLabelValues("reauth", "web", "error", node.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return } - nodeRegistrations.WithLabelValues("reauth", "web", "success", node.User.Name). 
- Inc() writer.Header().Set("Content-Type", "application/json; charset=utf-8") writer.WriteHeader(http.StatusOK) @@ -765,8 +719,8 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut( log.Trace(). Caller(). Str("machine_key", machineKey.ShortString()). - Str("node_key", registerRequest.NodeKey.ShortString()). - Str("node_key_old", registerRequest.OldNodeKey.ShortString()). + Str("node_key", regReq.NodeKey.ShortString()). + Str("node_key_old", regReq.OldNodeKey.ShortString()). Str("node", node.Hostname). Msg("Node logged out. Sent AuthURL for reauthentication") } diff --git a/hscontrol/auth_noise.go b/hscontrol/auth_noise.go index 323a49b0..6659dfa5 100644 --- a/hscontrol/auth_noise.go +++ b/hscontrol/auth_noise.go @@ -33,7 +33,6 @@ func (ns *noiseServer) NoiseRegistrationHandler( Caller(). Err(err). Msg("Cannot parse RegisterRequest") - nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc() http.Error(writer, "Internal error", http.StatusInternalServerError) return diff --git a/hscontrol/db/db.go b/hscontrol/db/db.go index 870ad599..accf439e 100644 --- a/hscontrol/db/db.go +++ b/hscontrol/db/db.go @@ -5,6 +5,7 @@ import ( "database/sql" "errors" "fmt" + "net/netip" "path/filepath" "strconv" "strings" @@ -12,13 +13,12 @@ import ( "github.com/glebarez/sqlite" "github.com/go-gormigrate/gormigrate/v2" + "github.com/juanfont/headscale/hscontrol/types" + "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" "gorm.io/driver/postgres" "gorm.io/gorm" "gorm.io/gorm/logger" - - "github.com/juanfont/headscale/hscontrol/types" - "github.com/juanfont/headscale/hscontrol/util" ) var errDatabaseNotSupported = errors.New("database type not supported") @@ -51,8 +51,8 @@ func NewHeadscaleDatabase( dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{ - // New migrations should be added as transactions at the end of this list. 
- // The initial commit here is quite messy, completely out of order and + // New migrations must be added as transactions at the end of this list. + // The initial migration here is quite messy, completely out of order and // has no versioning and is the tech debt of not having versioned migrations // prior to this point. This first migration is all DB changes to bring a DB // up to 0.23.0. @@ -90,6 +90,7 @@ func NewHeadscaleDatabase( _ = tx.Migrator(). RenameColumn(&types.Node{}, "nickname", "given_name") + dbConn.Model(&types.Node{}).Where("auth_key_id = ?", 0).Update("auth_key_id", nil) // If the Node table has a column for registered, // find all occourences of "false" and drop them. Then // remove the column. @@ -122,6 +123,13 @@ func NewHeadscaleDatabase( } } + // Remove any invalid routes associated with a node that does not exist. + if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) { + err := tx.Exec("delete from routes where node_id not in (select id from nodes)").Error + if err != nil { + return err + } + } err = tx.AutoMigrate(&types.Route{}) if err != nil { return err @@ -317,23 +325,98 @@ func NewHeadscaleDatabase( // no longer used. ID: "202402151347", Migrate: func(tx *gorm.DB) error { - err := tx.Migrator().DropColumn(&types.Node{}, "last_successful_update") - if err != nil && strings.Contains(err.Error(), `of relation "nodes" does not exist`) { - return nil - } else { - return err - } - - return err + _ = tx.Migrator().DropColumn(&types.Node{}, "last_successful_update") + return nil }, Rollback: func(tx *gorm.DB) error { return nil }, }, + { + // Replace column with IP address list with dedicated + // IP v4 and v6 column. + // Note that previously, the list _could_ contain more + // than two addresses, which should not really happen. + // In that case, the first occurrence of each type will + // be kept. 
+ ID: "2024041121742", + Migrate: func(tx *gorm.DB) error { + _ = tx.Migrator().AddColumn(&types.Node{}, "ipv4") + _ = tx.Migrator().AddColumn(&types.Node{}, "ipv6") + + type node struct { + ID uint64 `gorm:"column:id"` + Addresses string `gorm:"column:ip_addresses"` + } + + var nodes []node + + _ = tx.Raw("SELECT id, ip_addresses FROM nodes").Scan(&nodes).Error + + for _, node := range nodes { + addrs := strings.Split(node.Addresses, ",") + + if len(addrs) == 0 { + return fmt.Errorf("no addresses found for node(%d)", node.ID) + } + + var v4 *netip.Addr + var v6 *netip.Addr + + for _, addrStr := range addrs { + addr, err := netip.ParseAddr(addrStr) + if err != nil { + return fmt.Errorf("parsing IP for node(%d) from database: %w", node.ID, err) + } + + if addr.Is4() && v4 == nil { + v4 = &addr + } + + if addr.Is6() && v6 == nil { + v6 = &addr + } + } + + if v4 != nil { + err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv4", v4.String()).Error + if err != nil { + return fmt.Errorf("saving ip addresses to new columns: %w", err) + } + } + + if v6 != nil { + err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv6", v6.String()).Error + if err != nil { + return fmt.Errorf("saving ip addresses to new columns: %w", err) + } + } + } + + _ = tx.Migrator().DropColumn(&types.Node{}, "ip_addresses") + + return nil + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, + { + ID: "202406021630", + Migrate: func(tx *gorm.DB) error { + err := tx.AutoMigrate(&types.Policy{}) + if err != nil { + return err + } + + return nil + }, + Rollback: func(db *gorm.DB) error { return nil }, + }, }, ) - if err = migrations.Migrate(); err != nil { + if err := runMigrations(cfg, dbConn, migrations); err != nil { log.Fatal().Err(err).Msgf("Migration failed: %v", err) } @@ -350,7 +433,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { // TODO(kradalby): Integrate this with zerolog var dbLogger logger.Interface if cfg.Debug { - dbLogger = 
logger.Default + dbLogger = util.NewDBLogWrapper(&log.Logger, cfg.Gorm.SlowThreshold, cfg.Gorm.SkipErrRecordNotFound, cfg.Gorm.ParameterizedQueries) } else { dbLogger = logger.Default.LogMode(logger.Silent) } @@ -369,14 +452,30 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { Msg("Opening database") db, err := gorm.Open( - sqlite.Open(cfg.Sqlite.Path+"?_synchronous=1&_journal_mode=WAL"), + sqlite.Open(cfg.Sqlite.Path), &gorm.Config{ - DisableForeignKeyConstraintWhenMigrating: true, - Logger: dbLogger, + PrepareStmt: cfg.Gorm.PrepareStmt, + Logger: dbLogger, }, ) - db.Exec("PRAGMA foreign_keys=ON") + if err := db.Exec(` + PRAGMA foreign_keys=ON; + PRAGMA busy_timeout=10000; + PRAGMA auto_vacuum=INCREMENTAL; + PRAGMA synchronous=NORMAL; + `).Error; err != nil { + return nil, fmt.Errorf("enabling foreign keys: %w", err) + } + + if cfg.Sqlite.WriteAheadLog { + if err := db.Exec(` + PRAGMA journal_mode=WAL; + PRAGMA wal_autocheckpoint=0; + `).Error; err != nil { + return nil, fmt.Errorf("setting WAL mode: %w", err) + } + } // The pure Go SQLite library does not handle locking in // the same way as the C based one and we cant use the gorm @@ -418,8 +517,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { } db, err := gorm.Open(postgres.Open(dbString), &gorm.Config{ - DisableForeignKeyConstraintWhenMigrating: true, - Logger: dbLogger, + Logger: dbLogger, }) if err != nil { return nil, err @@ -442,6 +540,70 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { ) } +func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormigrate.Gormigrate) error { + // Turn off foreign keys for the duration of the migration if using sqllite to + // prevent data loss due to the way the GORM migrator handles certain schema + // changes. 
+ if cfg.Type == types.DatabaseSqlite { + var fkEnabled int + if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkEnabled).Error; err != nil { + return fmt.Errorf("checking foreign key status: %w", err) + } + if fkEnabled == 1 { + if err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error; err != nil { + return fmt.Errorf("disabling foreign keys: %w", err) + } + defer dbConn.Exec("PRAGMA foreign_keys = ON") + } + } + + if err := migrations.Migrate(); err != nil { + return err + } + + // Since we disabled foreign keys for the migration, we need to check for + // constraint violations manually at the end of the migration. + if cfg.Type == types.DatabaseSqlite { + type constraintViolation struct { + Table string + RowID int + Parent string + ConstraintIndex int + } + + var violatedConstraints []constraintViolation + + rows, err := dbConn.Raw("PRAGMA foreign_key_check").Rows() + if err != nil { + return err + } + + for rows.Next() { + var violation constraintViolation + if err := rows.Scan(&violation.Table, &violation.RowID, &violation.Parent, &violation.ConstraintIndex); err != nil { + return err + } + + violatedConstraints = append(violatedConstraints, violation) + } + _ = rows.Close() + + if len(violatedConstraints) > 0 { + for _, violation := range violatedConstraints { + log.Error(). + Str("table", violation.Table). + Int("row_id", violation.RowID). + Str("parent", violation.Parent). 
+ Msg("Foreign key constraint violated") + } + + return fmt.Errorf("foreign key constraints violated") + } + } + + return nil +} + func (hsdb *HSDatabase) PingDB(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() diff --git a/hscontrol/db/db_test.go b/hscontrol/db/db_test.go new file mode 100644 index 00000000..b32d93ce --- /dev/null +++ b/hscontrol/db/db_test.go @@ -0,0 +1,168 @@ +package db + +import ( + "fmt" + "io" + "net/netip" + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/juanfont/headscale/hscontrol/types" + "github.com/stretchr/testify/assert" + "gorm.io/gorm" +) + +func TestMigrations(t *testing.T) { + ipp := func(p string) types.IPPrefix { + return types.IPPrefix(netip.MustParsePrefix(p)) + } + r := func(id uint64, p string, a, e, i bool) types.Route { + return types.Route{ + NodeID: id, + Prefix: ipp(p), + Advertised: a, + Enabled: e, + IsPrimary: i, + } + } + tests := []struct { + dbPath string + wantFunc func(*testing.T, *HSDatabase) + wantErr string + }{ + { + dbPath: "testdata/0-22-3-to-0-23-0-routes-are-dropped-2063.sqlite", + wantFunc: func(t *testing.T, h *HSDatabase) { + routes, err := Read(h.DB, func(rx *gorm.DB) (types.Routes, error) { + return GetRoutes(rx) + }) + assert.NoError(t, err) + + assert.Len(t, routes, 10) + want := types.Routes{ + r(1, "0.0.0.0/0", true, true, false), + r(1, "::/0", true, true, false), + r(1, "10.9.110.0/24", true, true, true), + r(26, "172.100.100.0/24", true, true, true), + r(26, "172.100.100.0/24", true, false, false), + r(31, "0.0.0.0/0", true, true, false), + r(31, "0.0.0.0/0", true, false, false), + r(31, "::/0", true, true, false), + r(31, "::/0", true, false, false), + r(32, "192.168.0.24/32", true, true, true), + } + if diff := cmp.Diff(want, routes, cmpopts.IgnoreFields(types.Route{}, "Model", "Node"), cmp.Comparer(func(x, y types.IPPrefix) bool { + return x == y + })); diff != "" 
{ + t.Errorf("TestMigrations() mismatch (-want +got):\n%s", diff) + } + }, + }, + { + dbPath: "testdata/0-22-3-to-0-23-0-routes-fail-foreign-key-2076.sqlite", + wantFunc: func(t *testing.T, h *HSDatabase) { + routes, err := Read(h.DB, func(rx *gorm.DB) (types.Routes, error) { + return GetRoutes(rx) + }) + assert.NoError(t, err) + + assert.Len(t, routes, 4) + want := types.Routes{ + // These routes exists, but have no nodes associated with them + // when the migration starts. + // r(1, "0.0.0.0/0", true, true, false), + // r(1, "::/0", true, true, false), + // r(3, "0.0.0.0/0", true, true, false), + // r(3, "::/0", true, true, false), + // r(5, "0.0.0.0/0", true, true, false), + // r(5, "::/0", true, true, false), + // r(6, "0.0.0.0/0", true, true, false), + // r(6, "::/0", true, true, false), + // r(6, "10.0.0.0/8", true, false, false), + // r(7, "0.0.0.0/0", true, true, false), + // r(7, "::/0", true, true, false), + // r(7, "10.0.0.0/8", true, false, false), + // r(9, "0.0.0.0/0", true, true, false), + // r(9, "::/0", true, true, false), + // r(9, "10.0.0.0/8", true, true, false), + // r(11, "0.0.0.0/0", true, true, false), + // r(11, "::/0", true, true, false), + // r(11, "10.0.0.0/8", true, true, true), + // r(12, "0.0.0.0/0", true, true, false), + // r(12, "::/0", true, true, false), + // r(12, "10.0.0.0/8", true, false, false), + // + // These nodes exists, so routes should be kept. 
+ r(13, "10.0.0.0/8", true, false, false), + r(13, "0.0.0.0/0", true, true, false), + r(13, "::/0", true, true, false), + r(13, "10.18.80.2/32", true, true, true), + } + if diff := cmp.Diff(want, routes, cmpopts.IgnoreFields(types.Route{}, "Model", "Node"), cmp.Comparer(func(x, y types.IPPrefix) bool { + return x == y + })); diff != "" { + t.Errorf("TestMigrations() mismatch (-want +got):\n%s", diff) + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.dbPath, func(t *testing.T) { + dbPath, err := testCopyOfDatabase(tt.dbPath) + if err != nil { + t.Fatalf("copying db for test: %s", err) + } + + hsdb, err := NewHeadscaleDatabase(types.DatabaseConfig{ + Type: "sqlite3", + Sqlite: types.SqliteConfig{ + Path: dbPath, + }, + }, "") + if err != nil && tt.wantErr != err.Error() { + t.Errorf("TestMigrations() unexpected error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wantFunc != nil { + tt.wantFunc(t, hsdb) + } + }) + } +} + +func testCopyOfDatabase(src string) (string, error) { + sourceFileStat, err := os.Stat(src) + if err != nil { + return "", err + } + + if !sourceFileStat.Mode().IsRegular() { + return "", fmt.Errorf("%s is not a regular file", src) + } + + source, err := os.Open(src) + if err != nil { + return "", err + } + defer source.Close() + + tmpDir, err := os.MkdirTemp("", "hsdb-test-*") + if err != nil { + return "", err + } + + fn := filepath.Base(src) + dst := filepath.Join(tmpDir, fn) + + destination, err := os.Create(dst) + if err != nil { + return "", err + } + defer destination.Close() + _, err = io.Copy(destination, source) + return dst, err +} diff --git a/hscontrol/db/ip.go b/hscontrol/db/ip.go index dc49f8af..d0e030d6 100644 --- a/hscontrol/db/ip.go +++ b/hscontrol/db/ip.go @@ -1,13 +1,17 @@ package db import ( + "crypto/rand" + "database/sql" "errors" "fmt" + "math/big" "net/netip" "sync" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" + "github.com/rs/zerolog/log" "go4.org/netipx" 
"gorm.io/gorm" ) @@ -20,13 +24,16 @@ import ( type IPAllocator struct { mu sync.Mutex - prefix4 netip.Prefix - prefix6 netip.Prefix + prefix4 *netip.Prefix + prefix6 *netip.Prefix // Previous IPs handed out prev4 netip.Addr prev6 netip.Addr + // strategy used for handing out IP addresses. + strategy types.IPAllocationStrategy + // Set of all IPs handed out. // This might not be in sync with the database, // but it is more conservative. If saves to the @@ -40,40 +47,70 @@ type IPAllocator struct { // provided IPv4 and IPv6 prefix. It needs to be created // when headscale starts and needs to finish its read // transaction before any writes to the database occur. -func NewIPAllocator(db *HSDatabase, prefix4, prefix6 netip.Prefix) (*IPAllocator, error) { - var addressesSlices []string +func NewIPAllocator( + db *HSDatabase, + prefix4, prefix6 *netip.Prefix, + strategy types.IPAllocationStrategy, +) (*IPAllocator, error) { + ret := IPAllocator{ + prefix4: prefix4, + prefix6: prefix6, + + strategy: strategy, + } + + var v4s []sql.NullString + var v6s []sql.NullString if db != nil { - db.Read(func(rx *gorm.DB) error { - return rx.Model(&types.Node{}).Pluck("ip_addresses", &addressesSlices).Error + err := db.Read(func(rx *gorm.DB) error { + return rx.Model(&types.Node{}).Pluck("ipv4", &v4s).Error }) + if err != nil { + return nil, fmt.Errorf("reading IPv4 addresses from database: %w", err) + } + + err = db.Read(func(rx *gorm.DB) error { + return rx.Model(&types.Node{}).Pluck("ipv6", &v6s).Error + }) + if err != nil { + return nil, fmt.Errorf("reading IPv6 addresses from database: %w", err) + } } var ips netipx.IPSetBuilder // Add network and broadcast addrs to used pool so they // are not handed out to nodes. 
- network4, broadcast4 := util.GetIPPrefixEndpoints(prefix4) - network6, broadcast6 := util.GetIPPrefixEndpoints(prefix6) - ips.Add(network4) - ips.Add(broadcast4) - ips.Add(network6) - ips.Add(broadcast6) + if prefix4 != nil { + network4, broadcast4 := util.GetIPPrefixEndpoints(*prefix4) + ips.Add(network4) + ips.Add(broadcast4) + + // Use network as starting point, it will be used to call .Next() + // TODO(kradalby): Could potentially take all the IPs loaded from + // the database into account to start at a more "educated" location. + ret.prev4 = network4 + } + + if prefix6 != nil { + network6, broadcast6 := util.GetIPPrefixEndpoints(*prefix6) + ips.Add(network6) + ips.Add(broadcast6) + + ret.prev6 = network6 + } // Fetch all the IP Addresses currently handed out from the Database // and add them to the used IP set. - for _, slice := range addressesSlices { - var machineAddresses types.NodeAddresses - err := machineAddresses.Scan(slice) - if err != nil { - return nil, fmt.Errorf( - "parsing IPs from database %v: %w", machineAddresses, - err, - ) - } + for _, addrStr := range append(v4s, v6s...) { + if addrStr.Valid { + addr, err := netip.ParseAddr(addrStr.String) + if err != nil { + return nil, fmt.Errorf("parsing IP address from database: %w", err) + } - for _, ip := range machineAddresses { - ips.Add(ip) + ips.Add(addr) } } @@ -86,42 +123,61 @@ func NewIPAllocator(db *HSDatabase, prefix4, prefix6 netip.Prefix) (*IPAllocator ) } - return &IPAllocator{ - usedIPs: ips, + ret.usedIPs = ips - prefix4: prefix4, - prefix6: prefix6, - - // Use network as starting point, it will be used to call .Next() - // TODO(kradalby): Could potentially take all the IPs loaded from - // the database into account to start at a more "educated" location. 
- prev4: network4, - prev6: network6, - }, nil + return &ret, nil } -func (i *IPAllocator) Next() (types.NodeAddresses, error) { +func (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) { i.mu.Lock() defer i.mu.Unlock() - v4, err := i.next(i.prev4, i.prefix4) - if err != nil { - return nil, fmt.Errorf("allocating IPv4 address: %w", err) + var err error + var ret4 *netip.Addr + var ret6 *netip.Addr + + if i.prefix4 != nil { + ret4, err = i.next(i.prev4, i.prefix4) + if err != nil { + return nil, nil, fmt.Errorf("allocating IPv4 address: %w", err) + } + i.prev4 = *ret4 } - v6, err := i.next(i.prev6, i.prefix6) - if err != nil { - return nil, fmt.Errorf("allocating IPv6 address: %w", err) + if i.prefix6 != nil { + ret6, err = i.next(i.prev6, i.prefix6) + if err != nil { + return nil, nil, fmt.Errorf("allocating IPv6 address: %w", err) + } + i.prev6 = *ret6 } - return types.NodeAddresses{*v4, *v6}, nil + return ret4, ret6, nil } var ErrCouldNotAllocateIP = errors.New("failed to allocate IP") -func (i *IPAllocator) next(prev netip.Addr, prefix netip.Prefix) (*netip.Addr, error) { - // Get the first IP in our prefix - ip := prev.Next() +func (i *IPAllocator) nextLocked(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) { + i.mu.Lock() + defer i.mu.Unlock() + + return i.next(prev, prefix) +} + +func (i *IPAllocator) next(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) { + var err error + var ip netip.Addr + + switch i.strategy { + case types.IPAllocationStrategySequential: + // Get the first IP in our prefix + ip = prev.Next() + case types.IPAllocationStrategyRandom: + ip, err = randomNext(*prefix) + if err != nil { + return nil, fmt.Errorf("getting random IP: %w", err) + } + } // TODO(kradalby): maybe this can be done less often. set, err := i.usedIPs.IPSet() @@ -136,7 +192,15 @@ func (i *IPAllocator) next(prev netip.Addr, prefix netip.Prefix) (*netip.Addr, e // Check if the IP has already been allocated. 
if set.Contains(ip) { - ip = ip.Next() + switch i.strategy { + case types.IPAllocationStrategySequential: + ip = ip.Next() + case types.IPAllocationStrategyRandom: + ip, err = randomNext(*prefix) + if err != nil { + return nil, fmt.Errorf("getting random IP: %w", err) + } + } continue } @@ -146,3 +210,120 @@ func (i *IPAllocator) next(prev netip.Addr, prefix netip.Prefix) (*netip.Addr, e return &ip, nil } } + +func randomNext(pfx netip.Prefix) (netip.Addr, error) { + rang := netipx.RangeOfPrefix(pfx) + fromIP, toIP := rang.From(), rang.To() + + var from, to big.Int + + from.SetBytes(fromIP.AsSlice()) + to.SetBytes(toIP.AsSlice()) + + // Find the max, this is how we can do "random range", + // get the "max" as 0 -> to - from and then add back from + // after. + tempMax := big.NewInt(0).Sub(&to, &from) + + out, err := rand.Int(rand.Reader, tempMax) + if err != nil { + return netip.Addr{}, fmt.Errorf("generating random IP: %w", err) + } + + valInRange := big.NewInt(0).Add(&from, out) + + ip, ok := netip.AddrFromSlice(valInRange.Bytes()) + if !ok { + return netip.Addr{}, fmt.Errorf("generated ip bytes are invalid ip") + } + + if !pfx.Contains(ip) { + return netip.Addr{}, fmt.Errorf( + "generated ip(%s) not in prefix(%s)", + ip.String(), + pfx.String(), + ) + } + + return ip, nil +} + +// BackfillNodeIPs will take a database transaction, and +// iterate through all of the current nodes in headscale +// and ensure it has IP addresses according to the current +// configuration. +// This means that if both IPv4 and IPv6 is set in the +// config, and some nodes are missing that type of IP, +// it will be added. +// If a prefix type has been removed (IPv4 or IPv6), it +// will remove the IPs in that family from the node. 
+func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) { + var err error + var ret []string + err = db.Write(func(tx *gorm.DB) error { + if i == nil { + return errors.New("backfilling IPs: ip allocator was nil") + } + + log.Trace().Msgf("starting to backfill IPs") + + nodes, err := ListNodes(tx) + if err != nil { + return fmt.Errorf("listing nodes to backfill IPs: %w", err) + } + + for _, node := range nodes { + log.Trace().Uint64("node.id", node.ID.Uint64()).Msg("checking if need backfill") + + changed := false + // IPv4 prefix is set, but node ip is missing, alloc + if i.prefix4 != nil && node.IPv4 == nil { + ret4, err := i.nextLocked(i.prev4, i.prefix4) + if err != nil { + return fmt.Errorf("failed to allocate ipv4 for node(%d): %w", node.ID, err) + } + + node.IPv4 = ret4 + changed = true + ret = append(ret, fmt.Sprintf("assigned IPv4 %q to Node(%d) %q", ret4.String(), node.ID, node.Hostname)) + } + + // IPv6 prefix is set, but node ip is missing, alloc + if i.prefix6 != nil && node.IPv6 == nil { + ret6, err := i.nextLocked(i.prev6, i.prefix6) + if err != nil { + return fmt.Errorf("failed to allocate ipv6 for node(%d): %w", node.ID, err) + } + + node.IPv6 = ret6 + changed = true + ret = append(ret, fmt.Sprintf("assigned IPv6 %q to Node(%d) %q", ret6.String(), node.ID, node.Hostname)) + } + + // IPv4 prefix is not set, but node has IP, remove + if i.prefix4 == nil && node.IPv4 != nil { + ret = append(ret, fmt.Sprintf("removing IPv4 %q from Node(%d) %q", node.IPv4.String(), node.ID, node.Hostname)) + node.IPv4 = nil + changed = true + } + + // IPv6 prefix is not set, but node has IP, remove + if i.prefix6 == nil && node.IPv6 != nil { + ret = append(ret, fmt.Sprintf("removing IPv6 %q from Node(%d) %q", node.IPv6.String(), node.ID, node.Hostname)) + node.IPv6 = nil + changed = true + } + + if changed { + err := tx.Save(node).Error + if err != nil { + return fmt.Errorf("saving node(%d) after adding IPs: %w", node.ID, err) + } + } + } + + return nil 
+ }) + + return ret, err +} diff --git a/hscontrol/db/ip_test.go b/hscontrol/db/ip_test.go index 17f39c81..ce9c134c 100644 --- a/hscontrol/db/ip_test.go +++ b/hscontrol/db/ip_test.go @@ -1,49 +1,43 @@ package db import ( + "database/sql" + "fmt" "net/netip" - "os" + "strings" "testing" "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" ) -func TestIPAllocator(t *testing.T) { - mpp := func(pref string) netip.Prefix { - return netip.MustParsePrefix(pref) - } - na := func(pref string) netip.Addr { - return netip.MustParseAddr(pref) - } - newDb := func() *HSDatabase { - tmpDir, err := os.MkdirTemp("", "headscale-db-test-*") - if err != nil { - t.Fatalf("creating temp dir: %s", err) - } - db, _ = NewHeadscaleDatabase( - types.DatabaseConfig{ - Type: "sqlite3", - Sqlite: types.SqliteConfig{ - Path: tmpDir + "/headscale_test.db", - }, - }, - "", - ) +var mpp = func(pref string) *netip.Prefix { + p := netip.MustParsePrefix(pref) + return &p +} - return db - } +var na = func(pref string) netip.Addr { + return netip.MustParseAddr(pref) +} +var nap = func(pref string) *netip.Addr { + n := na(pref) + return &n +} + +func TestIPAllocatorSequential(t *testing.T) { tests := []struct { name string dbFunc func() *HSDatabase - prefix4 netip.Prefix - prefix6 netip.Prefix + prefix4 *netip.Prefix + prefix6 *netip.Prefix getCount int - want []types.NodeAddresses + want4 []netip.Addr + want6 []netip.Addr }{ { name: "simple", @@ -56,23 +50,52 @@ func TestIPAllocator(t *testing.T) { getCount: 1, - want: []types.NodeAddresses{ - { - na("100.64.0.1"), - na("fd7a:115c:a1e0::1"), - }, + want4: []netip.Addr{ + na("100.64.0.1"), + }, + want6: []netip.Addr{ + na("fd7a:115c:a1e0::1"), + }, + }, + { + name: "simple-v4", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix4: mpp("100.64.0.0/10"), + + getCount: 1, + + want4: []netip.Addr{ + 
na("100.64.0.1"), + }, + }, + { + name: "simple-v6", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix6: mpp("fd7a:115c:a1e0::/48"), + + getCount: 1, + + want6: []netip.Addr{ + na("fd7a:115c:a1e0::1"), }, }, { name: "simple-with-db", dbFunc: func() *HSDatabase { - db := newDb() + db := dbForTest(t, "simple-with-db") + user := types.User{Name: ""} + db.DB.Save(&user) db.DB.Save(&types.Node{ - IPAddresses: types.NodeAddresses{ - na("100.64.0.1"), - na("fd7a:115c:a1e0::1"), - }, + User: user, + IPv4: nap("100.64.0.1"), + IPv6: nap("fd7a:115c:a1e0::1"), }) return db @@ -83,23 +106,24 @@ func TestIPAllocator(t *testing.T) { getCount: 1, - want: []types.NodeAddresses{ - { - na("100.64.0.2"), - na("fd7a:115c:a1e0::2"), - }, + want4: []netip.Addr{ + na("100.64.0.2"), + }, + want6: []netip.Addr{ + na("fd7a:115c:a1e0::2"), }, }, { name: "before-after-free-middle-in-db", dbFunc: func() *HSDatabase { - db := newDb() + db := dbForTest(t, "before-after-free-middle-in-db") + user := types.User{Name: ""} + db.DB.Save(&user) db.DB.Save(&types.Node{ - IPAddresses: types.NodeAddresses{ - na("100.64.0.2"), - na("fd7a:115c:a1e0::2"), - }, + User: user, + IPv4: nap("100.64.0.2"), + IPv6: nap("fd7a:115c:a1e0::2"), }) return db @@ -110,15 +134,13 @@ func TestIPAllocator(t *testing.T) { getCount: 2, - want: []types.NodeAddresses{ - { - na("100.64.0.1"), - na("fd7a:115c:a1e0::1"), - }, - { - na("100.64.0.3"), - na("fd7a:115c:a1e0::3"), - }, + want4: []netip.Addr{ + na("100.64.0.1"), + na("100.64.0.3"), + }, + want6: []netip.Addr{ + na("fd7a:115c:a1e0::1"), + na("fd7a:115c:a1e0::3"), }, }, } @@ -127,24 +149,367 @@ func TestIPAllocator(t *testing.T) { t.Run(tt.name, func(t *testing.T) { db := tt.dbFunc() - alloc, _ := NewIPAllocator(db, tt.prefix4, tt.prefix6) + alloc, _ := NewIPAllocator( + db, + tt.prefix4, + tt.prefix6, + types.IPAllocationStrategySequential, + ) spew.Dump(alloc) - t.Logf("prefixes: %q, %q", tt.prefix4.String(), tt.prefix6.String()) - - var got 
[]types.NodeAddresses + var got4s []netip.Addr + var got6s []netip.Addr for range tt.getCount { - gotSet, err := alloc.Next() + got4, got6, err := alloc.Next() if err != nil { t.Fatalf("allocating next IP: %s", err) } - got = append(got, gotSet) + if got4 != nil { + got4s = append(got4s, *got4) + } + + if got6 != nil { + got6s = append(got6s, *got6) + } } - if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" { - t.Errorf("IPAllocator unexpected result (-want +got):\n%s", diff) + if diff := cmp.Diff(tt.want4, got4s, util.Comparers...); diff != "" { + t.Errorf("IPAllocator 4s unexpected result (-want +got):\n%s", diff) + } + + if diff := cmp.Diff(tt.want6, got6s, util.Comparers...); diff != "" { + t.Errorf("IPAllocator 6s unexpected result (-want +got):\n%s", diff) + } + }) + } +} + +func TestIPAllocatorRandom(t *testing.T) { + tests := []struct { + name string + dbFunc func() *HSDatabase + + getCount int + + prefix4 *netip.Prefix + prefix6 *netip.Prefix + want4 bool + want6 bool + }{ + { + name: "simple", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + getCount: 1, + + want4: true, + want6: true, + }, + { + name: "simple-v4", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix4: mpp("100.64.0.0/10"), + + getCount: 1, + + want4: true, + want6: false, + }, + { + name: "simple-v6", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix6: mpp("fd7a:115c:a1e0::/48"), + + getCount: 1, + + want4: false, + want6: true, + }, + { + name: "generate-lots-of-random", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + getCount: 1000, + + want4: true, + want6: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := tt.dbFunc() + + alloc, _ := NewIPAllocator(db, tt.prefix4, tt.prefix6, types.IPAllocationStrategyRandom) + + spew.Dump(alloc) + + for range tt.getCount { + 
got4, got6, err := alloc.Next() + if err != nil { + t.Fatalf("allocating next IP: %s", err) + } + + t.Logf("addrs ipv4: %v, ipv6: %v", got4, got6) + + if tt.want4 { + if got4 == nil { + t.Fatalf("expected ipv4 addr, got nil") + } + } + + if tt.want6 { + if got6 == nil { + t.Fatalf("expected ipv4 addr, got nil") + } + } + } + }) + } +} + +func TestBackfillIPAddresses(t *testing.T) { + fullNodeP := func(i int) *types.Node { + v4 := fmt.Sprintf("100.64.0.%d", i) + v6 := fmt.Sprintf("fd7a:115c:a1e0::%d", i) + return &types.Node{ + IPv4DatabaseField: sql.NullString{ + Valid: true, + String: v4, + }, + IPv4: nap(v4), + IPv6DatabaseField: sql.NullString{ + Valid: true, + String: v6, + }, + IPv6: nap(v6), + } + } + tests := []struct { + name string + dbFunc func() *HSDatabase + + prefix4 *netip.Prefix + prefix6 *netip.Prefix + want types.Nodes + }{ + { + name: "simple-backfill-ipv6", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-ipv6") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.1"), + }) + + return db + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + want: types.Nodes{ + &types.Node{ + IPv4DatabaseField: sql.NullString{ + Valid: true, + String: "100.64.0.1", + }, + IPv4: nap("100.64.0.1"), + IPv6DatabaseField: sql.NullString{ + Valid: true, + String: "fd7a:115c:a1e0::1", + }, + IPv6: nap("fd7a:115c:a1e0::1"), + }, + }, + }, + { + name: "simple-backfill-ipv4", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-ipv4") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv6: nap("fd7a:115c:a1e0::1"), + }) + + return db + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + want: types.Nodes{ + &types.Node{ + IPv4DatabaseField: sql.NullString{ + Valid: true, + String: "100.64.0.1", + }, + IPv4: nap("100.64.0.1"), + IPv6DatabaseField: sql.NullString{ + Valid: 
true, + String: "fd7a:115c:a1e0::1", + }, + IPv6: nap("fd7a:115c:a1e0::1"), + }, + }, + }, + { + name: "simple-backfill-remove-ipv6", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-remove-ipv6") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.1"), + IPv6: nap("fd7a:115c:a1e0::1"), + }) + + return db + }, + + prefix4: mpp("100.64.0.0/10"), + + want: types.Nodes{ + &types.Node{ + IPv4DatabaseField: sql.NullString{ + Valid: true, + String: "100.64.0.1", + }, + IPv4: nap("100.64.0.1"), + }, + }, + }, + { + name: "simple-backfill-remove-ipv4", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-remove-ipv4") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.1"), + IPv6: nap("fd7a:115c:a1e0::1"), + }) + + return db + }, + + prefix6: mpp("fd7a:115c:a1e0::/48"), + + want: types.Nodes{ + &types.Node{ + IPv6DatabaseField: sql.NullString{ + Valid: true, + String: "fd7a:115c:a1e0::1", + }, + IPv6: nap("fd7a:115c:a1e0::1"), + }, + }, + }, + { + name: "multi-backfill-ipv6", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-ipv6") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.1"), + }) + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.2"), + }) + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.3"), + }) + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.4"), + }) + + return db + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + want: types.Nodes{ + fullNodeP(1), + fullNodeP(2), + fullNodeP(3), + fullNodeP(4), + }, + }, + } + + comps := append(util.Comparers, cmpopts.IgnoreFields(types.Node{}, + "ID", + "MachineKeyDatabaseField", + "NodeKeyDatabaseField", + "DiscoKeyDatabaseField", + "User", + "UserID", + "Endpoints", + "HostinfoDatabaseField", + "Hostinfo", + 
"Routes", + "CreatedAt", + "UpdatedAt", + )) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := tt.dbFunc() + + alloc, err := NewIPAllocator(db, tt.prefix4, tt.prefix6, types.IPAllocationStrategySequential) + if err != nil { + t.Fatalf("failed to set up ip alloc: %s", err) + } + + logs, err := db.BackfillNodeIPs(alloc) + if err != nil { + t.Fatalf("failed to backfill: %s", err) + } + + t.Logf("backfill log: \n%s", strings.Join(logs, "\n")) + + got, err := db.ListNodes() + if err != nil { + t.Fatalf("failed to get nodes: %s", err) + } + + if diff := cmp.Diff(tt.want, got, comps...); diff != "" { + t.Errorf("Backfill unexpected result (-want +got):\n%s", diff) } }) } diff --git a/hscontrol/db/node.go b/hscontrol/db/node.go index d02c2d39..c0f42de1 100644 --- a/hscontrol/db/node.go +++ b/hscontrol/db/node.go @@ -5,12 +5,13 @@ import ( "fmt" "net/netip" "sort" - "strings" + "sync" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/patrickmn/go-cache" + "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog/log" "gorm.io/gorm" "tailscale.com/tailcfg" @@ -34,27 +35,22 @@ var ( ) ) -func (hsdb *HSDatabase) ListPeers(node *types.Node) (types.Nodes, error) { +func (hsdb *HSDatabase) ListPeers(nodeID types.NodeID) (types.Nodes, error) { return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) { - return ListPeers(rx, node) + return ListPeers(rx, nodeID) }) } // ListPeers returns all peers of node, regardless of any Policy or if the node is expired. -func ListPeers(tx *gorm.DB, node *types.Node) (types.Nodes, error) { - log.Trace(). - Caller(). - Str("node", node.Hostname). - Msg("Finding direct peers") - +func ListPeers(tx *gorm.DB, nodeID types.NodeID) (types.Nodes, error) { nodes := types.Nodes{} if err := tx. Preload("AuthKey"). Preload("AuthKey.User"). Preload("User"). Preload("Routes"). 
- Where("node_key <> ?", - node.NodeKey.String()).Find(&nodes).Error; err != nil { + Where("id <> ?", + nodeID).Find(&nodes).Error; err != nil { return types.Nodes{}, err } @@ -83,18 +79,15 @@ func ListNodes(tx *gorm.DB) (types.Nodes, error) { return nodes, nil } -func listNodesByGivenName(tx *gorm.DB, givenName string) (types.Nodes, error) { - nodes := types.Nodes{} - if err := tx. - Preload("AuthKey"). - Preload("AuthKey.User"). - Preload("User"). - Preload("Routes"). - Where("given_name = ?", givenName).Find(&nodes).Error; err != nil { - return nil, err - } +func (hsdb *HSDatabase) ListEphemeralNodes() (types.Nodes, error) { + return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) { + nodes := types.Nodes{} + if err := rx.Joins("AuthKey").Where(`"AuthKey"."ephemeral" = true`).Find(&nodes).Error; err != nil { + return nil, err + } - return nodes, nil + return nodes, nil + }) } func (hsdb *HSDatabase) getNode(user string, name string) (*types.Node, error) { @@ -119,14 +112,14 @@ func getNode(tx *gorm.DB, user string, name string) (*types.Node, error) { return nil, ErrNodeNotFound } -func (hsdb *HSDatabase) GetNodeByID(id uint64) (*types.Node, error) { +func (hsdb *HSDatabase) GetNodeByID(id types.NodeID) (*types.Node, error) { return Read(hsdb.DB, func(rx *gorm.DB) (*types.Node, error) { return GetNodeByID(rx, id) }) } // GetNodeByID finds a Node by ID and returns the Node struct. -func GetNodeByID(tx *gorm.DB, id uint64) (*types.Node, error) { +func GetNodeByID(tx *gorm.DB, id types.NodeID) (*types.Node, error) { mach := types.Node{} if result := tx. Preload("AuthKey"). @@ -197,7 +190,7 @@ func GetNodeByAnyKey( } func (hsdb *HSDatabase) SetTags( - nodeID uint64, + nodeID types.NodeID, tags []string, ) error { return hsdb.Write(func(tx *gorm.DB) error { @@ -208,14 +201,19 @@ func (hsdb *HSDatabase) SetTags( // SetTags takes a Node struct pointer and update the forced tags. 
func SetTags( tx *gorm.DB, - nodeID uint64, + nodeID types.NodeID, tags []string, ) error { if len(tags) == 0 { + // if no tags are provided, we remove all forced tags + if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("forced_tags", types.StringList{}).Error; err != nil { + return fmt.Errorf("failed to remove tags for node in the database: %w", err) + } + return nil } - newTags := types.StringList{} + var newTags types.StringList for _, tag := range tags { if !util.StringOrPrefixListContains(newTags, tag) { newTags = append(newTags, tag) @@ -230,23 +228,24 @@ func SetTags( } // RenameNode takes a Node struct and a new GivenName for the nodes -// and renames it. +// and renames it. If the name is not unique, it will return an error. func RenameNode(tx *gorm.DB, - nodeID uint64, newName string, + nodeID types.NodeID, newName string, ) error { err := util.CheckForFQDNRules( newName, ) if err != nil { - log.Error(). - Caller(). - Str("func", "RenameNode"). - Uint64("nodeID", nodeID). - Str("newName", newName). - Err(err). - Msg("failed to rename node") + return fmt.Errorf("renaming node: %w", err) + } - return err + uniq, err := isUnqiueName(tx, newName) + if err != nil { + return fmt.Errorf("checking if name is unique: %w", err) + } + + if !uniq { + return fmt.Errorf("name is not unique: %s", newName) } if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil { @@ -256,7 +255,7 @@ func RenameNode(tx *gorm.DB, return nil } -func (hsdb *HSDatabase) NodeSetExpiry(nodeID uint64, expiry time.Time) error { +func (hsdb *HSDatabase) NodeSetExpiry(nodeID types.NodeID, expiry time.Time) error { return hsdb.Write(func(tx *gorm.DB) error { return NodeSetExpiry(tx, nodeID, expiry) }) @@ -264,14 +263,14 @@ func (hsdb *HSDatabase) NodeSetExpiry(nodeID uint64, expiry time.Time) error { // NodeSetExpiry takes a Node struct and a new expiry time. 
func NodeSetExpiry(tx *gorm.DB, - nodeID uint64, expiry time.Time, + nodeID types.NodeID, expiry time.Time, ) error { return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("expiry", expiry).Error } -func (hsdb *HSDatabase) DeleteNode(node *types.Node, isConnected map[key.MachinePublic]bool) error { - return hsdb.Write(func(tx *gorm.DB) error { - return DeleteNode(tx, node, isConnected) +func (hsdb *HSDatabase) DeleteNode(node *types.Node, isLikelyConnected *xsync.MapOf[types.NodeID, bool]) ([]types.NodeID, error) { + return Write(hsdb.DB, func(tx *gorm.DB) ([]types.NodeID, error) { + return DeleteNode(tx, node, isLikelyConnected) }) } @@ -279,24 +278,38 @@ func (hsdb *HSDatabase) DeleteNode(node *types.Node, isConnected map[key.Machine // Caller is responsible for notifying all of change. func DeleteNode(tx *gorm.DB, node *types.Node, - isConnected map[key.MachinePublic]bool, -) error { - err := deleteNodeRoutes(tx, node, map[key.MachinePublic]bool{}) + isLikelyConnected *xsync.MapOf[types.NodeID, bool], +) ([]types.NodeID, error) { + changed, err := deleteNodeRoutes(tx, node, isLikelyConnected) if err != nil { - return err + return changed, err } // Unscoped causes the node to be fully removed from the database. - if err := tx.Unscoped().Delete(&node).Error; err != nil { - return err + if err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error; err != nil { + return changed, err } - return nil + return changed, nil } -// UpdateLastSeen sets a node's last seen field indicating that we +// DeleteEphemeralNode deletes a Node from the database, note that this method +// will remove it straight, and not notify any changes or consider any routes. +// It is intended for Ephemeral nodes. 
+func (hsdb *HSDatabase) DeleteEphemeralNode( + nodeID types.NodeID, +) error { + return hsdb.Write(func(tx *gorm.DB) error { + if err := tx.Unscoped().Delete(&types.Node{}, nodeID).Error; err != nil { + return err + } + return nil + }) +} + +// SetLastSeen sets a node's last seen field indicating that we // have recently communicating with this node. -func UpdateLastSeen(tx *gorm.DB, nodeID uint64, lastSeen time.Time) error { +func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error { return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("last_seen", lastSeen).Error } @@ -307,7 +320,8 @@ func RegisterNodeFromAuthCallback( userName string, nodeExpiry *time.Time, registrationMethod string, - addrs types.NodeAddresses, + ipv4 *netip.Addr, + ipv6 *netip.Addr, ) (*types.Node, error) { log.Debug(). Str("machine_key", mkey.ShortString()). @@ -343,7 +357,7 @@ func RegisterNodeFromAuthCallback( node, err := RegisterNode( tx, registrationNode, - addrs, + ipv4, ipv6, ) if err == nil { @@ -359,14 +373,14 @@ func RegisterNodeFromAuthCallback( return nil, ErrNodeNotFoundRegistrationCache } -func (hsdb *HSDatabase) RegisterNode(node types.Node, addrs types.NodeAddresses) (*types.Node, error) { +func (hsdb *HSDatabase) RegisterNode(node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) { return Write(hsdb.DB, func(tx *gorm.DB) (*types.Node, error) { - return RegisterNode(tx, node, addrs) + return RegisterNode(tx, node, ipv4, ipv6) }) } // RegisterNode is executed from the CLI to register a new Node using its MachineKey. -func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*types.Node, error) { +func RegisterNode(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) { log.Debug(). Str("node", node.Hostname). Str("machine_key", node.MachineKey.ShortString()). 
@@ -374,10 +388,10 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ Str("user", node.User.Name). Msg("Registering node") - // If the node exists and we had already IPs for it, we just save it + // If the node exists and it already has IP(s), we just save it // so we store the node.Expire and node.Nodekey that has been set when // adding it to the registrationCache - if len(node.IPAddresses) > 0 { + if node.IPv4 != nil || node.IPv6 != nil { if err := tx.Save(&node).Error; err != nil { return nil, fmt.Errorf("failed register existing node in the database: %w", err) } @@ -393,7 +407,17 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ return &node, nil } - node.IPAddresses = addrs + node.IPv4 = ipv4 + node.IPv6 = ipv6 + + if node.GivenName == "" { + givenName, err := ensureUniqueGivenName(tx, node.Hostname) + if err != nil { + return nil, fmt.Errorf("failed to ensure unique given name: %w", err) + } + + node.GivenName = givenName + } if err := tx.Save(&node).Error; err != nil { return nil, fmt.Errorf("failed register(save) node in the database: %w", err) @@ -402,7 +426,6 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ log.Trace(). Caller(). Str("node", node.Hostname). - Str("ip", strings.Join(addrs.StringSlice(), ",")). Msg("Node registered with the database") return &node, nil @@ -456,16 +479,10 @@ func GetAdvertisedRoutes(tx *gorm.DB, node *types.Node) ([]netip.Prefix, error) Preload("Node"). Where("node_id = ? AND advertised = ?", node.ID, true).Find(&routes).Error if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error(). - Caller(). - Err(err). - Str("node", node.Hostname). 
- Msg("Could not get advertised routes for node") - - return nil, err + return nil, fmt.Errorf("getting advertised routes for node(%d): %w", node.ID, err) } - prefixes := []netip.Prefix{} + var prefixes []netip.Prefix for _, route := range routes { prefixes = append(prefixes, netip.Prefix(route.Prefix)) } @@ -488,16 +505,10 @@ func GetEnabledRoutes(tx *gorm.DB, node *types.Node) ([]netip.Prefix, error) { Where("node_id = ? AND advertised = ? AND enabled = ?", node.ID, true, true). Find(&routes).Error if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error(). - Caller(). - Err(err). - Str("node", node.Hostname). - Msg("Could not get enabled routes for node") - - return nil, err + return nil, fmt.Errorf("getting enabled routes for node(%d): %w", node.ID, err) } - prefixes := []netip.Prefix{} + var prefixes []netip.Prefix for _, route := range routes { prefixes = append(prefixes, netip.Prefix(route.Prefix)) } @@ -513,8 +524,6 @@ func IsRoutesEnabled(tx *gorm.DB, node *types.Node, routeStr string) bool { enabledRoutes, err := GetEnabledRoutes(tx, node) if err != nil { - log.Error().Err(err).Msg("Could not get enabled routes") - return false } @@ -606,7 +615,7 @@ func enableRoutes(tx *gorm.DB, return &types.StateUpdate{ Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, + ChangeNodes: []types.NodeID{node.ID}, Message: "created in db.enableRoutes", }, nil } @@ -637,40 +646,32 @@ func generateGivenName(suppliedName string, randomSuffix bool) (string, error) { return normalizedHostname, nil } -func (hsdb *HSDatabase) GenerateGivenName( - mkey key.MachinePublic, - suppliedName string, -) (string, error) { - return Read(hsdb.DB, func(rx *gorm.DB) (string, error) { - return GenerateGivenName(rx, mkey, suppliedName) - }) +func isUnqiueName(tx *gorm.DB, name string) (bool, error) { + nodes := types.Nodes{} + if err := tx. 
+ Where("given_name = ?", name).Find(&nodes).Error; err != nil { + return false, err + } + + return len(nodes) == 0, nil } -func GenerateGivenName( +func ensureUniqueGivenName( tx *gorm.DB, - mkey key.MachinePublic, - suppliedName string, + name string, ) (string, error) { - givenName, err := generateGivenName(suppliedName, false) + givenName, err := generateGivenName(name, false) if err != nil { return "", err } - // Tailscale rules (may differ) https://tailscale.com/kb/1098/machine-names/ - nodes, err := listNodesByGivenName(tx, givenName) + unique, err := isUnqiueName(tx, givenName) if err != nil { return "", err } - var nodeFound *types.Node - for idx, node := range nodes { - if node.GivenName == givenName { - nodeFound = nodes[idx] - } - } - - if nodeFound != nil && nodeFound.MachineKey.String() != mkey.String() { - postfixedName, err := generateGivenName(suppliedName, true) + if !unique { + postfixedName, err := generateGivenName(name, true) if err != nil { return "", err } @@ -681,61 +682,6 @@ func GenerateGivenName( return givenName, nil } -func ExpireEphemeralNodes(tx *gorm.DB, - inactivityThreshhold time.Duration, -) (types.StateUpdate, bool) { - users, err := ListUsers(tx) - if err != nil { - log.Error().Err(err).Msg("Error listing users") - - return types.StateUpdate{}, false - } - - expired := make([]tailcfg.NodeID, 0) - for _, user := range users { - nodes, err := ListNodesByUser(tx, user.Name) - if err != nil { - log.Error(). - Err(err). - Str("user", user.Name). - Msg("Error listing nodes in user") - - return types.StateUpdate{}, false - } - - for idx, node := range nodes { - if node.IsEphemeral() && node.LastSeen != nil && - time.Now(). - After(node.LastSeen.Add(inactivityThreshhold)) { - expired = append(expired, tailcfg.NodeID(node.ID)) - - log.Info(). - Str("node", node.Hostname). 
- Msg("Ephemeral client removed from database") - - // empty isConnected map as ephemeral nodes are not routes - err = DeleteNode(tx, nodes[idx], map[key.MachinePublic]bool{}) - if err != nil { - log.Error(). - Err(err). - Str("node", node.Hostname). - Msg("🤮 Cannot delete ephemeral node from the database") - } - } - } - - // TODO(kradalby): needs to be moved out of transaction - } - if len(expired) > 0 { - return types.StateUpdate{ - Type: types.StatePeerRemoved, - Removed: expired, - }, true - } - - return types.StateUpdate{}, false -} - func ExpireExpiredNodes(tx *gorm.DB, lastCheck time.Time, ) (time.Time, types.StateUpdate, bool) { @@ -748,41 +694,14 @@ func ExpireExpiredNodes(tx *gorm.DB, nodes, err := ListNodes(tx) if err != nil { - log.Error(). - Err(err). - Msg("Error listing nodes to find expired nodes") - return time.Unix(0, 0), types.StateUpdate{}, false } - for index, node := range nodes { - if node.IsExpired() && - // TODO(kradalby): Replace this, it is very spammy - // It will notify about all nodes that has been expired. - // It should only notify about expired nodes since _last check_. - node.Expiry.After(lastCheck) { + for _, node := range nodes { + if node.IsExpired() && node.Expiry.After(lastCheck) { expired = append(expired, &tailcfg.PeerChange{ NodeID: tailcfg.NodeID(node.ID), KeyExpiry: node.Expiry, }) - - now := time.Now() - // Do not use setNodeExpiry as that has a notifier hook, which - // can cause a deadlock, we are updating all changed nodes later - // and there is no point in notifiying twice. - if err := tx.Model(&nodes[index]).Updates(types.Node{ - Expiry: &now, - }).Error; err != nil { - log.Error(). - Err(err). - Str("node", node.Hostname). - Str("name", node.GivenName). - Msg("🤮 Cannot expire node") - } else { - log.Info(). - Str("node", node.Hostname). - Str("name", node.GivenName). 
- Msg("Node successfully expired") - } } } @@ -795,3 +714,77 @@ func ExpireExpiredNodes(tx *gorm.DB, return started, types.StateUpdate{}, false } + +// EphemeralGarbageCollector is a garbage collector that will delete nodes after +// a certain amount of time. +// It is used to delete ephemeral nodes that have disconnected and should be +// cleaned up. +type EphemeralGarbageCollector struct { + mu sync.Mutex + + deleteFunc func(types.NodeID) + toBeDeleted map[types.NodeID]*time.Timer + + deleteCh chan types.NodeID + cancelCh chan struct{} +} + +// NewEphemeralGarbageCollector creates a new EphemeralGarbageCollector, it takes +// a deleteFunc that will be called when a node is scheduled for deletion. +func NewEphemeralGarbageCollector(deleteFunc func(types.NodeID)) *EphemeralGarbageCollector { + return &EphemeralGarbageCollector{ + toBeDeleted: make(map[types.NodeID]*time.Timer), + deleteCh: make(chan types.NodeID, 10), + cancelCh: make(chan struct{}), + deleteFunc: deleteFunc, + } +} + +// Close stops the garbage collector. +func (e *EphemeralGarbageCollector) Close() { + e.cancelCh <- struct{}{} +} + +// Schedule schedules a node for deletion after the expiry duration. +func (e *EphemeralGarbageCollector) Schedule(nodeID types.NodeID, expiry time.Duration) { + e.mu.Lock() + timer := time.NewTimer(expiry) + e.toBeDeleted[nodeID] = timer + e.mu.Unlock() + + go func() { + select { + case _, ok := <-timer.C: + if ok { + e.deleteCh <- nodeID + } + } + }() +} + +// Cancel cancels the deletion of a node. +func (e *EphemeralGarbageCollector) Cancel(nodeID types.NodeID) { + e.mu.Lock() + defer e.mu.Unlock() + + if timer, ok := e.toBeDeleted[nodeID]; ok { + timer.Stop() + delete(e.toBeDeleted, nodeID) + } +} + +// Start starts the garbage collector. 
+func (e *EphemeralGarbageCollector) Start() { + for { + select { + case <-e.cancelCh: + return + case nodeID := <-e.deleteCh: + e.mu.Lock() + delete(e.toBeDeleted, nodeID) + e.mu.Unlock() + + go e.deleteFunc(nodeID) + } + } +} diff --git a/hscontrol/db/node_test.go b/hscontrol/db/node_test.go index 5e8eb294..bafb22ba 100644 --- a/hscontrol/db/node_test.go +++ b/hscontrol/db/node_test.go @@ -1,19 +1,28 @@ package db import ( + "crypto/rand" "fmt" + "math/big" "net/netip" "regexp" + "sort" "strconv" + "sync" "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" + "github.com/puzpuzpuz/xsync/v3" + "github.com/stretchr/testify/assert" "gopkg.in/check.v1" + "gorm.io/gorm" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/types/ptr" ) func (s *Suite) TestGetNode(c *check.C) { @@ -36,9 +45,10 @@ func (s *Suite) TestGetNode(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(node) + trx := db.DB.Save(node) + c.Assert(trx.Error, check.IsNil) _, err = db.getNode("test", "testnode") c.Assert(err, check.IsNil) @@ -64,9 +74,10 @@ func (s *Suite) TestGetNodeByID(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) _, err = db.GetNodeByID(0) c.Assert(err, check.IsNil) @@ -94,9 +105,10 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) _, err = db.GetNodeByAnyKey(machineKey.Public(), nodeKey.Public(), 
oldNodeKey.Public()) c.Assert(err, check.IsNil) @@ -116,11 +128,11 @@ func (s *Suite) TestHardDeleteNode(c *check.C) { Hostname: "testnode3", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(1), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) - err = db.DeleteNode(&node, map[key.MachinePublic]bool{}) + _, err = db.DeleteNode(&node, xsync.NewMapOf[types.NodeID, bool]()) c.Assert(err, check.IsNil) _, err = db.getNode(user.Name, "testnode3") @@ -142,21 +154,22 @@ func (s *Suite) TestListPeers(c *check.C) { machineKey := key.NewMachine() node := types.Node{ - ID: uint64(index), + ID: types.NodeID(index), MachineKey: machineKey.Public(), NodeKey: nodeKey.Public(), Hostname: "testnode" + strconv.Itoa(index), UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) } node0ByID, err := db.GetNodeByID(0) c.Assert(err, check.IsNil) - peersOfNode0, err := db.ListPeers(node0ByID) + peersOfNode0, err := db.ListPeers(node0ByID.ID) c.Assert(err, check.IsNil) c.Assert(len(peersOfNode0), check.Equals, 9) @@ -188,19 +201,19 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) { nodeKey := key.NewNode() machineKey := key.NewMachine() + v4 := netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))) node := types.Node{ - ID: uint64(index), - MachineKey: machineKey.Public(), - NodeKey: nodeKey.Public(), - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))), - }, + ID: types.NodeID(index), + MachineKey: machineKey.Public(), + NodeKey: nodeKey.Public(), + IPv4: &v4, Hostname: "testnode" + strconv.Itoa(index), UserID: stor[index%2].user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(stor[index%2].key.ID), + AuthKeyID: ptr.To(stor[index%2].key.ID), } - db.DB.Save(&node) + trx := db.DB.Save(&node) 
+ c.Assert(trx.Error, check.IsNil) } aclPolicy := &policy.ACLPolicy{ @@ -232,16 +245,16 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) { c.Logf("Node(%v), user: %v", testNode.Hostname, testNode.User) c.Assert(err, check.IsNil) - adminPeers, err := db.ListPeers(adminNode) + adminPeers, err := db.ListPeers(adminNode.ID) c.Assert(err, check.IsNil) - testPeers, err := db.ListPeers(testNode) + testPeers, err := db.ListPeers(testNode.ID) c.Assert(err, check.IsNil) - adminRules, _, err := policy.GenerateFilterAndSSHRules(aclPolicy, adminNode, adminPeers) + adminRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, adminNode, adminPeers) c.Assert(err, check.IsNil) - testRules, _, err := policy.GenerateFilterAndSSHRules(aclPolicy, testNode, testPeers) + testRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, testNode, testPeers) c.Assert(err, check.IsNil) peersOfAdminNode := policy.FilterNodesByACL(adminNode, adminPeers, adminRules) @@ -280,7 +293,7 @@ func (s *Suite) TestExpireNode(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), Expiry: &time.Time{}, } db.DB.Save(node) @@ -301,70 +314,6 @@ func (s *Suite) TestExpireNode(c *check.C) { c.Assert(nodeFromDB.IsExpired(), check.Equals, true) } -func (s *Suite) TestSerdeAddressStrignSlice(c *check.C) { - input := types.NodeAddresses([]netip.Addr{ - netip.MustParseAddr("192.0.2.1"), - netip.MustParseAddr("2001:db8::1"), - }) - serialized, err := input.Value() - c.Assert(err, check.IsNil) - if serial, ok := serialized.(string); ok { - c.Assert(serial, check.Equals, "192.0.2.1,2001:db8::1") - } - - var deserialized types.NodeAddresses - err = deserialized.Scan(serialized) - c.Assert(err, check.IsNil) - - c.Assert(len(deserialized), check.Equals, len(input)) - for i := range deserialized { - c.Assert(deserialized[i], check.Equals, input[i]) - } -} - -func (s *Suite) TestGenerateGivenName(c 
*check.C) { - user1, err := db.CreateUser("user-1") - c.Assert(err, check.IsNil) - - pak, err := db.CreatePreAuthKey(user1.Name, false, false, nil, nil) - c.Assert(err, check.IsNil) - - _, err = db.getNode("user-1", "testnode") - c.Assert(err, check.NotNil) - - nodeKey := key.NewNode() - machineKey := key.NewMachine() - - machineKey2 := key.NewMachine() - - node := &types.Node{ - ID: 0, - MachineKey: machineKey.Public(), - NodeKey: nodeKey.Public(), - Hostname: "hostname-1", - GivenName: "hostname-1", - UserID: user1.ID, - RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), - } - db.DB.Save(node) - - givenName, err := db.GenerateGivenName(machineKey2.Public(), "hostname-2") - comment := check.Commentf("Same user, unique nodes, unique hostnames, no conflict") - c.Assert(err, check.IsNil, comment) - c.Assert(givenName, check.Equals, "hostname-2", comment) - - givenName, err = db.GenerateGivenName(machineKey.Public(), "hostname-1") - comment = check.Commentf("Same user, same node, same hostname, no conflict") - c.Assert(err, check.IsNil, comment) - c.Assert(givenName, check.Equals, "hostname-1", comment) - - givenName, err = db.GenerateGivenName(machineKey2.Public(), "hostname-1") - comment = check.Commentf("Same user, unique nodes, same hostname, conflict") - c.Assert(err, check.IsNil, comment) - c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment) -} - func (s *Suite) TestSetTags(c *check.C) { user, err := db.CreateUser("test") c.Assert(err, check.IsNil) @@ -385,9 +334,11 @@ func (s *Suite) TestSetTags(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(node) + + trx := db.DB.Save(node) + c.Assert(trx.Error, check.IsNil) // assign simple tags sTags := []string{"tag:test", "tag:foo"} @@ -397,7 +348,7 @@ func (s *Suite) TestSetTags(c *check.C) { c.Assert(err, check.IsNil) 
c.Assert(node.ForcedTags, check.DeepEquals, types.StringList(sTags)) - // assign duplicat tags, expect no errors but no doubles in DB + // assign duplicate tags, expect no errors but no doubles in DB eTags := []string{"tag:bar", "tag:test", "tag:unknown", "tag:test"} err = db.SetTags(node.ID, eTags) c.Assert(err, check.IsNil) @@ -408,6 +359,13 @@ func (s *Suite) TestSetTags(c *check.C) { check.DeepEquals, types.StringList([]string{"tag:bar", "tag:test", "tag:unknown"}), ) + + // test removing tags + err = db.SetTags(node.ID, []string{}) + c.Assert(err, check.IsNil) + node, err = db.getNode("test", "testnode") + c.Assert(err, check.IsNil) + c.Assert(node.ForcedTags, check.DeepEquals, types.StringList([]string{})) } func TestHeadscale_generateGivenName(t *testing.T) { @@ -517,8 +475,37 @@ func TestHeadscale_generateGivenName(t *testing.T) { } } -func (s *Suite) TestAutoApproveRoutes(c *check.C) { - acl := []byte(` +func TestAutoApproveRoutes(t *testing.T) { + tests := []struct { + name string + acl string + routes []netip.Prefix + want []netip.Prefix + }{ + { + name: "2068-approve-issue-sub", + acl: ` +{ + "groups": { + "group:k8s": ["test"] + }, + + "acls": [ + {"action": "accept", "users": ["*"], "ports": ["*:*"]}, + ], + + "autoApprovers": { + "routes": { + "10.42.0.0/16": ["test"], + } + } +}`, + routes: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}, + want: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}, + }, + { + name: "2068-approve-issue-sub", + acl: ` { "tagOwners": { "tag:exit": ["test"], @@ -539,57 +526,308 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) { "10.11.0.0/16": ["test"], } } -} - `) +}`, + routes: []netip.Prefix{ + netip.MustParsePrefix("0.0.0.0/0"), + netip.MustParsePrefix("::/0"), + netip.MustParsePrefix("10.10.0.0/16"), + netip.MustParsePrefix("10.11.0.0/24"), + }, + want: []netip.Prefix{ + netip.MustParsePrefix("::/0"), + netip.MustParsePrefix("10.11.0.0/24"), + netip.MustParsePrefix("10.10.0.0/16"), + 
netip.MustParsePrefix("0.0.0.0/0"), + }, + }, + } - pol, err := policy.LoadACLPolicyFromBytes(acl, "hujson") - c.Assert(err, check.IsNil) - c.Assert(pol, check.NotNil) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + adb, err := newTestDB() + assert.NoError(t, err) + pol, err := policy.LoadACLPolicyFromBytes([]byte(tt.acl)) + + assert.NoError(t, err) + assert.NotNil(t, pol) + + user, err := adb.CreateUser("test") + assert.NoError(t, err) + + pak, err := adb.CreatePreAuthKey(user.Name, false, false, nil, nil) + assert.NoError(t, err) + + nodeKey := key.NewNode() + machineKey := key.NewMachine() + + v4 := netip.MustParseAddr("100.64.0.1") + node := types.Node{ + ID: 0, + MachineKey: machineKey.Public(), + NodeKey: nodeKey.Public(), + Hostname: "test", + UserID: user.ID, + RegisterMethod: util.RegisterMethodAuthKey, + AuthKeyID: ptr.To(pak.ID), + Hostinfo: &tailcfg.Hostinfo{ + RequestTags: []string{"tag:exit"}, + RoutableIPs: tt.routes, + }, + IPv4: &v4, + } + + trx := adb.DB.Save(&node) + assert.NoError(t, trx.Error) + + sendUpdate, err := adb.SaveNodeRoutes(&node) + assert.NoError(t, err) + assert.False(t, sendUpdate) + + node0ByID, err := adb.GetNodeByID(0) + assert.NoError(t, err) + + // TODO(kradalby): Check state update + err = adb.EnableAutoApprovedRoutes(pol, node0ByID) + assert.NoError(t, err) + + enabledRoutes, err := adb.GetEnabledRoutes(node0ByID) + assert.NoError(t, err) + assert.Len(t, enabledRoutes, len(tt.want)) + + sort.Slice(enabledRoutes, func(i, j int) bool { + return util.ComparePrefix(enabledRoutes[i], enabledRoutes[j]) > 0 + }) + + if diff := cmp.Diff(tt.want, enabledRoutes, util.Comparers...); diff != "" { + t.Errorf("unexpected enabled routes (-want +got):\n%s", diff) + } + }) + } +} + +func TestEphemeralGarbageCollectorOrder(t *testing.T) { + want := []types.NodeID{1, 3} + got := []types.NodeID{} + var mu sync.Mutex + + e := NewEphemeralGarbageCollector(func(ni types.NodeID) { + mu.Lock() + defer mu.Unlock() + got = 
append(got, ni) + }) + go e.Start() + + go e.Schedule(1, 1*time.Second) + go e.Schedule(2, 2*time.Second) + go e.Schedule(3, 3*time.Second) + go e.Schedule(4, 4*time.Second) + + time.Sleep(time.Second) + go e.Cancel(2) + go e.Cancel(4) + + time.Sleep(6 * time.Second) + + e.Close() + + mu.Lock() + defer mu.Unlock() + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("wrong nodes deleted, unexpected result (-want +got):\n%s", diff) + } +} + +func TestEphemeralGarbageCollectorLoads(t *testing.T) { + var got []types.NodeID + var mu sync.Mutex + + want := 1000 + + e := NewEphemeralGarbageCollector(func(ni types.NodeID) { + mu.Lock() + defer mu.Unlock() + + time.Sleep(time.Duration(generateRandomNumber(t, 3)) * time.Millisecond) + got = append(got, ni) + }) + go e.Start() + + for i := 0; i < want; i++ { + go e.Schedule(types.NodeID(i), 1*time.Second) + } + + time.Sleep(10 * time.Second) + + e.Close() + + mu.Lock() + defer mu.Unlock() + + if len(got) != want { + t.Errorf("expected %d, got %d", want, len(got)) + } +} + +func generateRandomNumber(t *testing.T, max int64) int64 { + t.Helper() + maxB := big.NewInt(max) + n, err := rand.Int(rand.Reader, maxB) + if err != nil { + t.Fatalf("getting random number: %s", err) + } + return n.Int64() + 1 +} + +func TestListEphemeralNodes(t *testing.T) { + db, err := newTestDB() + if err != nil { + t.Fatalf("creating db: %s", err) + } user, err := db.CreateUser("test") - c.Assert(err, check.IsNil) + assert.NoError(t, err) pak, err := db.CreatePreAuthKey(user.Name, false, false, nil, nil) - c.Assert(err, check.IsNil) + assert.NoError(t, err) - nodeKey := key.NewNode() - machineKey := key.NewMachine() - - defaultRouteV4 := netip.MustParsePrefix("0.0.0.0/0") - defaultRouteV6 := netip.MustParsePrefix("::/0") - route1 := netip.MustParsePrefix("10.10.0.0/16") - // Check if a subprefix of an autoapproved route is approved - route2 := netip.MustParsePrefix("10.11.0.0/24") + pakEph, err := db.CreatePreAuthKey(user.Name, false, true, 
nil, nil) + assert.NoError(t, err) node := types.Node{ ID: 0, - MachineKey: machineKey.Public(), - NodeKey: nodeKey.Public(), + MachineKey: key.NewMachine().Public(), + NodeKey: key.NewNode().Public(), Hostname: "test", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), - Hostinfo: &tailcfg.Hostinfo{ - RequestTags: []string{"tag:exit"}, - RoutableIPs: []netip.Prefix{defaultRouteV4, defaultRouteV6, route1, route2}, - }, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(&node) + nodeEph := types.Node{ + ID: 0, + MachineKey: key.NewMachine().Public(), + NodeKey: key.NewNode().Public(), + Hostname: "ephemeral", + UserID: user.ID, + RegisterMethod: util.RegisterMethodAuthKey, + AuthKeyID: ptr.To(pakEph.ID), + } - sendUpdate, err := db.SaveNodeRoutes(&node) - c.Assert(err, check.IsNil) - c.Assert(sendUpdate, check.Equals, false) + err = db.DB.Save(&node).Error + assert.NoError(t, err) - node0ByID, err := db.GetNodeByID(0) - c.Assert(err, check.IsNil) + err = db.DB.Save(&nodeEph).Error + assert.NoError(t, err) - // TODO(kradalby): Check state update - _, err = db.EnableAutoApprovedRoutes(pol, node0ByID) - c.Assert(err, check.IsNil) + nodes, err := db.ListNodes() + assert.NoError(t, err) - enabledRoutes, err := db.GetEnabledRoutes(node0ByID) - c.Assert(err, check.IsNil) - c.Assert(enabledRoutes, check.HasLen, 4) + ephemeralNodes, err := db.ListEphemeralNodes() + assert.NoError(t, err) + + assert.Len(t, nodes, 2) + assert.Len(t, ephemeralNodes, 1) + + assert.Equal(t, nodeEph.ID, ephemeralNodes[0].ID) + assert.Equal(t, nodeEph.AuthKeyID, ephemeralNodes[0].AuthKeyID) + assert.Equal(t, nodeEph.UserID, ephemeralNodes[0].UserID) + assert.Equal(t, nodeEph.Hostname, ephemeralNodes[0].Hostname) +} + +func TestRenameNode(t *testing.T) { + db, err := newTestDB() + if err != nil { + t.Fatalf("creating db: %s", err) + } + + user, err := db.CreateUser("test") + assert.NoError(t, err) + + 
user2, err := db.CreateUser("test2") + assert.NoError(t, err) + + node := types.Node{ + ID: 0, + MachineKey: key.NewMachine().Public(), + NodeKey: key.NewNode().Public(), + Hostname: "test", + UserID: user.ID, + RegisterMethod: util.RegisterMethodAuthKey, + } + + node2 := types.Node{ + ID: 0, + MachineKey: key.NewMachine().Public(), + NodeKey: key.NewNode().Public(), + Hostname: "test", + UserID: user2.ID, + RegisterMethod: util.RegisterMethodAuthKey, + } + + err = db.DB.Save(&node).Error + assert.NoError(t, err) + + err = db.DB.Save(&node2).Error + assert.NoError(t, err) + + err = db.DB.Transaction(func(tx *gorm.DB) error { + _, err := RegisterNode(tx, node, nil, nil) + if err != nil { + return err + } + _, err = RegisterNode(tx, node2, nil, nil) + return err + }) + assert.NoError(t, err) + + nodes, err := db.ListNodes() + assert.NoError(t, err) + + assert.Len(t, nodes, 2) + + t.Logf("node1 %s %s", nodes[0].Hostname, nodes[0].GivenName) + t.Logf("node2 %s %s", nodes[1].Hostname, nodes[1].GivenName) + + assert.Equal(t, nodes[0].Hostname, nodes[0].GivenName) + assert.NotEqual(t, nodes[1].Hostname, nodes[1].GivenName) + assert.Equal(t, nodes[0].Hostname, nodes[1].Hostname) + assert.NotEqual(t, nodes[0].Hostname, nodes[1].GivenName) + assert.Contains(t, nodes[1].GivenName, nodes[0].Hostname) + assert.Equal(t, nodes[0].GivenName, nodes[1].Hostname) + assert.Len(t, nodes[0].Hostname, 4) + assert.Len(t, nodes[1].Hostname, 4) + assert.Len(t, nodes[0].GivenName, 4) + assert.Len(t, nodes[1].GivenName, 13) + + // Nodes can be renamed to a unique name + err = db.Write(func(tx *gorm.DB) error { + return RenameNode(tx, nodes[0].ID, "newname") + }) + assert.NoError(t, err) + + nodes, err = db.ListNodes() + assert.NoError(t, err) + assert.Len(t, nodes, 2) + assert.Equal(t, nodes[0].Hostname, "test") + assert.Equal(t, nodes[0].GivenName, "newname") + + // Nodes can reuse name that is no longer used + err = db.Write(func(tx *gorm.DB) error { + return RenameNode(tx, nodes[1].ID, 
"test") + }) + assert.NoError(t, err) + + nodes, err = db.ListNodes() + assert.NoError(t, err) + assert.Len(t, nodes, 2) + assert.Equal(t, nodes[0].Hostname, "test") + assert.Equal(t, nodes[0].GivenName, "newname") + assert.Equal(t, nodes[1].GivenName, "test") + + // Nodes cannot be renamed to used names + err = db.Write(func(tx *gorm.DB) error { + return RenameNode(tx, nodes[0].ID, "test") + }) + assert.ErrorContains(t, err, "name is not unique") } diff --git a/hscontrol/db/policy.go b/hscontrol/db/policy.go new file mode 100644 index 00000000..49b419b5 --- /dev/null +++ b/hscontrol/db/policy.go @@ -0,0 +1,43 @@ +package db + +import ( + "errors" + + "github.com/juanfont/headscale/hscontrol/types" + "gorm.io/gorm" + "gorm.io/gorm/clause" +) + +// SetPolicy sets the policy in the database. +func (hsdb *HSDatabase) SetPolicy(policy string) (*types.Policy, error) { + // Create a new policy. + p := types.Policy{ + Data: policy, + } + + if err := hsdb.DB.Clauses(clause.Returning{}).Create(&p).Error; err != nil { + return nil, err + } + + return &p, nil +} + +// GetPolicy returns the latest policy in the database. +func (hsdb *HSDatabase) GetPolicy() (*types.Policy, error) { + var p types.Policy + + // Query: + // SELECT * FROM policies ORDER BY id DESC LIMIT 1; + if err := hsdb.DB. + Order("id DESC"). + Limit(1). 
+ First(&p).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, types.ErrPolicyNotFound + } + + return nil, err + } + + return &p, nil +} diff --git a/hscontrol/db/preauth_keys.go b/hscontrol/db/preauth_keys.go index d1d94bbe..5ea59a9c 100644 --- a/hscontrol/db/preauth_keys.go +++ b/hscontrol/db/preauth_keys.go @@ -10,6 +10,7 @@ import ( "github.com/juanfont/headscale/hscontrol/types" "gorm.io/gorm" + "tailscale.com/types/ptr" ) var ( @@ -83,7 +84,7 @@ func CreatePreAuthKey( if !seenTags[tag] { if err := tx.Save(&types.PreAuthKeyACLTag{PreAuthKeyID: key.ID, Tag: tag}).Error; err != nil { return nil, fmt.Errorf( - "failed to ceate key tag in the database: %w", + "failed to create key tag in the database: %w", err, ) } @@ -92,10 +93,6 @@ func CreatePreAuthKey( } } - if err != nil { - return nil, err - } - return &key, nil } @@ -203,7 +200,7 @@ func ValidatePreAuthKey(tx *gorm.DB, k string) (*types.PreAuthKey, error) { nodes := types.Nodes{} if err := tx. Preload("AuthKey"). - Where(&types.Node{AuthKeyID: uint(pak.ID)}). + Where(&types.Node{AuthKeyID: ptr.To(pak.ID)}). 
Find(&nodes).Error; err != nil { return nil, err } diff --git a/hscontrol/db/preauth_keys_test.go b/hscontrol/db/preauth_keys_test.go index 53cf37c4..9dd5b199 100644 --- a/hscontrol/db/preauth_keys_test.go +++ b/hscontrol/db/preauth_keys_test.go @@ -6,7 +6,7 @@ import ( "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "gopkg.in/check.v1" - "gorm.io/gorm" + "tailscale.com/types/ptr" ) func (*Suite) TestCreatePreAuthKey(c *check.C) { @@ -81,9 +81,10 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) { Hostname: "testest", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) key, err := db.ValidatePreAuthKey(pak.Key) c.Assert(err, check.Equals, ErrSingleUseAuthKeyHasBeenUsed) @@ -102,9 +103,10 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) { Hostname: "testest", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) key, err := db.ValidatePreAuthKey(pak.Key) c.Assert(err, check.IsNil) @@ -123,74 +125,6 @@ func (*Suite) TestNotReusableNotBeingUsedKey(c *check.C) { c.Assert(key.ID, check.Equals, pak.ID) } -func (*Suite) TestEphemeralKeyReusable(c *check.C) { - user, err := db.CreateUser("test7") - c.Assert(err, check.IsNil) - - pak, err := db.CreatePreAuthKey(user.Name, true, true, nil, nil) - c.Assert(err, check.IsNil) - - now := time.Now().Add(-time.Second * 30) - node := types.Node{ - ID: 0, - Hostname: "testest", - UserID: user.ID, - RegisterMethod: util.RegisterMethodAuthKey, - LastSeen: &now, - AuthKeyID: uint(pak.ID), - } - db.DB.Save(&node) - - _, err = db.ValidatePreAuthKey(pak.Key) - c.Assert(err, check.IsNil) - - _, err = db.getNode("test7", "testest") - c.Assert(err, check.IsNil) - - db.DB.Transaction(func(tx *gorm.DB) 
error { - ExpireEphemeralNodes(tx, time.Second*20) - return nil - }) - - // The machine record should have been deleted - _, err = db.getNode("test7", "testest") - c.Assert(err, check.NotNil) -} - -func (*Suite) TestEphemeralKeyNotReusable(c *check.C) { - user, err := db.CreateUser("test7") - c.Assert(err, check.IsNil) - - pak, err := db.CreatePreAuthKey(user.Name, false, true, nil, nil) - c.Assert(err, check.IsNil) - - now := time.Now().Add(-time.Second * 30) - node := types.Node{ - ID: 0, - Hostname: "testest", - UserID: user.ID, - RegisterMethod: util.RegisterMethodAuthKey, - LastSeen: &now, - AuthKeyID: uint(pak.ID), - } - db.DB.Save(&node) - - _, err = db.ValidatePreAuthKey(pak.Key) - c.Assert(err, check.NotNil) - - _, err = db.getNode("test7", "testest") - c.Assert(err, check.IsNil) - - db.DB.Transaction(func(tx *gorm.DB) error { - ExpireEphemeralNodes(tx, time.Second*20) - return nil - }) - - // The machine record should have been deleted - _, err = db.getNode("test7", "testest") - c.Assert(err, check.NotNil) -} - func (*Suite) TestExpirePreauthKey(c *check.C) { user, err := db.CreateUser("test3") c.Assert(err, check.IsNil) diff --git a/hscontrol/db/routes.go b/hscontrol/db/routes.go index 1ee144a7..fd837c29 100644 --- a/hscontrol/db/routes.go +++ b/hscontrol/db/routes.go @@ -2,13 +2,16 @@ package db import ( "errors" + "fmt" "net/netip" + "sort" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" + "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog/log" "gorm.io/gorm" - "tailscale.com/types/key" + "tailscale.com/util/set" ) var ErrRouteIsNotAvailable = errors.New("route is not available") @@ -124,8 +127,8 @@ func EnableRoute(tx *gorm.DB, id uint64) (*types.StateUpdate, error) { func DisableRoute(tx *gorm.DB, id uint64, - isConnected map[key.MachinePublic]bool, -) (*types.StateUpdate, error) { + isLikelyConnected *xsync.MapOf[types.NodeID, bool], +) ([]types.NodeID, error) { route, err := GetRoute(tx, id) if 
err != nil { return nil, err @@ -137,16 +140,15 @@ func DisableRoute(tx *gorm.DB, // Tailscale requires both IPv4 and IPv6 exit routes to // be enabled at the same time, as per // https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002 - var update *types.StateUpdate + var update []types.NodeID if !route.IsExitRoute() { - update, err = failoverRouteReturnUpdate(tx, isConnected, route) + route.Enabled = false + err = tx.Save(route).Error if err != nil { return nil, err } - route.Enabled = false - route.IsPrimary = false - err = tx.Save(route).Error + update, err = failoverRouteTx(tx, isLikelyConnected, route) if err != nil { return nil, err } @@ -160,6 +162,7 @@ func DisableRoute(tx *gorm.DB, if routes[i].IsExitRoute() { routes[i].Enabled = false routes[i].IsPrimary = false + err = tx.Save(&routes[i]).Error if err != nil { return nil, err @@ -168,26 +171,11 @@ func DisableRoute(tx *gorm.DB, } } - if routes == nil { - routes, err = GetNodeRoutes(tx, &node) - if err != nil { - return nil, err - } - } - - node.Routes = routes - // If update is empty, it means that one was not created // by failover (as a failover was not necessary), create // one and return to the caller. 
if update == nil { - update = &types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{ - &node, - }, - Message: "called from db.DisableRoute", - } + update = []types.NodeID{node.ID} } return update, nil @@ -195,18 +183,18 @@ func DisableRoute(tx *gorm.DB, func (hsdb *HSDatabase) DeleteRoute( id uint64, - isConnected map[key.MachinePublic]bool, -) (*types.StateUpdate, error) { - return Write(hsdb.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { - return DeleteRoute(tx, id, isConnected) + isLikelyConnected *xsync.MapOf[types.NodeID, bool], +) ([]types.NodeID, error) { + return Write(hsdb.DB, func(tx *gorm.DB) ([]types.NodeID, error) { + return DeleteRoute(tx, id, isLikelyConnected) }) } func DeleteRoute( tx *gorm.DB, id uint64, - isConnected map[key.MachinePublic]bool, -) (*types.StateUpdate, error) { + isLikelyConnected *xsync.MapOf[types.NodeID, bool], +) ([]types.NodeID, error) { route, err := GetRoute(tx, id) if err != nil { return nil, err @@ -218,9 +206,9 @@ func DeleteRoute( // Tailscale requires both IPv4 and IPv6 exit routes to // be enabled at the same time, as per // https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002 - var update *types.StateUpdate + var update []types.NodeID if !route.IsExitRoute() { - update, err = failoverRouteReturnUpdate(tx, isConnected, route) + update, err = failoverRouteTx(tx, isLikelyConnected, route) if err != nil { return nil, nil } @@ -229,12 +217,12 @@ func DeleteRoute( return nil, err } } else { - routes, err := GetNodeRoutes(tx, &node) + routes, err = GetNodeRoutes(tx, &node) if err != nil { return nil, err } - routesToDelete := types.Routes{} + var routesToDelete types.Routes for _, r := range routes { if r.IsExitRoute() { routesToDelete = append(routesToDelete, r) @@ -259,35 +247,37 @@ func DeleteRoute( node.Routes = routes if update == nil { - update = &types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{ - &node, - }, - Message: "called from 
db.DeleteRoute", - } + update = []types.NodeID{node.ID} } return update, nil } -func deleteNodeRoutes(tx *gorm.DB, node *types.Node, isConnected map[key.MachinePublic]bool) error { +func deleteNodeRoutes(tx *gorm.DB, node *types.Node, isLikelyConnected *xsync.MapOf[types.NodeID, bool]) ([]types.NodeID, error) { routes, err := GetNodeRoutes(tx, node) if err != nil { - return err + return nil, fmt.Errorf("getting node routes: %w", err) } + var changed []types.NodeID for i := range routes { if err := tx.Unscoped().Delete(&routes[i]).Error; err != nil { - return err + return nil, fmt.Errorf("deleting route(%d): %w", &routes[i].ID, err) } // TODO(kradalby): This is a bit too aggressive, we could probably // figure out which routes needs to be failed over rather than all. - failoverRouteReturnUpdate(tx, isConnected, &routes[i]) + chn, err := failoverRouteTx(tx, isLikelyConnected, &routes[i]) + if err != nil { + return changed, fmt.Errorf("failing over route after delete: %w", err) + } + + if chn != nil { + changed = append(changed, chn...) + } } - return nil + return changed, nil } // isUniquePrefix returns if there is another node providing the same route already. @@ -400,7 +390,7 @@ func SaveNodeRoutes(tx *gorm.DB, node *types.Node) (bool, error) { for prefix, exists := range advertisedRoutes { if !exists { route := types.Route{ - NodeID: node.ID, + NodeID: node.ID.Uint64(), Prefix: types.IPPrefix(prefix), Advertised: true, Enabled: false, @@ -415,11 +405,12 @@ func SaveNodeRoutes(tx *gorm.DB, node *types.Node) (bool, error) { return sendUpdate, nil } -// EnsureFailoverRouteIsAvailable takes a node and checks if the node's route -// currently have a functioning host that exposes the network. -func EnsureFailoverRouteIsAvailable( +// FailoverNodeRoutesIfNeccessary takes a node and checks if the node's route +// need to be failed over to another host. +// If needed, the failover will be attempted. 
+func FailoverNodeRoutesIfNeccessary( tx *gorm.DB, - isConnected map[key.MachinePublic]bool, + isLikelyConnected *xsync.MapOf[types.NodeID, bool], node *types.Node, ) (*types.StateUpdate, error) { nodeRoutes, err := GetNodeRoutes(tx, node) @@ -427,82 +418,57 @@ func EnsureFailoverRouteIsAvailable( return nil, nil } - var changedNodes types.Nodes + changedNodes := make(set.Set[types.NodeID]) + +nodeRouteLoop: for _, nodeRoute := range nodeRoutes { routes, err := getRoutesByPrefix(tx, netip.Prefix(nodeRoute.Prefix)) if err != nil { - return nil, err + return nil, fmt.Errorf("getting routes by prefix: %w", err) } for _, route := range routes { if route.IsPrimary { // if we have a primary route, and the node is connected // nothing needs to be done. - if isConnected[route.Node.MachineKey] { - continue + if val, ok := isLikelyConnected.Load(route.Node.ID); ok && val { + continue nodeRouteLoop } // if not, we need to failover the route - update, err := failoverRouteReturnUpdate(tx, isConnected, &route) - if err != nil { - return nil, err - } + failover := failoverRoute(isLikelyConnected, &route, routes) + if failover != nil { + err := failover.save(tx) + if err != nil { + return nil, fmt.Errorf("saving failover routes: %w", err) + } - if update != nil { - changedNodes = append(changedNodes, update.ChangeNodes...) 
+ changedNodes.Add(failover.old.Node.ID) + changedNodes.Add(failover.new.Node.ID) + + continue nodeRouteLoop } } } } + chng := changedNodes.Slice() + sort.SliceStable(chng, func(i, j int) bool { + return chng[i] < chng[j] + }) + if len(changedNodes) != 0 { return &types.StateUpdate{ Type: types.StatePeerChanged, - ChangeNodes: changedNodes, - Message: "called from db.EnsureFailoverRouteIsAvailable", + ChangeNodes: chng, + Message: "called from db.FailoverNodeRoutesIfNeccessary", }, nil } return nil, nil } -func failoverRouteReturnUpdate( - tx *gorm.DB, - isConnected map[key.MachinePublic]bool, - r *types.Route, -) (*types.StateUpdate, error) { - changedKeys, err := failoverRoute(tx, isConnected, r) - if err != nil { - return nil, err - } - - log.Trace(). - Interface("isConnected", isConnected). - Interface("changedKeys", changedKeys). - Msg("building route failover") - - if len(changedKeys) == 0 { - return nil, nil - } - - var nodes types.Nodes - for _, key := range changedKeys { - node, err := GetNodeByMachineKey(tx, key) - if err != nil { - return nil, err - } - - nodes = append(nodes, node) - } - - return &types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: nodes, - Message: "called from db.failoverRouteReturnUpdate", - }, nil -} - -// failoverRoute takes a route that is no longer available, +// failoverRouteTx takes a route that is no longer available, // this can be either from: // - being disabled // - being deleted @@ -510,11 +476,11 @@ func failoverRouteReturnUpdate( // // and tries to find a new route to take over its place. // If the given route was not primary, it returns early. 
-func failoverRoute( +func failoverRouteTx( tx *gorm.DB, - isConnected map[key.MachinePublic]bool, + isLikelyConnected *xsync.MapOf[types.NodeID, bool], r *types.Route, -) ([]key.MachinePublic, error) { +) ([]types.NodeID, error) { if r == nil { return nil, nil } @@ -532,14 +498,71 @@ func failoverRoute( routes, err := getRoutesByPrefix(tx, netip.Prefix(r.Prefix)) if err != nil { - return nil, err + return nil, fmt.Errorf("getting routes by prefix: %w", err) + } + + fo := failoverRoute(isLikelyConnected, r, routes) + if fo == nil { + return nil, nil + } + + err = fo.save(tx) + if err != nil { + return nil, fmt.Errorf("saving failover route: %w", err) + } + + log.Trace(). + Str("hostname", fo.new.Node.Hostname). + Msgf("set primary to new route, was: id(%d), host(%s), now: id(%d), host(%s)", fo.old.ID, fo.old.Node.Hostname, fo.new.ID, fo.new.Node.Hostname) + + // Return a list of the machinekeys of the changed nodes. + return []types.NodeID{fo.old.Node.ID, fo.new.Node.ID}, nil +} + +type failover struct { + old *types.Route + new *types.Route +} + +func (f *failover) save(tx *gorm.DB) error { + err := tx.Save(f.old).Error + if err != nil { + return fmt.Errorf("saving old primary: %w", err) + } + + err = tx.Save(f.new).Error + if err != nil { + return fmt.Errorf("saving new primary: %w", err) + } + + return nil +} + +func failoverRoute( + isLikelyConnected *xsync.MapOf[types.NodeID, bool], + routeToReplace *types.Route, + altRoutes types.Routes, +) *failover { + if routeToReplace == nil { + return nil + } + + // This route is not a primary route, and it is not + // being served to nodes. 
+ if !routeToReplace.IsPrimary { + return nil + } + + // We do not have to failover exit nodes + if routeToReplace.IsExitRoute() { + return nil } var newPrimary *types.Route // Find a new suitable route - for idx, route := range routes { - if r.ID == route.ID { + for idx, route := range altRoutes { + if routeToReplace.ID == route.ID { continue } @@ -547,9 +570,11 @@ func failoverRoute( continue } - if isConnected[route.Node.MachineKey] { - newPrimary = &routes[idx] - break + if isLikelyConnected != nil { + if val, ok := isLikelyConnected.Load(route.Node.ID); ok && val { + newPrimary = &altRoutes[idx] + break + } } } @@ -559,48 +584,23 @@ func failoverRoute( // the one currently marked as primary is the // best we got. if newPrimary == nil { - return nil, nil + return nil } - log.Trace(). - Str("hostname", newPrimary.Node.Hostname). - Msg("found new primary, updating db") - - // Remove primary from the old route - r.IsPrimary = false - err = tx.Save(&r).Error - if err != nil { - log.Error().Err(err).Msg("error disabling new primary route") - - return nil, err - } - - log.Trace(). - Str("hostname", newPrimary.Node.Hostname). - Msg("removed primary from old route") - - // Set primary for the new primary + routeToReplace.IsPrimary = false newPrimary.IsPrimary = true - err = tx.Save(&newPrimary).Error - if err != nil { - log.Error().Err(err).Msg("error enabling new primary route") - return nil, err + return &failover{ + old: routeToReplace, + new: newPrimary, } - - log.Trace(). - Str("hostname", newPrimary.Node.Hostname). - Msg("set primary to new route") - - // Return a list of the machinekeys of the changed nodes. 
- return []key.MachinePublic{r.Node.MachineKey, newPrimary.Node.MachineKey}, nil } func (hsdb *HSDatabase) EnableAutoApprovedRoutes( aclPolicy *policy.ACLPolicy, node *types.Node, -) (*types.StateUpdate, error) { - return Write(hsdb.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { +) error { + return hsdb.Write(func(tx *gorm.DB) error { return EnableAutoApprovedRoutes(tx, aclPolicy, node) }) } @@ -610,25 +610,19 @@ func EnableAutoApprovedRoutes( tx *gorm.DB, aclPolicy *policy.ACLPolicy, node *types.Node, -) (*types.StateUpdate, error) { - if len(node.IPAddresses) == 0 { - return nil, nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs +) error { + if node.IPv4 == nil && node.IPv6 == nil { + return nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs } routes, err := GetNodeAdvertisedRoutes(tx, node) if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error(). - Caller(). - Err(err). - Str("node", node.Hostname). - Msg("Could not get advertised routes for node") - - return nil, err + return fmt.Errorf("getting advertised routes for node(%s %d): %w", node.Hostname, node.ID, err) } log.Trace().Interface("routes", routes).Msg("routes for autoapproving") - approvedRoutes := types.Routes{} + var approvedRoutes types.Routes for _, advertisedRoute := range routes { if advertisedRoute.Enabled { @@ -639,12 +633,7 @@ func EnableAutoApprovedRoutes( netip.Prefix(advertisedRoute.Prefix), ) if err != nil { - log.Err(err). - Str("advertisedRoute", advertisedRoute.String()). - Uint64("nodeId", node.ID). - Msg("Failed to resolve autoApprovers for advertised route") - - return nil, err + return fmt.Errorf("failed to resolve autoApprovers for route(%d) for node(%s %d): %w", advertisedRoute.ID, node.Hostname, node.ID, err) } log.Trace(). 
@@ -661,40 +650,23 @@ func EnableAutoApprovedRoutes( // TODO(kradalby): figure out how to get this to depend on less stuff approvedIps, err := aclPolicy.ExpandAlias(types.Nodes{node}, approvedAlias) if err != nil { - log.Err(err). - Str("alias", approvedAlias). - Msg("Failed to expand alias when processing autoApprovers policy") - - return nil, err + return fmt.Errorf("expanding alias %q for autoApprovers: %w", approvedAlias, err) } // approvedIPs should contain all of node's IPs if it matches the rule, so check for first - if approvedIps.Contains(node.IPAddresses[0]) { + if approvedIps.Contains(*node.IPv4) { approvedRoutes = append(approvedRoutes, advertisedRoute) } } } } - update := &types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{}, - Message: "created in db.EnableAutoApprovedRoutes", - } - for _, approvedRoute := range approvedRoutes { - perHostUpdate, err := EnableRoute(tx, uint64(approvedRoute.ID)) + _, err := EnableRoute(tx, uint64(approvedRoute.ID)) if err != nil { - log.Err(err). - Str("approvedRoute", approvedRoute.String()). - Uint64("nodeId", node.ID). - Msg("Failed to enable approved route") - - return nil, err + return fmt.Errorf("enabling approved route(%d): %w", approvedRoute.ID, err) } - - update.ChangeNodes = append(update.ChangeNodes, perHostUpdate.ChangeNodes...) 
} - return update, nil + return nil } diff --git a/hscontrol/db/routes_test.go b/hscontrol/db/routes_test.go index f3357e2a..2324a21b 100644 --- a/hscontrol/db/routes_test.go +++ b/hscontrol/db/routes_test.go @@ -7,15 +7,26 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" - "github.com/stretchr/testify/assert" + "github.com/puzpuzpuz/xsync/v3" "gopkg.in/check.v1" "gorm.io/gorm" "tailscale.com/tailcfg" - "tailscale.com/types/key" + "tailscale.com/types/ptr" ) +var smap = func(m map[types.NodeID]bool) *xsync.MapOf[types.NodeID, bool] { + s := xsync.NewMapOf[types.NodeID, bool]() + + for k, v := range m { + s.Store(k, v) + } + + return s +} + func (s *Suite) TestGetRoutes(c *check.C) { user, err := db.CreateUser("test") c.Assert(err, check.IsNil) @@ -38,10 +49,11 @@ func (s *Suite) TestGetRoutes(c *check.C) { Hostname: "test_get_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), Hostinfo: &hostInfo, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) su, err := db.SaveNodeRoutes(&node) c.Assert(err, check.IsNil) @@ -88,10 +100,11 @@ func (s *Suite) TestGetEnableRoutes(c *check.C) { Hostname: "test_enable_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), Hostinfo: &hostInfo, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) sendUpdate, err := db.SaveNodeRoutes(&node) c.Assert(err, check.IsNil) @@ -160,10 +173,11 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) { Hostname: "test_enable_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), Hostinfo: &hostInfo1, } - db.DB.Save(&node1) + trx := db.DB.Save(&node1) + c.Assert(trx.Error, 
check.IsNil) sendUpdate, err := db.SaveNodeRoutes(&node1) c.Assert(err, check.IsNil) @@ -183,7 +197,7 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) { Hostname: "test_enable_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), Hostinfo: &hostInfo2, } db.DB.Save(&node2) @@ -242,11 +256,12 @@ func (s *Suite) TestDeleteRoutes(c *check.C) { Hostname: "test_enable_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), Hostinfo: &hostInfo1, LastSeen: &now, } - db.DB.Save(&node1) + trx := db.DB.Save(&node1) + c.Assert(trx.Error, check.IsNil) sendUpdate, err := db.SaveNodeRoutes(&node1) c.Assert(err, check.IsNil) @@ -262,7 +277,7 @@ func (s *Suite) TestDeleteRoutes(c *check.C) { c.Assert(err, check.IsNil) // TODO(kradalby): check stateupdate - _, err = db.DeleteRoute(uint64(routes[0].ID), map[key.MachinePublic]bool{}) + _, err = db.DeleteRoute(uint64(routes[0].ID), nil) c.Assert(err, check.IsNil) enabledRoutes1, err := db.GetEnabledRoutes(&node1) @@ -270,22 +285,393 @@ func (s *Suite) TestDeleteRoutes(c *check.C) { c.Assert(len(enabledRoutes1), check.Equals, 1) } -var ipp = func(s string) types.IPPrefix { return types.IPPrefix(netip.MustParsePrefix(s)) } +var ( + ipp = func(s string) types.IPPrefix { return types.IPPrefix(netip.MustParsePrefix(s)) } + mkNode = func(nid types.NodeID) types.Node { + return types.Node{ID: nid} + } +) -func TestFailoverRoute(t *testing.T) { - machineKeys := []key.MachinePublic{ - key.NewMachine().Public(), - key.NewMachine().Public(), - key.NewMachine().Public(), - key.NewMachine().Public(), +var np = func(nid types.NodeID) *types.Node { + no := mkNode(nid) + return &no +} + +var r = func(id uint, nid types.NodeID, prefix types.IPPrefix, enabled, primary bool) types.Route { + return types.Route{ + Model: gorm.Model{ + ID: id, + }, + Node: mkNode(nid), + Prefix: prefix, + Enabled: enabled, + 
IsPrimary: primary, + } +} + +var rp = func(id uint, nid types.NodeID, prefix types.IPPrefix, enabled, primary bool) *types.Route { + ro := r(id, nid, prefix, enabled, primary) + return &ro +} + +func dbForTest(t *testing.T, testName string) *HSDatabase { + t.Helper() + + tmpDir, err := os.MkdirTemp("", testName) + if err != nil { + t.Fatalf("creating tempdir: %s", err) } + dbPath := tmpDir + "/headscale_test.db" + + db, err = NewHeadscaleDatabase( + types.DatabaseConfig{ + Type: "sqlite3", + Sqlite: types.SqliteConfig{ + Path: dbPath, + }, + }, + "", + ) + if err != nil { + t.Fatalf("setting up database: %s", err) + } + + t.Logf("database set up at: %s", dbPath) + + return db +} + +func TestFailoverNodeRoutesIfNeccessary(t *testing.T) { + su := func(nids ...types.NodeID) *types.StateUpdate { + return &types.StateUpdate{ + ChangeNodes: nids, + } + } + tests := []struct { + name string + nodes types.Nodes + routes types.Routes + isConnected []map[types.NodeID]bool + want []*types.StateUpdate + wantErr bool + }{ + { + name: "n1-down-n2-down-n1-up", + nodes: types.Nodes{ + np(1), + np(2), + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + }, + // n2 goes down + { + 1: false, + 2: false, + }, + // n1 comes up + { + 1: true, + 2: false, + }, + }, + want: []*types.StateUpdate{ + // route changes from 1 -> 2 + su(1, 2), + // both down, no change + nil, + // route changes from 2 -> 1 + su(1, 2), + }, + }, + { + name: "n1-recon-n2-down-n1-recon-n2-up", + nodes: types.Nodes{ + np(1), + np(2), + np(1), + np(2), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 up recon = noop + { + 1: true, + 2: true, + }, + // n2 goes down + { + 1: true, + 2: false, + }, + // n1 up recon = noop + { + 1: true, + 
2: false, + }, + // n2 comes back up + { + 1: true, + 2: false, + }, + }, + want: []*types.StateUpdate{ + nil, + nil, + nil, + nil, + }, + }, + { + name: "n1-recon-n2-down-n1-recon-n2-up", + nodes: types.Nodes{ + np(1), + np(1), + np(3), + np(3), + np(2), + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + r(3, 3, ipp("10.0.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: false, + 3: true, + }, + // n1 comes up + { + 1: true, + 2: false, + 3: true, + }, + // n3 goes down + { + 1: true, + 2: false, + 3: false, + }, + // n3 comes up + { + 1: true, + 2: false, + 3: true, + }, + // n2 comes up + { + 1: true, + 2: true, + 3: true, + }, + // n1 goes down + { + 1: false, + 2: true, + 3: true, + }, + }, + want: []*types.StateUpdate{ + su(1, 3), // n1 -> n3 + nil, + su(1, 3), // n3 -> n1 + nil, + nil, + su(1, 2), // n1 -> n2 + }, + }, + { + name: "n1-recon-n2-dis-n3-take", + nodes: types.Nodes{ + np(1), + np(3), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), false, false), + r(3, 3, ipp("10.0.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + 3: true, + }, + // n3 goes down + { + 1: false, + 2: true, + 3: false, + }, + }, + want: []*types.StateUpdate{ + su(1, 3), // n1 -> n3 + nil, + }, + }, + { + name: "multi-n1-oneforeach-n2-n3", + nodes: types.Nodes{ + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(4, 1, ipp("10.1.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + r(3, 3, ipp("10.1.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + 3: true, + }, + }, + want: []*types.StateUpdate{ + su(1, 2, 3), // n1 -> n2,n3 + }, + }, + { + name: "multi-n1-onefor-n2-disabled-n3", + nodes: types.Nodes{ + np(1), + }, + 
routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(4, 1, ipp("10.1.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + r(3, 3, ipp("10.1.0.0/24"), false, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + 3: true, + }, + }, + want: []*types.StateUpdate{ + su(1, 2), // n1 -> n2, n3 is not enabled + }, + }, + { + name: "multi-n1-onefor-n2-offline-n3", + nodes: types.Nodes{ + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(4, 1, ipp("10.1.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + r(3, 3, ipp("10.1.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + 3: false, + }, + }, + want: []*types.StateUpdate{ + su(1, 2), // n1 -> n2, n3 is offline + }, + }, + { + name: "multi-n2-back-to-multi-n1", + nodes: types.Nodes{ + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, false), + r(4, 1, ipp("10.1.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, true), + r(3, 3, ipp("10.1.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: true, + 2: false, + 3: true, + }, + }, + want: []*types.StateUpdate{ + su(1, 2), // n2 -> n1 + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if (len(tt.isConnected) != len(tt.want)) && len(tt.want) != len(tt.nodes) { + t.Fatalf("nodes (%d), isConnected updates (%d), wants (%d) must be equal", len(tt.nodes), len(tt.isConnected), len(tt.want)) + } + + db := dbForTest(t, tt.name) + + user := types.User{Name: tt.name} + if err := db.DB.Save(&user).Error; err != nil { + t.Fatalf("failed to create user: %s", err) + } + + for _, route := range tt.routes { + route.Node.User = user + if err := db.DB.Save(&route.Node).Error; err != nil { + t.Fatalf("failed to create node: %s", err) + } + if err := db.DB.Save(&route).Error; err != nil { + t.Fatalf("failed 
to create route: %s", err) + } + } + + for step := range len(tt.isConnected) { + node := tt.nodes[step] + isConnected := tt.isConnected[step] + want := tt.want[step] + + got, err := Write(db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { + return FailoverNodeRoutesIfNeccessary(tx, smap(isConnected), node) + }) + + if (err != nil) != tt.wantErr { + t.Errorf("failoverRoute() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(types.StateUpdate{}, "Type", "Message")); diff != "" { + t.Errorf("failoverRoute() unexpected result (-want +got):\n%s", diff) + } + } + }) + } +} + +func TestFailoverRouteTx(t *testing.T) { tests := []struct { name string failingRoute types.Route routes types.Routes - isConnected map[key.MachinePublic]bool - want []key.MachinePublic + isConnected map[types.NodeID]bool + want []types.NodeID wantErr bool }{ { @@ -301,10 +687,8 @@ func TestFailoverRoute(t *testing.T) { Model: gorm.Model{ ID: 1, }, - Prefix: ipp("10.0.0.0/24"), - Node: types.Node{ - MachineKey: machineKeys[0], - }, + Prefix: ipp("10.0.0.0/24"), + Node: types.Node{}, IsPrimary: false, }, routes: types.Routes{}, @@ -317,10 +701,8 @@ func TestFailoverRoute(t *testing.T) { Model: gorm.Model{ ID: 1, }, - Prefix: ipp("0.0.0.0/0"), - Node: types.Node{ - MachineKey: machineKeys[0], - }, + Prefix: ipp("0.0.0.0/0"), + Node: types.Node{}, IsPrimary: true, }, routes: types.Routes{}, @@ -335,7 +717,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, }, @@ -346,7 +728,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, }, @@ -362,7 +744,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -374,7 +756,7 @@ func 
TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -385,19 +767,19 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: false, Enabled: true, }, }, - isConnected: map[key.MachinePublic]bool{ - machineKeys[0]: false, - machineKeys[1]: true, + isConnected: map[types.NodeID]bool{ + 1: false, + 2: true, }, - want: []key.MachinePublic{ - machineKeys[0], - machineKeys[1], + want: []types.NodeID{ + 1, + 2, }, wantErr: false, }, @@ -409,7 +791,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: false, Enabled: true, @@ -421,7 +803,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -432,7 +814,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: false, Enabled: true, @@ -449,7 +831,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: true, Enabled: true, @@ -461,7 +843,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: false, Enabled: true, @@ -472,7 +854,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: true, Enabled: true, @@ -483,20 +865,19 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[2], + ID: 3, }, IsPrimary: false, Enabled: true, }, }, - isConnected: map[key.MachinePublic]bool{ - machineKeys[0]: true, - machineKeys[1]: 
true, - machineKeys[2]: true, + isConnected: map[types.NodeID]bool{ + 1: true, + 2: true, + 3: true, }, - want: []key.MachinePublic{ - machineKeys[1], - machineKeys[0], + want: []types.NodeID{ + 2, 1, }, wantErr: false, }, @@ -508,7 +889,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -520,7 +901,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -532,15 +913,15 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[3], + ID: 4, }, IsPrimary: false, Enabled: true, }, }, - isConnected: map[key.MachinePublic]bool{ - machineKeys[0]: true, - machineKeys[3]: false, + isConnected: map[types.NodeID]bool{ + 1: true, + 4: false, }, want: nil, wantErr: false, @@ -553,7 +934,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -565,7 +946,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -577,7 +958,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[3], + ID: 4, }, IsPrimary: false, Enabled: true, @@ -588,20 +969,20 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: true, Enabled: true, }, }, - isConnected: map[key.MachinePublic]bool{ - machineKeys[0]: false, - machineKeys[1]: true, - machineKeys[3]: false, + isConnected: map[types.NodeID]bool{ + 1: false, + 2: true, + 4: false, }, - want: []key.MachinePublic{ - machineKeys[0], - machineKeys[1], + want: []types.NodeID{ + 1, + 2, }, 
wantErr: false, }, @@ -613,7 +994,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -625,7 +1006,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -637,7 +1018,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: false, Enabled: false, @@ -650,28 +1031,24 @@ func TestFailoverRoute(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "failover-db-test") - assert.NoError(t, err) - - db, err = NewHeadscaleDatabase( - types.DatabaseConfig{ - Type: "sqlite3", - Sqlite: types.SqliteConfig{ - Path: tmpDir + "/headscale_test.db", - }, - }, - "", - ) - assert.NoError(t, err) + db := dbForTest(t, tt.name) + user := types.User{Name: "test"} + if err := db.DB.Save(&user).Error; err != nil { + t.Fatalf("failed to create user: %s", err) + } for _, route := range tt.routes { + route.Node.User = user + if err := db.DB.Save(&route.Node).Error; err != nil { + t.Fatalf("failed to create node: %s", err) + } if err := db.DB.Save(&route).Error; err != nil { t.Fatalf("failed to create route: %s", err) } } - got, err := Write(db.DB, func(tx *gorm.DB) ([]key.MachinePublic, error) { - return failoverRoute(tx, tt.isConnected, &tt.failingRoute) + got, err := Write(db.DB, func(tx *gorm.DB) ([]types.NodeID, error) { + return failoverRouteTx(tx, smap(tt.isConnected), &tt.failingRoute) }) if (err != nil) != tt.wantErr { @@ -687,230 +1064,177 @@ func TestFailoverRoute(t *testing.T) { } } -// func TestDisableRouteFailover(t *testing.T) { -// machineKeys := []key.MachinePublic{ -// key.NewMachine().Public(), -// key.NewMachine().Public(), -// key.NewMachine().Public(), -// key.NewMachine().Public(), -// } +func 
TestFailoverRoute(t *testing.T) { + r := func(id uint, nid types.NodeID, prefix types.IPPrefix, enabled, primary bool) types.Route { + return types.Route{ + Model: gorm.Model{ + ID: id, + }, + Node: types.Node{ + ID: nid, + }, + Prefix: prefix, + Enabled: enabled, + IsPrimary: primary, + } + } + rp := func(id uint, nid types.NodeID, prefix types.IPPrefix, enabled, primary bool) *types.Route { + ro := r(id, nid, prefix, enabled, primary) + return &ro + } + tests := []struct { + name string + failingRoute types.Route + routes types.Routes + isConnected map[types.NodeID]bool + want *failover + }{ + { + name: "no-route", + failingRoute: types.Route{}, + routes: types.Routes{}, + want: nil, + }, + { + name: "no-prime", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), false, false), -// tests := []struct { -// name string -// nodes types.Nodes + routes: types.Routes{}, + want: nil, + }, + { + name: "exit-node", + failingRoute: r(1, 1, ipp("0.0.0.0/0"), false, true), + routes: types.Routes{}, + want: nil, + }, + { + name: "no-failover-single-route", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), false, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), false, true), + }, + want: nil, + }, + { + name: "failover-primary", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + }, + isConnected: map[types.NodeID]bool{ + 1: false, + 2: true, + }, + want: &failover{ + old: rp(1, 1, ipp("10.0.0.0/24"), true, false), + new: rp(2, 2, ipp("10.0.0.0/24"), true, true), + }, + }, + { + name: "failover-none-primary", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, false), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + }, + want: nil, + }, + { + name: "failover-primary-multi-route", + failingRoute: r(2, 2, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, false), 
+ r(2, 2, ipp("10.0.0.0/24"), true, true), + r(3, 3, ipp("10.0.0.0/24"), true, false), + }, + isConnected: map[types.NodeID]bool{ + 1: true, + 2: true, + 3: true, + }, + want: &failover{ + old: rp(2, 2, ipp("10.0.0.0/24"), true, false), + new: rp(1, 1, ipp("10.0.0.0/24"), true, true), + }, + }, + { + name: "failover-primary-no-online", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 4, ipp("10.0.0.0/24"), true, false), + }, + isConnected: map[types.NodeID]bool{ + 1: true, + 4: false, + }, + want: nil, + }, + { + name: "failover-primary-one-not-online", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 4, ipp("10.0.0.0/24"), true, false), + r(3, 2, ipp("10.0.0.0/24"), true, false), + }, + isConnected: map[types.NodeID]bool{ + 1: false, + 2: true, + 4: false, + }, + want: &failover{ + old: rp(1, 1, ipp("10.0.0.0/24"), true, false), + new: rp(3, 2, ipp("10.0.0.0/24"), true, true), + }, + }, + { + name: "failover-primary-none-enabled", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, false), + r(2, 2, ipp("10.0.0.0/24"), false, true), + }, + want: nil, + }, + } -// routeID uint64 -// isConnected map[key.MachinePublic]bool + cmps := append( + util.Comparers, + cmp.Comparer(func(x, y types.IPPrefix) bool { + return netip.Prefix(x) == netip.Prefix(y) + }), + ) -// wantMachineKey key.MachinePublic -// wantErr string -// }{ -// { -// name: "single-route", -// nodes: types.Nodes{ -// &types.Node{ -// ID: 0, -// MachineKey: machineKeys[0], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 1, -// }, -// Prefix: ipp("10.0.0.0/24"), -// Node: types.Node{ -// MachineKey: machineKeys[0], -// }, -// IsPrimary: true, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// 
}, -// }, -// }, -// }, -// routeID: 1, -// wantMachineKey: machineKeys[0], -// }, -// { -// name: "failover-simple", -// nodes: types.Nodes{ -// &types.Node{ -// ID: 0, -// MachineKey: machineKeys[0], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 1, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: true, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// &types.Node{ -// ID: 1, -// MachineKey: machineKeys[1], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 2, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: false, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// }, -// routeID: 1, -// wantMachineKey: machineKeys[1], -// }, -// { -// name: "no-failover-offline", -// nodes: types.Nodes{ -// &types.Node{ -// ID: 0, -// MachineKey: machineKeys[0], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 1, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: true, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// &types.Node{ -// ID: 1, -// MachineKey: machineKeys[1], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 2, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: false, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// }, -// isConnected: map[key.MachinePublic]bool{ -// machineKeys[0]: true, -// machineKeys[1]: false, -// }, -// routeID: 1, -// wantMachineKey: machineKeys[1], -// }, -// { -// name: "failover-to-online", -// nodes: types.Nodes{ -// &types.Node{ -// ID: 0, -// MachineKey: machineKeys[0], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 1, -// }, -// Prefix: ipp("10.0.0.0/24"), -// 
IsPrimary: true, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// &types.Node{ -// ID: 1, -// MachineKey: machineKeys[1], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 2, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: false, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// }, -// isConnected: map[key.MachinePublic]bool{ -// machineKeys[0]: true, -// machineKeys[1]: true, -// }, -// routeID: 1, -// wantMachineKey: machineKeys[1], -// }, -// } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotf := failoverRoute(smap(tt.isConnected), &tt.failingRoute, tt.routes) -// for _, tt := range tests { -// t.Run(tt.name, func(t *testing.T) { -// datab, err := NewHeadscaleDatabase("sqlite3", ":memory:", false, []netip.Prefix{}, "") -// assert.NoError(t, err) + if tt.want == nil && gotf != nil { + t.Fatalf("expected nil, got %+v", gotf) + } -// // bootstrap db -// datab.DB.Transaction(func(tx *gorm.DB) error { -// for _, node := range tt.nodes { -// err := tx.Save(node).Error -// if err != nil { -// return err -// } + if gotf == nil && tt.want != nil { + t.Fatalf("expected %+v, got nil", tt.want) + } -// _, err = SaveNodeRoutes(tx, node) -// if err != nil { -// return err -// } -// } + if tt.want != nil && gotf != nil { + want := map[string]*types.Route{ + "new": tt.want.new, + "old": tt.want.old, + } -// return nil -// }) + got := map[string]*types.Route{ + "new": gotf.new, + "old": gotf.old, + } -// got, err := Write(datab.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { -// return DisableRoute(tx, tt.routeID, tt.isConnected) -// }) - -// // if (err.Error() != "") != tt.wantErr { -// // t.Errorf("failoverRoute() error = %v, wantErr %v", err, tt.wantErr) - -// // return -// // } - -// if len(got.ChangeNodes) != 1 { -// t.Errorf("expected 
update with one machine, got %d", len(got.ChangeNodes)) -// } - -// if diff := cmp.Diff(tt.wantMachineKey, got.ChangeNodes[0].MachineKey, util.Comparers...); diff != "" { -// t.Errorf("DisableRoute() unexpected result (-want +got):\n%s", diff) -// } -// }) -// } -// } + if diff := cmp.Diff(want, got, cmps...); diff != "" { + t.Fatalf("failoverRoute unexpected result (-want +got):\n%s", diff) + } + } + }) + } +} diff --git a/hscontrol/db/suite_test.go b/hscontrol/db/suite_test.go index 1b97ce06..d546b33d 100644 --- a/hscontrol/db/suite_test.go +++ b/hscontrol/db/suite_test.go @@ -36,10 +36,18 @@ func (s *Suite) ResetDB(c *check.C) { // } var err error - tmpDir, err = os.MkdirTemp("", "headscale-db-test-*") + db, err = newTestDB() if err != nil { c.Fatal(err) } +} + +func newTestDB() (*HSDatabase, error) { + var err error + tmpDir, err = os.MkdirTemp("", "headscale-db-test-*") + if err != nil { + return nil, err + } log.Printf("database path: %s", tmpDir+"/headscale_test.db") @@ -53,6 +61,8 @@ func (s *Suite) ResetDB(c *check.C) { "", ) if err != nil { - c.Fatal(err) + return nil, err } + + return db, nil } diff --git a/hscontrol/db/testdata/0-22-3-to-0-23-0-routes-are-dropped-2063.sqlite b/hscontrol/db/testdata/0-22-3-to-0-23-0-routes-are-dropped-2063.sqlite new file mode 100644 index 00000000..10e1aaec Binary files /dev/null and b/hscontrol/db/testdata/0-22-3-to-0-23-0-routes-are-dropped-2063.sqlite differ diff --git a/hscontrol/db/testdata/0-22-3-to-0-23-0-routes-fail-foreign-key-2076.sqlite b/hscontrol/db/testdata/0-22-3-to-0-23-0-routes-fail-foreign-key-2076.sqlite new file mode 100644 index 00000000..dbe96962 Binary files /dev/null and b/hscontrol/db/testdata/0-22-3-to-0-23-0-routes-fail-foreign-key-2076.sqlite differ diff --git a/hscontrol/db/users.go b/hscontrol/db/users.go index 99e93393..1cf8e92f 100644 --- a/hscontrol/db/users.go +++ b/hscontrol/db/users.go @@ -2,10 +2,10 @@ package db import ( "errors" + "fmt" 
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" - "github.com/rs/zerolog/log" "gorm.io/gorm" ) @@ -34,12 +34,7 @@ func CreateUser(tx *gorm.DB, name string) (*types.User, error) { } user.Name = name if err := tx.Create(&user).Error; err != nil { - log.Error(). - Str("func", "CreateUser"). - Err(err). - Msg("Could not create row") - - return nil, err + return nil, fmt.Errorf("creating user: %w", err) } return &user, nil diff --git a/hscontrol/db/users_test.go b/hscontrol/db/users_test.go index b36e8613..0629480c 100644 --- a/hscontrol/db/users_test.go +++ b/hscontrol/db/users_test.go @@ -5,6 +5,7 @@ import ( "github.com/juanfont/headscale/hscontrol/util" "gopkg.in/check.v1" "gorm.io/gorm" + "tailscale.com/types/ptr" ) func (s *Suite) TestCreateAndDestroyUser(c *check.C) { @@ -51,9 +52,10 @@ func (s *Suite) TestDestroyUserErrors(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) err = db.DestroyUser("test") c.Assert(err, check.Equals, ErrUserStillHasNodes) @@ -103,9 +105,10 @@ func (s *Suite) TestSetMachineUser(c *check.C) { Hostname: "testnode", UserID: oldUser.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: ptr.To(pak.ID), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) c.Assert(node.UserID, check.Equals, oldUser.ID) err = db.AssignNodeToUser(&node, newUser.Name) diff --git a/hscontrol/derp/derp.go b/hscontrol/derp/derp.go index 83c200a2..5d4b24f2 100644 --- a/hscontrol/derp/derp.go +++ b/hscontrol/derp/derp.go @@ -31,7 +31,7 @@ func loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) { } func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) { - ctx, cancel := context.WithTimeout(context.Background(), types.HTTPReadTimeout) + ctx, cancel := 
context.WithTimeout(context.Background(), types.HTTPTimeout) defer cancel() req, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil) @@ -40,7 +40,7 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) { } client := http.Client{ - Timeout: types.HTTPReadTimeout, + Timeout: types.HTTPTimeout, } resp, err := client.Do(req) @@ -81,7 +81,7 @@ func mergeDERPMaps(derpMaps []*tailcfg.DERPMap) *tailcfg.DERPMap { } func GetDERPMap(cfg types.DERPConfig) *tailcfg.DERPMap { - derpMaps := make([]*tailcfg.DERPMap, 0) + var derpMaps []*tailcfg.DERPMap for _, path := range cfg.Paths { log.Debug(). @@ -125,10 +125,5 @@ func GetDERPMap(cfg types.DERPConfig) *tailcfg.DERPMap { log.Trace().Interface("derpMap", derpMap).Msg("DERPMap loaded") - if len(derpMap.Regions) == 0 { - log.Warn(). - Msg("DERP map is empty, not a single DERP map datasource was loaded correctly or contained a region") - } - return derpMap } diff --git a/hscontrol/derp/server/derp_server.go b/hscontrol/derp/server/derp_server.go index 52a63e9f..0b0c9b16 100644 --- a/hscontrol/derp/server/derp_server.go +++ b/hscontrol/derp/server/derp_server.go @@ -204,7 +204,7 @@ func DERPProbeHandler( } } -// DERPBootstrapDNSHandler implements the /bootsrap-dns endpoint +// DERPBootstrapDNSHandler implements the /bootstrap-dns endpoint // Described in https://github.com/tailscale/tailscale/issues/1405, // this endpoint provides a way to help a client when it fails to start up // because its DNS are broken. 
diff --git a/hscontrol/grpcv1.go b/hscontrol/grpcv1.go index 379502c7..596748f2 100644 --- a/hscontrol/grpcv1.go +++ b/hscontrol/grpcv1.go @@ -3,7 +3,10 @@ package hscontrol import ( "context" + "errors" "fmt" + "io" + "os" "sort" "strings" "time" @@ -11,12 +14,14 @@ import ( "github.com/rs/zerolog/log" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" "gorm.io/gorm" "tailscale.com/tailcfg" "tailscale.com/types/key" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/db" + "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" ) @@ -144,7 +149,7 @@ func (api headscaleV1APIServer) ExpirePreAuthKey( ctx context.Context, request *v1.ExpirePreAuthKeyRequest, ) (*v1.ExpirePreAuthKeyResponse, error) { - err := api.h.db.DB.Transaction(func(tx *gorm.DB) error { + err := api.h.db.Write(func(tx *gorm.DB) error { preAuthKey, err := db.GetPreAuthKey(tx, request.GetUser(), request.Key) if err != nil { return err @@ -195,7 +200,7 @@ func (api headscaleV1APIServer) RegisterNode( return nil, err } - addrs, err := api.h.ipAlloc.Next() + ipv4, ipv6, err := api.h.ipAlloc.Next() if err != nil { return nil, err } @@ -208,7 +213,7 @@ func (api headscaleV1APIServer) RegisterNode( request.GetUser(), nil, util.RegisterMethodCLI, - addrs, + ipv4, ipv6, ) }) if err != nil { @@ -222,7 +227,7 @@ func (api headscaleV1APIServer) GetNode( ctx context.Context, request *v1.GetNodeRequest, ) (*v1.GetNodeResponse, error) { - node, err := api.h.db.GetNodeByID(request.GetNodeId()) + node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId())) if err != nil { return nil, err } @@ -231,7 +236,7 @@ func (api headscaleV1APIServer) GetNode( // Populate the online field based on // currently connected nodes. 
- resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey) + resp.Online = api.h.nodeNotifier.IsConnected(node.ID) return &v1.GetNodeResponse{Node: resp}, nil } @@ -248,12 +253,12 @@ func (api headscaleV1APIServer) SetTags( } node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) { - err := db.SetTags(tx, request.GetNodeId(), request.GetTags()) + err := db.SetTags(tx, types.NodeID(request.GetNodeId()), request.GetTags()) if err != nil { return nil, err } - return db.GetNodeByID(tx, request.GetNodeId()) + return db.GetNodeByID(tx, types.NodeID(request.GetNodeId())) }) if err != nil { return &v1.SetTagsResponse{ @@ -261,15 +266,12 @@ func (api headscaleV1APIServer) SetTags( }, status.Error(codes.InvalidArgument, err.Error()) } - stateUpdate := types.StateUpdate{ + ctx = types.NotifyCtx(ctx, "cli-settags", node.Hostname) + api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{ Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, + ChangeNodes: []types.NodeID{node.ID}, Message: "called from api.SetTags", - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-settags", node.Hostname) - api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + }, node.ID) log.Trace(). Str("node", node.Hostname). 
@@ -281,13 +283,13 @@ func (api headscaleV1APIServer) SetTags( func validateTag(tag string) error { if strings.Index(tag, "tag:") != 0 { - return fmt.Errorf("tag must start with the string 'tag:'") + return errors.New("tag must start with the string 'tag:'") } if strings.ToLower(tag) != tag { - return fmt.Errorf("tag should be lowercase") + return errors.New("tag should be lowercase") } if len(strings.Fields(tag)) > 1 { - return fmt.Errorf("tag should not contains space") + return errors.New("tag should not contains space") } return nil } @@ -296,26 +298,30 @@ func (api headscaleV1APIServer) DeleteNode( ctx context.Context, request *v1.DeleteNodeRequest, ) (*v1.DeleteNodeResponse, error) { - node, err := api.h.db.GetNodeByID(request.GetNodeId()) + node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId())) if err != nil { return nil, err } - err = api.h.db.DeleteNode( + changedNodes, err := api.h.db.DeleteNode( node, - api.h.nodeNotifier.ConnectedMap(), + api.h.nodeNotifier.LikelyConnectedMap(), ) if err != nil { return nil, err } - stateUpdate := types.StateUpdate{ + ctx = types.NotifyCtx(ctx, "cli-deletenode", node.Hostname) + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ Type: types.StatePeerRemoved, - Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)}, - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-deletenode", node.Hostname) - api.h.nodeNotifier.NotifyAll(ctx, stateUpdate) + Removed: []types.NodeID{node.ID}, + }) + + if changedNodes != nil { + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: changedNodes, + }) } return &v1.DeleteNodeResponse{}, nil @@ -330,33 +336,27 @@ func (api headscaleV1APIServer) ExpireNode( node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) { db.NodeSetExpiry( tx, - request.GetNodeId(), + types.NodeID(request.GetNodeId()), now, ) - return db.GetNodeByID(tx, request.GetNodeId()) + return db.GetNodeByID(tx, 
types.NodeID(request.GetNodeId())) }) if err != nil { return nil, err } - selfUpdate := types.StateUpdate{ - Type: types.StateSelfUpdate, - ChangeNodes: types.Nodes{node}, - } - if selfUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-expirenode-self", node.Hostname) - api.h.nodeNotifier.NotifyByMachineKey( - ctx, - selfUpdate, - node.MachineKey) - } + ctx = types.NotifyCtx(ctx, "cli-expirenode-self", node.Hostname) + api.h.nodeNotifier.NotifyByNodeID( + ctx, + types.StateUpdate{ + Type: types.StateSelfUpdate, + ChangeNodes: []types.NodeID{node.ID}, + }, + node.ID) - stateUpdate := types.StateUpdateExpire(node.ID, now) - if stateUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-expirenode-peers", node.Hostname) - api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + ctx = types.NotifyCtx(ctx, "cli-expirenode-peers", node.Hostname) + api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, now), node.ID) log.Trace(). Str("node", node.Hostname). @@ -373,28 +373,25 @@ func (api headscaleV1APIServer) RenameNode( node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) { err := db.RenameNode( tx, - request.GetNodeId(), + types.NodeID(request.GetNodeId()), request.GetNewName(), ) if err != nil { return nil, err } - return db.GetNodeByID(tx, request.GetNodeId()) + return db.GetNodeByID(tx, types.NodeID(request.GetNodeId())) }) if err != nil { return nil, err } - stateUpdate := types.StateUpdate{ + ctx = types.NotifyCtx(ctx, "cli-renamenode", node.Hostname) + api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{ Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, + ChangeNodes: []types.NodeID{node.ID}, Message: "called from api.RenameNode", - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-renamenode", node.Hostname) - api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + }, node.ID) log.Trace(). Str("node", node.Hostname). 
@@ -408,7 +405,7 @@ func (api headscaleV1APIServer) ListNodes( ctx context.Context, request *v1.ListNodesRequest, ) (*v1.ListNodesResponse, error) { - isConnected := api.h.nodeNotifier.ConnectedMap() + isLikelyConnected := api.h.nodeNotifier.LikelyConnectedMap() if request.GetUser() != "" { nodes, err := db.Read(api.h.db.DB, func(rx *gorm.DB) (types.Nodes, error) { return db.ListNodesByUser(rx, request.GetUser()) @@ -423,7 +420,9 @@ func (api headscaleV1APIServer) ListNodes( // Populate the online field based on // currently connected nodes. - resp.Online = isConnected[node.MachineKey] + if val, ok := isLikelyConnected.Load(node.ID); ok && val { + resp.Online = true + } response[index] = resp } @@ -446,7 +445,9 @@ func (api headscaleV1APIServer) ListNodes( // Populate the online field based on // currently connected nodes. - resp.Online = isConnected[node.MachineKey] + if val, ok := isLikelyConnected.Load(node.ID); ok && val { + resp.Online = true + } validTags, invalidTags := api.h.ACLPolicy.TagsOfNode( node, @@ -463,7 +464,7 @@ func (api headscaleV1APIServer) MoveNode( ctx context.Context, request *v1.MoveNodeRequest, ) (*v1.MoveNodeResponse, error) { - node, err := api.h.db.GetNodeByID(request.GetNodeId()) + node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId())) if err != nil { return nil, err } @@ -476,6 +477,24 @@ func (api headscaleV1APIServer) MoveNode( return &v1.MoveNodeResponse{Node: node.Proto()}, nil } +func (api headscaleV1APIServer) BackfillNodeIPs( + ctx context.Context, + request *v1.BackfillNodeIPsRequest, +) (*v1.BackfillNodeIPsResponse, error) { + log.Trace().Msg("Backfill called") + + if !request.Confirmed { + return nil, errors.New("not confirmed, aborting") + } + + changes, err := api.h.db.BackfillNodeIPs(api.h.ipAlloc) + if err != nil { + return nil, err + } + + return &v1.BackfillNodeIPsResponse{Changes: changes}, nil +} + func (api headscaleV1APIServer) GetRoutes( ctx context.Context, request *v1.GetRoutesRequest, @@ -503,7 
+522,7 @@ func (api headscaleV1APIServer) EnableRoute( return nil, err } - if update != nil && update.Valid() { + if update != nil { ctx := types.NotifyCtx(ctx, "cli-enableroute", "unknown") api.h.nodeNotifier.NotifyAll( ctx, *update) @@ -516,17 +535,19 @@ func (api headscaleV1APIServer) DisableRoute( ctx context.Context, request *v1.DisableRouteRequest, ) (*v1.DisableRouteResponse, error) { - isConnected := api.h.nodeNotifier.ConnectedMap() - update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { - return db.DisableRoute(tx, request.GetRouteId(), isConnected) + update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) ([]types.NodeID, error) { + return db.DisableRoute(tx, request.GetRouteId(), api.h.nodeNotifier.LikelyConnectedMap()) }) if err != nil { return nil, err } - if update != nil && update.Valid() { + if update != nil { ctx := types.NotifyCtx(ctx, "cli-disableroute", "unknown") - api.h.nodeNotifier.NotifyAll(ctx, *update) + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: update, + }) } return &v1.DisableRouteResponse{}, nil @@ -536,7 +557,7 @@ func (api headscaleV1APIServer) GetNodeRoutes( ctx context.Context, request *v1.GetNodeRoutesRequest, ) (*v1.GetNodeRoutesResponse, error) { - node, err := api.h.db.GetNodeByID(request.GetNodeId()) + node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId())) if err != nil { return nil, err } @@ -555,17 +576,20 @@ func (api headscaleV1APIServer) DeleteRoute( ctx context.Context, request *v1.DeleteRouteRequest, ) (*v1.DeleteRouteResponse, error) { - isConnected := api.h.nodeNotifier.ConnectedMap() - update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { + isConnected := api.h.nodeNotifier.LikelyConnectedMap() + update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) ([]types.NodeID, error) { return db.DeleteRoute(tx, request.GetRouteId(), isConnected) }) if err != nil { return nil, err } - if update != 
nil && update.Valid() { + if update != nil { ctx := types.NotifyCtx(ctx, "cli-deleteroute", "unknown") - api.h.nodeNotifier.NotifyWithIgnore(ctx, *update) + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: update, + }) } return &v1.DeleteRouteResponse{}, nil @@ -652,6 +676,99 @@ func (api headscaleV1APIServer) DeleteApiKey( return &v1.DeleteApiKeyResponse{}, nil } +func (api headscaleV1APIServer) GetPolicy( + _ context.Context, + _ *v1.GetPolicyRequest, +) (*v1.GetPolicyResponse, error) { + switch api.h.cfg.Policy.Mode { + case types.PolicyModeDB: + p, err := api.h.db.GetPolicy() + if err != nil { + return nil, fmt.Errorf("loading ACL from database: %w", err) + } + + return &v1.GetPolicyResponse{ + Policy: p.Data, + UpdatedAt: timestamppb.New(p.UpdatedAt), + }, nil + case types.PolicyModeFile: + // Read the file and return the contents as-is. + absPath := util.AbsolutePathFromConfigPath(api.h.cfg.Policy.Path) + f, err := os.Open(absPath) + if err != nil { + return nil, fmt.Errorf("reading policy from path %q: %w", absPath, err) + } + + defer f.Close() + + b, err := io.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("reading policy from file: %w", err) + } + + return &v1.GetPolicyResponse{Policy: string(b)}, nil + } + + return nil, fmt.Errorf("no supported policy mode found in configuration, policy.mode: %q", api.h.cfg.Policy.Mode) +} + +func (api headscaleV1APIServer) SetPolicy( + _ context.Context, + request *v1.SetPolicyRequest, +) (*v1.SetPolicyResponse, error) { + if api.h.cfg.Policy.Mode != types.PolicyModeDB { + return nil, types.ErrPolicyUpdateIsDisabled + } + + p := request.GetPolicy() + + pol, err := policy.LoadACLPolicyFromBytes([]byte(p)) + if err != nil { + return nil, fmt.Errorf("loading ACL policy file: %w", err) + } + + // Validate and reject configuration that would error when applied + // when creating a map response. 
This requires nodes, so there is still + // a scenario where they might be allowed if the server has no nodes + // yet, but it should help for the general case and for hot reloading + // configurations. + nodes, err := api.h.db.ListNodes() + if err != nil { + return nil, fmt.Errorf("loading nodes from database to validate policy: %w", err) + } + + _, err = pol.CompileFilterRules(nodes) + if err != nil { + return nil, fmt.Errorf("verifying policy rules: %w", err) + } + + if len(nodes) > 0 { + _, err = pol.CompileSSHPolicy(nodes[0], nodes) + if err != nil { + return nil, fmt.Errorf("verifying SSH rules: %w", err) + } + } + + updated, err := api.h.db.SetPolicy(p) + if err != nil { + return nil, err + } + + api.h.ACLPolicy = pol + + ctx := types.NotifyCtx(context.Background(), "acl-update", "na") + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StateFullUpdate, + }) + + response := &v1.SetPolicyResponse{ + Policy: updated.Data, + UpdatedAt: timestamppb.New(updated.UpdatedAt), + } + + return response, nil +} + // The following service calls are for testing and debugging func (api headscaleV1APIServer) DebugCreateNode( ctx context.Context, @@ -685,18 +802,12 @@ func (api headscaleV1APIServer) DebugCreateNode( return nil, err } - givenName, err := api.h.db.GenerateGivenName(mkey, request.GetName()) - if err != nil { - return nil, err - } - nodeKey := key.NewNode() newNode := types.Node{ MachineKey: mkey, NodeKey: nodeKey.Public(), Hostname: request.GetName(), - GivenName: givenName, User: *user, Expiry: &time.Time{}, diff --git a/hscontrol/handlers.go b/hscontrol/handlers.go index ee670733..6efe1984 100644 --- a/hscontrol/handlers.go +++ b/hscontrol/handlers.go @@ -68,12 +68,6 @@ func (h *Headscale) KeyHandler( Msg("could not get capability version") writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) - if err != nil { - log.Error(). - Caller(). - Err(err). 
- Msg("Failed to write response") - } return } @@ -82,19 +76,6 @@ func (h *Headscale) KeyHandler( Str("handler", "/key"). Int("cap_ver", int(capVer)). Msg("New noise client") - if err != nil { - writer.Header().Set("Content-Type", "text/plain; charset=utf-8") - writer.WriteHeader(http.StatusBadRequest) - _, err := writer.Write([]byte("Wrong params")) - if err != nil { - log.Error(). - Caller(). - Err(err). - Msg("Failed to write response") - } - - return - } // TS2021 (Tailscale v2 protocol) requires to have a different key if capVer >= NoiseCapabilityVersion { @@ -162,6 +143,18 @@ var registerWebAPITemplate = template.Must( <html> <head> <title>Registration - Headscale + +

headscale

@@ -169,7 +162,7 @@ var registerWebAPITemplate = template.Must(

Run the command below in the headscale server to add this machine to your network:

-
headscale nodes register --user USERNAME --key {{.Key}}
+ headscale nodes register --user USERNAME --key {{.Key}} `)) diff --git a/hscontrol/mapper/mapper.go b/hscontrol/mapper/mapper.go index df0f4d9c..8593e167 100644 --- a/hscontrol/mapper/mapper.go +++ b/hscontrol/mapper/mapper.go @@ -16,12 +16,13 @@ import ( "time" mapset "github.com/deckarep/golang-set/v2" + "github.com/juanfont/headscale/hscontrol/db" + "github.com/juanfont/headscale/hscontrol/notifier" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/klauspost/compress/zstd" "github.com/rs/zerolog/log" - "golang.org/x/exp/maps" "tailscale.com/envknob" "tailscale.com/smallzstd" "tailscale.com/tailcfg" @@ -51,21 +52,14 @@ var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_ type Mapper struct { // Configuration // TODO(kradalby): figure out if this is the format we want this in - derpMap *tailcfg.DERPMap - baseDomain string - dnsCfg *tailcfg.DNSConfig - logtail bool - randomClientPort bool + db *db.HSDatabase + cfg *types.Config + derpMap *tailcfg.DERPMap + notif *notifier.Notifier uid string created time.Time seq uint64 - - // Map isnt concurrency safe, so we need to ensure - // only one func is accessing it over time. - mu sync.Mutex - peers map[uint64]*types.Node - patches map[uint64][]patch } type patch struct { @@ -74,35 +68,22 @@ type patch struct { } func NewMapper( - node *types.Node, - peers types.Nodes, + db *db.HSDatabase, + cfg *types.Config, derpMap *tailcfg.DERPMap, - baseDomain string, - dnsCfg *tailcfg.DNSConfig, - logtail bool, - randomClientPort bool, + notif *notifier.Notifier, ) *Mapper { - log.Debug(). - Caller(). - Str("node", node.Hostname). 
- Msg("creating new mapper") - uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength) return &Mapper{ - derpMap: derpMap, - baseDomain: baseDomain, - dnsCfg: dnsCfg, - logtail: logtail, - randomClientPort: randomClientPort, + db: db, + cfg: cfg, + derpMap: derpMap, + notif: notif, uid: uid, created: time.Now(), seq: 0, - - // TODO: populate - peers: peers.IDMap(), - patches: make(map[uint64][]patch), } } @@ -113,7 +94,6 @@ func (m *Mapper) String() string { func generateUserProfiles( node *types.Node, peers types.Nodes, - baseDomain string, ) []tailcfg.UserProfile { userMap := make(map[string]types.User) userMap[node.User.Name] = node.User @@ -121,57 +101,51 @@ func generateUserProfiles( userMap[peer.User.Name] = peer.User // not worth checking if already is there } - profiles := []tailcfg.UserProfile{} + var profiles []tailcfg.UserProfile for _, user := range userMap { - displayName := user.Name - - if baseDomain != "" { - displayName = fmt.Sprintf("%s@%s", user.Name, baseDomain) - } - profiles = append(profiles, - tailcfg.UserProfile{ - ID: tailcfg.UserID(user.ID), - LoginName: user.Name, - DisplayName: displayName, - }) + user.TailscaleUserProfile()) } return profiles } func generateDNSConfig( - base *tailcfg.DNSConfig, + cfg *types.Config, baseDomain string, node *types.Node, peers types.Nodes, ) *tailcfg.DNSConfig { - dnsConfig := base.Clone() + if cfg.DNSConfig == nil { + return nil + } + + dnsConfig := cfg.DNSConfig.Clone() // if MagicDNS is enabled - if base != nil && base.Proxied { - // Only inject the Search Domain of the current user - // shared nodes should use their full FQDN - dnsConfig.Domains = append( - dnsConfig.Domains, - fmt.Sprintf( - "%s.%s", - node.User.Name, - baseDomain, - ), - ) + if dnsConfig.Proxied { + if cfg.DNSUserNameInMagicDNS { + // Only inject the Search Domain of the current user + // shared nodes should use their full FQDN + dnsConfig.Domains = append( + dnsConfig.Domains, + fmt.Sprintf( + "%s.%s", + node.User.Name, + 
baseDomain, + ), + ) - userSet := mapset.NewSet[types.User]() - userSet.Add(node.User) - for _, p := range peers { - userSet.Add(p.User) + userSet := mapset.NewSet[types.User]() + userSet.Add(node.User) + for _, p := range peers { + userSet.Add(p.User) + } + for _, user := range userSet.ToSlice() { + dnsRoute := fmt.Sprintf("%v.%v", user.Name, baseDomain) + dnsConfig.Routes[dnsRoute] = nil + } } - for _, user := range userSet.ToSlice() { - dnsRoute := fmt.Sprintf("%v.%v", user.Name, baseDomain) - dnsConfig.Routes[dnsRoute] = nil - } - } else { - dnsConfig = base } addNextDNSMetadata(dnsConfig.Resolvers, node) @@ -194,8 +168,8 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) { "device_model": []string{node.Hostinfo.OS}, } - if len(node.IPAddresses) > 0 { - attrs.Add("device_ip", node.IPAddresses[0].String()) + if len(node.IPs()) > 0 { + attrs.Add("device_ip", node.IPs()[0].String()) } resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode()) @@ -207,11 +181,10 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) { // It is a separate function to make testing easier. 
func (m *Mapper) fullMapResponse( node *types.Node, + peers types.Nodes, pol *policy.ACLPolicy, capVer tailcfg.CapabilityVersion, ) (*tailcfg.MapResponse, error) { - peers := nodeMapToList(m.peers) - resp, err := m.baseWithConfigMapResponse(node, pol, capVer) if err != nil { return nil, err @@ -219,14 +192,13 @@ func (m *Mapper) fullMapResponse( err = appendPeerChanges( resp, + true, // full change pol, node, capVer, peers, peers, - m.baseDomain, - m.dnsCfg, - m.randomClientPort, + m.cfg, ) if err != nil { return nil, err @@ -240,35 +212,25 @@ func (m *Mapper) FullMapResponse( mapRequest tailcfg.MapRequest, node *types.Node, pol *policy.ACLPolicy, + messages ...string, ) ([]byte, error) { - m.mu.Lock() - defer m.mu.Unlock() - - peers := maps.Keys(m.peers) - peersWithPatches := maps.Keys(m.patches) - slices.Sort(peers) - slices.Sort(peersWithPatches) - - if len(peersWithPatches) > 0 { - log.Debug(). - Str("node", node.Hostname). - Uints64("peers", peers). - Uints64("pending_patches", peersWithPatches). - Msgf("node requested full map response, but has pending patches") - } - - resp, err := m.fullMapResponse(node, pol, mapRequest.Version) + peers, err := m.ListPeers(node.ID) if err != nil { return nil, err } - return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress) + resp, err := m.fullMapResponse(node, peers, pol, mapRequest.Version) + if err != nil { + return nil, err + } + + return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress, messages...) } -// LiteMapResponse returns a MapResponse for the given node. +// ReadOnlyMapResponse returns a MapResponse for the given node. // Lite means that the peers has been omitted, this is intended // to be used to answer MapRequests with OmitPeers set to true. 
-func (m *Mapper) LiteMapResponse( +func (m *Mapper) ReadOnlyMapResponse( mapRequest tailcfg.MapRequest, node *types.Node, pol *policy.ACLPolicy, @@ -279,18 +241,6 @@ func (m *Mapper) LiteMapResponse( return nil, err } - rules, sshPolicy, err := policy.GenerateFilterAndSSHRules( - pol, - node, - nodeMapToList(m.peers), - ) - if err != nil { - return nil, err - } - - resp.PacketFilter = policy.ReduceFilterRules(node, rules) - resp.SSHPolicy = sshPolicy - return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress, messages...) } @@ -320,50 +270,74 @@ func (m *Mapper) DERPMapResponse( func (m *Mapper) PeerChangedResponse( mapRequest tailcfg.MapRequest, node *types.Node, - changed types.Nodes, + changed map[types.NodeID]bool, + patches []*tailcfg.PeerChange, pol *policy.ACLPolicy, messages ...string, ) ([]byte, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Update our internal map. - for _, node := range changed { - if patches, ok := m.patches[node.ID]; ok { - // preserve online status in case the patch has an outdated one - online := node.IsOnline - - for _, p := range patches { - // TODO(kradalby): Figure if this needs to be sorted by timestamp - node.ApplyPeerChange(p.change) - } - - // Ensure the patches are not applied again later - delete(m.patches, node.ID) - - node.IsOnline = online - } - - m.peers[node.ID] = node - } - resp := m.baseMapResponse() - err := appendPeerChanges( + peers, err := m.ListPeers(node.ID) + if err != nil { + return nil, err + } + + var removedIDs []tailcfg.NodeID + var changedIDs []types.NodeID + for nodeID, nodeChanged := range changed { + if nodeChanged { + changedIDs = append(changedIDs, nodeID) + } else { + removedIDs = append(removedIDs, nodeID.NodeID()) + } + } + + changedNodes := make(types.Nodes, 0, len(changedIDs)) + for _, peer := range peers { + if slices.Contains(changedIDs, peer.ID) { + changedNodes = append(changedNodes, peer) + } + } + + err = appendPeerChanges( &resp, + false, // partial change pol, node, 
mapRequest.Version, - nodeMapToList(m.peers), - changed, - m.baseDomain, - m.dnsCfg, - m.randomClientPort, + peers, + changedNodes, + m.cfg, ) if err != nil { return nil, err } + resp.PeersRemoved = removedIDs + + // Sending patches as a part of a PeersChanged response + // is technically not suppose to be done, but they are + // applied after the PeersChanged. The patch list + // should _only_ contain Nodes that are not in the + // PeersChanged or PeersRemoved list and the caller + // should filter them out. + // + // From tailcfg docs: + // These are applied after Peers* above, but in practice the + // control server should only send these on their own, without + // the Peers* fields also set. + if patches != nil { + resp.PeersChangedPatch = patches + } + + // Add the node itself, it might have changed, and particularly + // if there are no patches or changes, this is a self update. + tailnode, err := tailNode(node, mapRequest.Version, pol, m.cfg) + if err != nil { + return nil, err + } + resp.Node = tailnode + return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...) 
} @@ -375,71 +349,12 @@ func (m *Mapper) PeerChangedPatchResponse( changed []*tailcfg.PeerChange, pol *policy.ACLPolicy, ) ([]byte, error) { - m.mu.Lock() - defer m.mu.Unlock() - - sendUpdate := false - // patch the internal map - for _, change := range changed { - if peer, ok := m.peers[uint64(change.NodeID)]; ok { - peer.ApplyPeerChange(change) - sendUpdate = true - } else { - log.Trace().Str("node", node.Hostname).Msgf("Node with ID %s is missing from mapper for Node %s, saving patch for when node is available", change.NodeID, node.Hostname) - - p := patch{ - timestamp: time.Now(), - change: change, - } - - if patches, ok := m.patches[uint64(change.NodeID)]; ok { - m.patches[uint64(change.NodeID)] = append(patches, p) - } else { - m.patches[uint64(change.NodeID)] = []patch{p} - } - } - } - - if !sendUpdate { - return nil, nil - } - resp := m.baseMapResponse() resp.PeersChangedPatch = changed return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress) } -// TODO(kradalby): We need some integration tests for this. -func (m *Mapper) PeerRemovedResponse( - mapRequest tailcfg.MapRequest, - node *types.Node, - removed []tailcfg.NodeID, -) ([]byte, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Some nodes might have been removed already - // so we dont want to ask downstream to remove - // twice, than can cause a panic in tailscaled. 
- notYetRemoved := []tailcfg.NodeID{} - - // remove from our internal map - for _, id := range removed { - if _, ok := m.peers[uint64(id)]; ok { - notYetRemoved = append(notYetRemoved, id) - } - - delete(m.peers, uint64(id)) - delete(m.patches, uint64(id)) - } - - resp := m.baseMapResponse() - resp.PeersRemoved = notYetRemoved - - return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress) -} - func (m *Mapper) marshalMapResponse( mapRequest tailcfg.MapRequest, resp *tailcfg.MapResponse, @@ -451,10 +366,7 @@ func (m *Mapper) marshalMapResponse( jsonBody, err := json.Marshal(resp) if err != nil { - log.Error(). - Caller(). - Err(err). - Msg("Cannot marshal map response") + return nil, fmt.Errorf("marshalling map response: %w", err) } if debugDumpMapResponsePath != "" { @@ -469,10 +381,8 @@ func (m *Mapper) marshalMapResponse( switch { case resp.Peers != nil && len(resp.Peers) > 0: responseType = "full" - case isSelfUpdate(messages...): + case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil && resp.DERPMap == nil && !resp.KeepAlive: responseType = "self" - case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil: - responseType = "lite" case resp.PeersChanged != nil && len(resp.PeersChanged) > 0: responseType = "changed" case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0: @@ -483,10 +393,7 @@ func (m *Mapper) marshalMapResponse( body, err := json.MarshalIndent(data, "", " ") if err != nil { - log.Error(). - Caller(). - Err(err). 
- Msg("Cannot marshal map response") + return nil, fmt.Errorf("marshalling map response: %w", err) } perms := fs.FileMode(debugMapResponsePerm) @@ -496,11 +403,11 @@ func (m *Mapper) marshalMapResponse( panic(err) } - now := time.Now().UnixNano() + now := time.Now().Format("2006-01-02T15-04-05.999999999") mapResponsePath := path.Join( mPath, - fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType), + fmt.Sprintf("%s-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType), ) log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath) @@ -574,7 +481,7 @@ func (m *Mapper) baseWithConfigMapResponse( ) (*tailcfg.MapResponse, error) { resp := m.baseMapResponse() - tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort) + tailnode, err := tailNode(node, capVer, pol, m.cfg) if err != nil { return nil, err } @@ -582,7 +489,7 @@ func (m *Mapper) baseWithConfigMapResponse( resp.DERPMap = m.derpMap - resp.Domain = m.baseDomain + resp.Domain = m.cfg.BaseDomain // Do not instruct clients to collect services we do not // support or do anything with them @@ -591,12 +498,26 @@ func (m *Mapper) baseWithConfigMapResponse( resp.KeepAlive = false resp.Debug = &tailcfg.Debug{ - DisableLogTail: !m.logtail, + DisableLogTail: !m.cfg.LogTail.Enabled, } return &resp, nil } +func (m *Mapper) ListPeers(nodeID types.NodeID) (types.Nodes, error) { + peers, err := m.db.ListPeers(nodeID) + if err != nil { + return nil, err + } + + for _, peer := range peers { + online := m.notif.IsLikelyConnected(peer.ID) + peer.IsOnline = &online + } + + return peers, nil +} + func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes { ret := make(types.Nodes, 0) @@ -612,42 +533,40 @@ func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes { func appendPeerChanges( resp *tailcfg.MapResponse, + fullChange bool, pol *policy.ACLPolicy, node *types.Node, capVer tailcfg.CapabilityVersion, peers types.Nodes, changed types.Nodes, - 
baseDomain string, - dnsCfg *tailcfg.DNSConfig, - randomClientPort bool, + cfg *types.Config, ) error { - fullChange := len(peers) == len(changed) + packetFilter, err := pol.CompileFilterRules(append(peers, node)) + if err != nil { + return err + } - rules, sshPolicy, err := policy.GenerateFilterAndSSHRules( - pol, - node, - peers, - ) + sshPolicy, err := pol.CompileSSHPolicy(node, peers) if err != nil { return err } // If there are filter rules present, see if there are any nodes that cannot - // access eachother at all and remove them from the peers. - if len(rules) > 0 { - changed = policy.FilterNodesByACL(node, changed, rules) + // access each-other at all and remove them from the peers. + if len(packetFilter) > 0 { + changed = policy.FilterNodesByACL(node, changed, packetFilter) } - profiles := generateUserProfiles(node, changed, baseDomain) + profiles := generateUserProfiles(node, changed) dnsConfig := generateDNSConfig( - dnsCfg, - baseDomain, + cfg, + cfg.BaseDomain, node, peers, ) - tailPeers, err := tailNodes(changed, capVer, pol, dnsCfg, baseDomain, randomClientPort) + tailPeers, err := tailNodes(changed, capVer, pol, cfg) if err != nil { return err } @@ -663,19 +582,30 @@ func appendPeerChanges( resp.PeersChanged = tailPeers } resp.DNSConfig = dnsConfig - resp.PacketFilter = policy.ReduceFilterRules(node, rules) resp.UserProfiles = profiles resp.SSHPolicy = sshPolicy - return nil -} - -func isSelfUpdate(messages ...string) bool { - for _, message := range messages { - if strings.Contains(message, types.SelfUpdateIdentifier) { - return true + // 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates) + if capVer >= 81 { + // Currently, we do not send incremental package filters, however using the + // new PacketFilters field and "base" allows us to send a full update when we + // have to send an empty list, avoiding the hack in the else block. 
+ resp.PacketFilters = map[string][]tailcfg.FilterRule{ + "base": policy.ReduceFilterRules(node, packetFilter), + } + } else { + // This is a hack to avoid sending an empty list of packet filters. + // Since tailcfg.PacketFilter has omitempty, any empty PacketFilter will + // be omitted, causing the client to consider it unchanged, keeping the + // previous packet filter. Worst case, this can cause a node that previously + // has access to a node to _not_ loose access if an empty (allow none) is sent. + reduced := policy.ReduceFilterRules(node, packetFilter) + if len(reduced) > 0 { + resp.PacketFilter = reduced + } else { + resp.PacketFilter = packetFilter } } - return false + return nil } diff --git a/hscontrol/mapper/mapper_test.go b/hscontrol/mapper/mapper_test.go index bcc17dd4..0484fc02 100644 --- a/hscontrol/mapper/mapper_test.go +++ b/hscontrol/mapper/mapper_test.go @@ -17,6 +17,11 @@ import ( "tailscale.com/types/key" ) +var iap = func(ipStr string) *netip.Addr { + ip := netip.MustParseAddr(ipStr) + return &ip +} + func (s *Suite) TestGetMapResponseUserProfiles(c *check.C) { mach := func(hostname, username string, userid uint) *types.Node { return &types.Node{ @@ -38,7 +43,6 @@ func (s *Suite) TestGetMapResponseUserProfiles(c *check.C) { types.Nodes{ nodeInShared2, nodeInShared3, node2InShared1, }, - "", ) c.Assert(len(userProfiles), check.Equals, 3) @@ -122,7 +126,10 @@ func TestDNSConfigMapResponse(t *testing.T) { } got := generateDNSConfig( - &dnsConfigOrig, + &types.Config{ + DNSConfig: &dnsConfigOrig, + DNSUserNameInMagicDNS: true, + }, baseDomain, nodeInShared1, peersOfNodeInShared1, @@ -176,17 +183,16 @@ func Test_fullMapResponse(t *testing.T) { DiscoKey: mustDK( "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084", ), - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "mini", - GivenName: "mini", - UserID: 0, - User: types.User{Name: "mini"}, - ForcedTags: []string{}, - AuthKeyID: 0, - AuthKey: 
&types.PreAuthKey{}, - LastSeen: &lastSeen, - Expiry: &expire, - Hostinfo: &tailcfg.Hostinfo{}, + IPv4: iap("100.64.0.1"), + Hostname: "mini", + GivenName: "mini", + UserID: 0, + User: types.User{Name: "mini"}, + ForcedTags: []string{}, + AuthKey: &types.PreAuthKey{}, + LastSeen: &lastSeen, + Expiry: &expire, + Hostinfo: &tailcfg.Hostinfo{}, Routes: []types.Route{ { Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")), @@ -257,17 +263,17 @@ func Test_fullMapResponse(t *testing.T) { DiscoKey: mustDK( "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084", ), - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "peer1", - GivenName: "peer1", - UserID: 0, - User: types.User{Name: "mini"}, - ForcedTags: []string{}, - LastSeen: &lastSeen, - Expiry: &expire, - Hostinfo: &tailcfg.Hostinfo{}, - Routes: []types.Route{}, - CreatedAt: created, + IPv4: iap("100.64.0.2"), + Hostname: "peer1", + GivenName: "peer1", + UserID: 0, + User: types.User{Name: "mini"}, + ForcedTags: []string{}, + LastSeen: &lastSeen, + Expiry: &expire, + Hostinfo: &tailcfg.Hostinfo{}, + Routes: []types.Route{}, + CreatedAt: created, } tailPeer1 := &tailcfg.Node{ @@ -312,17 +318,17 @@ func Test_fullMapResponse(t *testing.T) { DiscoKey: mustDK( "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084", ), - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "peer2", - GivenName: "peer2", - UserID: 1, - User: types.User{Name: "peer2"}, - ForcedTags: []string{}, - LastSeen: &lastSeen, - Expiry: &expire, - Hostinfo: &tailcfg.Hostinfo{}, - Routes: []types.Route{}, - CreatedAt: created, + IPv4: iap("100.64.0.3"), + Hostname: "peer2", + GivenName: "peer2", + UserID: 1, + User: types.User{Name: "peer2"}, + ForcedTags: []string{}, + LastSeen: &lastSeen, + Expiry: &expire, + Hostinfo: &tailcfg.Hostinfo{}, + Routes: []types.Route{}, + CreatedAt: created, } tests := []struct { @@ -331,13 +337,10 @@ func Test_fullMapResponse(t 
*testing.T) { node *types.Node peers types.Nodes - baseDomain string - dnsConfig *tailcfg.DNSConfig - derpMap *tailcfg.DERPMap - logtail bool - randomClientPort bool - want *tailcfg.MapResponse - wantErr bool + derpMap *tailcfg.DERPMap + cfg *types.Config + want *tailcfg.MapResponse + wantErr bool }{ // { // name: "empty-node", @@ -349,15 +352,17 @@ func Test_fullMapResponse(t *testing.T) { // wantErr: true, // }, { - name: "no-pol-no-peers-map-response", - pol: &policy.ACLPolicy{}, - node: mini, - peers: types.Nodes{}, - baseDomain: "", - dnsConfig: &tailcfg.DNSConfig{}, - derpMap: &tailcfg.DERPMap{}, - logtail: false, - randomClientPort: false, + name: "no-pol-no-peers-map-response", + pol: &policy.ACLPolicy{}, + node: mini, + peers: types.Nodes{}, + derpMap: &tailcfg.DERPMap{}, + cfg: &types.Config{ + BaseDomain: "", + DNSConfig: &tailcfg.DNSConfig{}, + LogTail: types.LogTailConfig{Enabled: false}, + RandomizeClientPort: false, + }, want: &tailcfg.MapResponse{ Node: tailMini, KeepAlive: false, @@ -383,11 +388,13 @@ func Test_fullMapResponse(t *testing.T) { peers: types.Nodes{ peer1, }, - baseDomain: "", - dnsConfig: &tailcfg.DNSConfig{}, - derpMap: &tailcfg.DERPMap{}, - logtail: false, - randomClientPort: false, + derpMap: &tailcfg.DERPMap{}, + cfg: &types.Config{ + BaseDomain: "", + DNSConfig: &tailcfg.DNSConfig{}, + LogTail: types.LogTailConfig{Enabled: false}, + RandomizeClientPort: false, + }, want: &tailcfg.MapResponse{ KeepAlive: false, Node: tailMini, @@ -424,11 +431,13 @@ func Test_fullMapResponse(t *testing.T) { peer1, peer2, }, - baseDomain: "", - dnsConfig: &tailcfg.DNSConfig{}, - derpMap: &tailcfg.DERPMap{}, - logtail: false, - randomClientPort: false, + derpMap: &tailcfg.DERPMap{}, + cfg: &types.Config{ + BaseDomain: "", + DNSConfig: &tailcfg.DNSConfig{}, + LogTail: types.LogTailConfig{Enabled: false}, + RandomizeClientPort: false, + }, want: &tailcfg.MapResponse{ KeepAlive: false, Node: tailMini, @@ -463,17 +472,15 @@ func Test_fullMapResponse(t 
*testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mappy := NewMapper( - tt.node, - tt.peers, + nil, + tt.cfg, tt.derpMap, - tt.baseDomain, - tt.dnsConfig, - tt.logtail, - tt.randomClientPort, + nil, ) got, err := mappy.fullMapResponse( tt.node, + tt.peers, tt.pol, 0, ) diff --git a/hscontrol/mapper/tail.go b/hscontrol/mapper/tail.go index c10da4de..b0878d1a 100644 --- a/hscontrol/mapper/tail.go +++ b/hscontrol/mapper/tail.go @@ -3,12 +3,10 @@ package mapper import ( "fmt" "net/netip" - "strconv" "time" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" - "github.com/juanfont/headscale/hscontrol/util" "github.com/samber/lo" "tailscale.com/tailcfg" ) @@ -17,9 +15,7 @@ func tailNodes( nodes types.Nodes, capVer tailcfg.CapabilityVersion, pol *policy.ACLPolicy, - dnsConfig *tailcfg.DNSConfig, - baseDomain string, - randomClientPort bool, + cfg *types.Config, ) ([]*tailcfg.Node, error) { tNodes := make([]*tailcfg.Node, len(nodes)) @@ -28,9 +24,7 @@ func tailNodes( node, capVer, pol, - dnsConfig, - baseDomain, - randomClientPort, + cfg, ) if err != nil { return nil, err @@ -42,17 +36,14 @@ func tailNodes( return tNodes, nil } -// tailNode converts a Node into a Tailscale Node. includeRoutes is false for shared nodes -// as per the expected behaviour in the official SaaS. +// tailNode converts a Node into a Tailscale Node. 
func tailNode( node *types.Node, capVer tailcfg.CapabilityVersion, pol *policy.ACLPolicy, - dnsConfig *tailcfg.DNSConfig, - baseDomain string, - randomClientPort bool, + cfg *types.Config, ) (*tailcfg.Node, error) { - addrs := node.IPAddresses.Prefixes() + addrs := node.Prefixes() allowedIPs := append( []netip.Prefix{}, @@ -85,7 +76,7 @@ func tailNode( keyExpiry = time.Time{} } - hostname, err := node.GetFQDN(dnsConfig, baseDomain) + hostname, err := node.GetFQDN(cfg, cfg.BaseDomain) if err != nil { return nil, fmt.Errorf("tailNode, failed to create FQDN: %s", err) } @@ -94,17 +85,15 @@ func tailNode( tags = lo.Uniq(append(tags, node.ForcedTags...)) tNode := tailcfg.Node{ - ID: tailcfg.NodeID(node.ID), // this is the actual ID - StableID: tailcfg.StableNodeID( - strconv.FormatUint(node.ID, util.Base10), - ), // in headscale, unlike tailcontrol server, IDs are permanent - Name: hostname, - Cap: capVer, + ID: tailcfg.NodeID(node.ID), // this is the actual ID + StableID: node.ID.StableID(), + Name: hostname, + Cap: capVer, User: tailcfg.UserID(node.UserID), Key: node.NodeKey, - KeyExpiry: keyExpiry, + KeyExpiry: keyExpiry.UTC(), Machine: node.MachineKey, DiscoKey: node.DiscoKey, @@ -113,7 +102,7 @@ func tailNode( Endpoints: node.Endpoints, DERP: derp, Hostinfo: node.Hostinfo.View(), - Created: node.CreatedAt, + Created: node.CreatedAt.UTC(), Online: node.IsOnline, @@ -133,7 +122,7 @@ func tailNode( tailcfg.CapabilitySSH: []tailcfg.RawMessage{}, } - if randomClientPort { + if cfg.RandomizeClientPort { tNode.CapMap[tailcfg.NodeAttrRandomizeClientPort] = []tailcfg.RawMessage{} } } else { @@ -143,7 +132,7 @@ func tailNode( tailcfg.CapabilitySSH, } - if randomClientPort { + if cfg.RandomizeClientPort { tNode.Capabilities = append(tNode.Capabilities, tailcfg.NodeAttrRandomizeClientPort) } } diff --git a/hscontrol/mapper/tail_test.go b/hscontrol/mapper/tail_test.go index f6e370c4..f744c9c6 100644 --- a/hscontrol/mapper/tail_test.go +++ b/hscontrol/mapper/tail_test.go @@ -1,6 
+1,7 @@ package mapper import ( + "encoding/json" "net/netip" "testing" "time" @@ -55,12 +56,14 @@ func TestTailNode(t *testing.T) { { name: "empty-node", node: &types.Node{ - Hostinfo: &tailcfg.Hostinfo{}, + GivenName: "empty", + Hostinfo: &tailcfg.Hostinfo{}, }, pol: &policy.ACLPolicy{}, dnsConfig: &tailcfg.DNSConfig{}, baseDomain: "", want: &tailcfg.Node{ + Name: "empty", StableID: "0", Addresses: []netip.Prefix{}, AllowedIPs: []netip.Prefix{}, @@ -89,9 +92,7 @@ func TestTailNode(t *testing.T) { DiscoKey: mustDK( "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084", ), - IPAddresses: []netip.Addr{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), Hostname: "mini", GivenName: "mini", UserID: 0, @@ -99,7 +100,6 @@ func TestTailNode(t *testing.T) { Name: "mini", }, ForcedTags: []string{}, - AuthKeyID: 0, AuthKey: &types.PreAuthKey{}, LastSeen: &lastSeen, Expiry: &expire, @@ -182,13 +182,16 @@ func TestTailNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + cfg := &types.Config{ + BaseDomain: tt.baseDomain, + DNSConfig: tt.dnsConfig, + RandomizeClientPort: false, + } got, err := tailNode( tt.node, 0, tt.pol, - tt.dnsConfig, - tt.baseDomain, - false, + cfg, ) if (err != nil) != tt.wantErr { @@ -203,3 +206,68 @@ func TestTailNode(t *testing.T) { }) } } + +func TestNodeExpiry(t *testing.T) { + tp := func(t time.Time) *time.Time { + return &t + } + tests := []struct { + name string + exp *time.Time + wantTime time.Time + wantTimeZero bool + }{ + { + name: "no-expiry", + exp: nil, + wantTimeZero: true, + }, + { + name: "zero-expiry", + exp: &time.Time{}, + wantTimeZero: true, + }, + { + name: "localtime", + exp: tp(time.Time{}.Local()), + wantTimeZero: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + node := &types.Node{ + GivenName: "test", + Expiry: tt.exp, + } + tn, err := tailNode( + node, + 0, + &policy.ACLPolicy{}, + &types.Config{}, + ) + if err != nil 
{ + t.Fatalf("nodeExpiry() error = %v", err) + } + + // Round trip the node through JSON to ensure the time is serialized correctly + seri, err := json.Marshal(tn) + if err != nil { + t.Fatalf("nodeExpiry() error = %v", err) + } + var deseri tailcfg.Node + err = json.Unmarshal(seri, &deseri) + if err != nil { + t.Fatalf("nodeExpiry() error = %v", err) + } + + if tt.wantTimeZero { + if !deseri.KeyExpiry.IsZero() { + t.Errorf("nodeExpiry() = %v, want zero", deseri.KeyExpiry) + } + } else if deseri.KeyExpiry != tt.wantTime { + t.Errorf("nodeExpiry() = %v, want %v", deseri.KeyExpiry, tt.wantTime) + } + }) + } +} diff --git a/hscontrol/metrics.go b/hscontrol/metrics.go index fc56f584..4870e74e 100644 --- a/hscontrol/metrics.go +++ b/hscontrol/metrics.go @@ -1,25 +1,120 @@ package hscontrol import ( + "net/http" + "strconv" + + "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "tailscale.com/envknob" ) +var debugHighCardinalityMetrics = envknob.Bool("HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS") + +var mapResponseLastSentSeconds *prometheus.GaugeVec + +func init() { + if debugHighCardinalityMetrics { + mapResponseLastSentSeconds = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_last_sent_seconds", + Help: "last sent metric to node.id", + }, []string{"type", "id"}) + } +} + const prometheusNamespace = "headscale" var ( - // This is a high cardinality metric (user x node), we might want to make this - // configurable/opt-in in the future. 
- nodeRegistrations = promauto.NewCounterVec(prometheus.CounterOpts{ + mapResponseSent = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: prometheusNamespace, - Name: "node_registrations_total", - Help: "The total amount of registered node attempts", - }, []string{"action", "auth", "status", "user"}) - - updateRequestsSentToNode = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mapresponse_sent_total", + Help: "total count of mapresponses sent to clients", + }, []string{"status", "type"}) + mapResponseUpdateReceived = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: prometheusNamespace, - Name: "update_request_sent_to_node_total", - Help: "The number of calls/messages issued on a specific nodes update channel", - }, []string{"user", "node", "status"}) - // TODO(kradalby): This is very debugging, we might want to remove it. + Name: "mapresponse_updates_received_total", + Help: "total count of mapresponse updates received on update channel", + }, []string{"type"}) + mapResponseWriteUpdatesInStream = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_write_updates_in_stream_total", + Help: "total count of writes that occurred in a stream session, pre-68 nodes", + }, []string{"status"}) + mapResponseEndpointUpdates = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_endpoint_updates_total", + Help: "total count of endpoint updates received", + }, []string{"status"}) + mapResponseReadOnly = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_readonly_requests_total", + Help: "total count of readonly requests received", + }, []string{"status"}) + mapResponseEnded = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_ended_total", + Help: "total count of new mapsessions ended", + }, []string{"reason"}) + mapResponseClosed = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_closed_total", + Help: "total count of calls to mapresponse close", + }, []string{"return"}) + httpDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: prometheusNamespace, + Name: "http_duration_seconds", + Help: "Duration of HTTP requests.", + }, []string{"path"}) + httpCounter = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "http_requests_total", + Help: "Total number of http requests processed", + }, []string{"code", "method", "path"}, + ) ) + +// prometheusMiddleware implements mux.MiddlewareFunc. +func prometheusMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + route := mux.CurrentRoute(r) + path, _ := route.GetPathTemplate() + + // Ignore streaming and noise sessions + // it has its own router further down. + if path == "/ts2021" || path == "/machine/map" || path == "/derp" || path == "/derp/probe" || path == "/bootstrap-dns" { + next.ServeHTTP(w, r) + return + } + + rw := &respWriterProm{ResponseWriter: w} + + timer := prometheus.NewTimer(httpDuration.WithLabelValues(path)) + next.ServeHTTP(rw, r) + timer.ObserveDuration() + httpCounter.WithLabelValues(strconv.Itoa(rw.status), r.Method, path).Inc() + }) +} + +type respWriterProm struct { + http.ResponseWriter + status int + written int64 + wroteHeader bool +} + +func (r *respWriterProm) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *respWriterProm) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} diff --git a/hscontrol/noise.go b/hscontrol/noise.go index 0fa28d19..554be65c 100644 --- a/hscontrol/noise.go +++ b/hscontrol/noise.go @@ -95,18 +95,19 @@ func (h *Headscale) NoiseUpgradeHandler( 
// The HTTP2 server that exposes this router is created for // a single hijacked connection from /ts2021, using netutil.NewOneConnListener router := mux.NewRouter() + router.Use(prometheusMiddleware) router.HandleFunc("/machine/register", noiseServer.NoiseRegistrationHandler). Methods(http.MethodPost) router.HandleFunc("/machine/map", noiseServer.NoisePollNetMapHandler) server := http.Server{ - ReadTimeout: types.HTTPReadTimeout, + ReadTimeout: types.HTTPTimeout, } noiseServer.httpBaseConfig = &http.Server{ Handler: router, - ReadHeaderTimeout: types.HTTPReadTimeout, + ReadHeaderTimeout: types.HTTPTimeout, } noiseServer.http2Server = &http2.Server{} @@ -163,3 +164,79 @@ func (ns *noiseServer) earlyNoise(protocolVersion int, writer io.Writer) error { return nil } + +const ( + MinimumCapVersion tailcfg.CapabilityVersion = 61 +) + +// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol +// +// This is the busiest endpoint, as it keeps the HTTP long poll that updates +// the clients when something in the network changes. +// +// The clients POST stuff like HostInfo and their Endpoints here, but +// only after their first request (marked with the ReadOnly field). +// +// At this moment the updates are sent in a quite horrendous way, but they kinda work. +func (ns *noiseServer) NoisePollNetMapHandler( + writer http.ResponseWriter, + req *http.Request, +) { + log.Trace(). + Str("handler", "NoisePollNetMap"). + Msg("PollNetMapHandler called") + + log.Trace(). + Any("headers", req.Header). + Caller(). + Msg("Headers") + + body, _ := io.ReadAll(req.Body) + + mapRequest := tailcfg.MapRequest{} + if err := json.Unmarshal(body, &mapRequest); err != nil { + log.Error(). + Caller(). + Err(err). + Msg("Cannot parse MapRequest") + http.Error(writer, "Internal error", http.StatusInternalServerError) + + return + } + + // Reject unsupported versions + if mapRequest.Version < MinimumCapVersion { + log.Info(). + Caller(). 
+ Int("min_version", int(MinimumCapVersion)). + Int("client_version", int(mapRequest.Version)). + Msg("unsupported client connected") + http.Error(writer, "Internal error", http.StatusBadRequest) + + return + } + + ns.nodeKey = mapRequest.NodeKey + + node, err := ns.headscale.db.GetNodeByAnyKey( + ns.conn.Peer(), + mapRequest.NodeKey, + key.NodePublic{}, + ) + if err != nil { + log.Error(). + Str("handler", "NoisePollNetMap"). + Msgf("Failed to fetch node from the database with node key: %s", mapRequest.NodeKey.String()) + http.Error(writer, "Internal error", http.StatusInternalServerError) + + return + } + + sess := ns.headscale.newMapSession(req.Context(), mapRequest, writer, node) + sess.tracef("a node sending a MapRequest with Noise protocol") + if !sess.isStreaming() { + sess.serve() + } else { + sess.serveLongPoll() + } +} diff --git a/hscontrol/notifier/metrics.go b/hscontrol/notifier/metrics.go new file mode 100644 index 00000000..8a7a8839 --- /dev/null +++ b/hscontrol/notifier/metrics.go @@ -0,0 +1,68 @@ +package notifier + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "tailscale.com/envknob" +) + +const prometheusNamespace = "headscale" + +var debugHighCardinalityMetrics = envknob.Bool("HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS") + +var notifierUpdateSent *prometheus.CounterVec + +func init() { + if debugHighCardinalityMetrics { + notifierUpdateSent = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "notifier_update_sent_total", + Help: "total count of update sent on nodes channel", + }, []string{"status", "type", "trigger", "id"}) + } else { + notifierUpdateSent = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "notifier_update_sent_total", + Help: "total count of update sent on nodes channel", + }, []string{"status", "type", "trigger"}) + } +} + +var ( + notifierWaitersForLock = 
promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_waiters_for_lock", + Help: "gauge of waiters for the notifier lock", + }, []string{"type", "action"}) + notifierWaitForLock = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: prometheusNamespace, + Name: "notifier_wait_for_lock_seconds", + Help: "histogram of time spent waiting for the notifier lock", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.5, 1, 3, 5, 10}, + }, []string{"action"}) + notifierUpdateReceived = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "notifier_update_received_total", + Help: "total count of updates received by notifier", + }, []string{"type", "trigger"}) + notifierNodeUpdateChans = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_open_channels_total", + Help: "total count open channels in notifier", + }) + notifierBatcherWaitersForLock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_batcher_waiters_for_lock", + Help: "gauge of waiters for the notifier batcher lock", + }, []string{"type", "action"}) + notifierBatcherChanges = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_batcher_changes_pending", + Help: "gauge of full changes pending in the notifier batcher", + }, []string{}) + notifierBatcherPatches = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_batcher_patches_pending", + Help: "gauge of patches pending in the notifier batcher", + }, []string{}) +) diff --git a/hscontrol/notifier/notifier.go b/hscontrol/notifier/notifier.go index 2384a40f..ceede6ba 100644 --- a/hscontrol/notifier/notifier.go +++ b/hscontrol/notifier/notifier.go @@ -3,81 +3,165 @@ package notifier import ( "context" "fmt" + "sort" "strings" "sync" + "time" "github.com/juanfont/headscale/hscontrol/types" - 
"github.com/juanfont/headscale/hscontrol/util" + "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog/log" - "tailscale.com/types/key" + "github.com/sasha-s/go-deadlock" + "tailscale.com/envknob" + "tailscale.com/tailcfg" + "tailscale.com/util/set" ) -type Notifier struct { - l sync.RWMutex - nodes map[string]chan<- types.StateUpdate - connected map[key.MachinePublic]bool -} +var ( + debugDeadlock = envknob.Bool("HEADSCALE_DEBUG_DEADLOCK") + debugDeadlockTimeout = envknob.RegisterDuration("HEADSCALE_DEBUG_DEADLOCK_TIMEOUT") +) -func NewNotifier() *Notifier { - return &Notifier{ - nodes: make(map[string]chan<- types.StateUpdate), - connected: make(map[key.MachinePublic]bool), +func init() { + deadlock.Opts.Disable = !debugDeadlock + if debugDeadlock { + deadlock.Opts.DeadlockTimeout = debugDeadlockTimeout() + deadlock.Opts.PrintAllCurrentGoroutines = true } } -func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpdate) { - log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to add node") - defer log.Trace(). - Caller(). - Str("key", machineKey.ShortString()). - Msg("releasing lock to add node") - - n.l.Lock() - defer n.l.Unlock() - - n.nodes[machineKey.String()] = c - n.connected[machineKey] = true - - log.Trace(). - Str("machine_key", machineKey.ShortString()). - Int("open_chans", len(n.nodes)). - Msg("Added new channel") +type Notifier struct { + l deadlock.Mutex + nodes map[types.NodeID]chan<- types.StateUpdate + connected *xsync.MapOf[types.NodeID, bool] + b *batcher + cfg *types.Config + closed bool } -func (n *Notifier) RemoveNode(machineKey key.MachinePublic) { - log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to remove node") - defer log.Trace(). - Caller(). - Str("key", machineKey.ShortString()). 
- Msg("releasing lock to remove node") +func NewNotifier(cfg *types.Config) *Notifier { + n := &Notifier{ + nodes: make(map[types.NodeID]chan<- types.StateUpdate), + connected: xsync.NewMapOf[types.NodeID, bool](), + cfg: cfg, + closed: false, + } + b := newBatcher(cfg.Tuning.BatchChangeDelay, n) + n.b = b + go b.doWork() + return n +} + +// Close stops the batcher and closes all channels. +func (n *Notifier) Close() { + notifierWaitersForLock.WithLabelValues("lock", "close").Inc() n.l.Lock() defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "close").Dec() - if len(n.nodes) == 0 { + n.closed = true + n.b.close() + + for _, c := range n.nodes { + close(c) + } +} + +func (n *Notifier) tracef(nID types.NodeID, msg string, args ...any) { + log.Trace(). + Uint64("node.id", nID.Uint64()). + Int("open_chans", len(n.nodes)).Msgf(msg, args...) +} + +func (n *Notifier) AddNode(nodeID types.NodeID, c chan<- types.StateUpdate) { + start := time.Now() + notifierWaitersForLock.WithLabelValues("lock", "add").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "add").Dec() + notifierWaitForLock.WithLabelValues("add").Observe(time.Since(start).Seconds()) + + if n.closed { return } - delete(n.nodes, machineKey.String()) - n.connected[machineKey] = false + // If a channel exists, it means the node has opened a new + // connection. Close the old channel and replace it. + if curr, ok := n.nodes[nodeID]; ok { + n.tracef(nodeID, "channel present, closing and replacing") + close(curr) + } - log.Trace(). - Str("machine_key", machineKey.ShortString()). - Int("open_chans", len(n.nodes)). - Msg("Removed channel") + n.nodes[nodeID] = c + n.connected.Store(nodeID, true) + + n.tracef(nodeID, "added new channel") + notifierNodeUpdateChans.Inc() +} + +// RemoveNode removes a node and a given channel from the notifier. +// It checks that the channel is the same as currently being updated +// and ignores the removal if it is not. 
+// RemoveNode reports if the node/chan was removed. +func (n *Notifier) RemoveNode(nodeID types.NodeID, c chan<- types.StateUpdate) bool { + start := time.Now() + notifierWaitersForLock.WithLabelValues("lock", "remove").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "remove").Dec() + notifierWaitForLock.WithLabelValues("remove").Observe(time.Since(start).Seconds()) + + if n.closed { + return true + } + + if len(n.nodes) == 0 { + return true + } + + // If the channel exist, but it does not belong + // to the caller, ignore. + if curr, ok := n.nodes[nodeID]; ok { + if curr != c { + n.tracef(nodeID, "channel has been replaced, not removing") + return false + } + } + + delete(n.nodes, nodeID) + n.connected.Store(nodeID, false) + + n.tracef(nodeID, "removed channel") + notifierNodeUpdateChans.Dec() + + return true } // IsConnected reports if a node is connected to headscale and has a // poll session open. -func (n *Notifier) IsConnected(machineKey key.MachinePublic) bool { - n.l.RLock() - defer n.l.RUnlock() +func (n *Notifier) IsConnected(nodeID types.NodeID) bool { + notifierWaitersForLock.WithLabelValues("lock", "conncheck").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "conncheck").Dec() - return n.connected[machineKey] + if val, ok := n.connected.Load(nodeID); ok { + return val + } + return false } -// TODO(kradalby): This returns a pointer and can be dangerous. -func (n *Notifier) ConnectedMap() map[key.MachinePublic]bool { +// IsLikelyConnected reports if a node is connected to headscale and has a +// poll session open, but doesnt lock, so might be wrong. 
+func (n *Notifier) IsLikelyConnected(nodeID types.NodeID) bool { + if val, ok := n.connected.Load(nodeID); ok { + return val + } + return false +} + +func (n *Notifier) LikelyConnectedMap() *xsync.MapOf[types.NodeID, bool] { return n.connected } @@ -88,86 +172,299 @@ func (n *Notifier) NotifyAll(ctx context.Context, update types.StateUpdate) { func (n *Notifier) NotifyWithIgnore( ctx context.Context, update types.StateUpdate, - ignore ...string, + ignoreNodeIDs ...types.NodeID, ) { - log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify") - defer log.Trace(). - Caller(). - Interface("type", update.Type). - Msg("releasing lock, finished notifying") + if n.closed { + return + } - n.l.RLock() - defer n.l.RUnlock() + notifierUpdateReceived.WithLabelValues(update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc() + n.b.addOrPassthrough(update) +} - for key, c := range n.nodes { - if util.IsStringInSlice(ignore, key) { - continue - } +func (n *Notifier) NotifyByNodeID( + ctx context.Context, + update types.StateUpdate, + nodeID types.NodeID, +) { + start := time.Now() + notifierWaitersForLock.WithLabelValues("lock", "notify").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "notify").Dec() + notifierWaitForLock.WithLabelValues("notify").Observe(time.Since(start).Seconds()) + if n.closed { + return + } + + if c, ok := n.nodes[nodeID]; ok { select { case <-ctx.Done(): log.Error(). Err(ctx.Err()). - Str("mkey", key). - Any("origin", ctx.Value("origin")). - Any("hostname", ctx.Value("hostname")). + Uint64("node.id", nodeID.Uint64()). + Any("origin", types.NotifyOriginKey.Value(ctx)). + Any("origin-hostname", types.NotifyHostnameKey.Value(ctx)). 
Msgf("update not sent, context cancelled") + if debugHighCardinalityMetrics { + notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), types.NotifyOriginKey.Value(ctx), nodeID.String()).Inc() + } else { + notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc() + } return case c <- update: - log.Trace(). - Str("mkey", key). - Any("origin", ctx.Value("origin")). - Any("hostname", ctx.Value("hostname")). - Msgf("update successfully sent on chan") + n.tracef(nodeID, "update successfully sent on chan, origin: %s, origin-hostname: %s", ctx.Value("origin"), ctx.Value("hostname")) + if debugHighCardinalityMetrics { + notifierUpdateSent.WithLabelValues("ok", update.Type.String(), types.NotifyOriginKey.Value(ctx), nodeID.String()).Inc() + } else { + notifierUpdateSent.WithLabelValues("ok", update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc() + } } } } -func (n *Notifier) NotifyByMachineKey( - ctx context.Context, - update types.StateUpdate, - mKey key.MachinePublic, -) { - log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify") - defer log.Trace(). - Caller(). - Interface("type", update.Type). - Msg("releasing lock, finished notifying") +func (n *Notifier) sendAll(update types.StateUpdate) { + start := time.Now() + notifierWaitersForLock.WithLabelValues("lock", "send-all").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "send-all").Dec() + notifierWaitForLock.WithLabelValues("send-all").Observe(time.Since(start).Seconds()) - n.l.RLock() - defer n.l.RUnlock() + if n.closed { + return + } - if c, ok := n.nodes[mKey.String()]; ok { + for id, c := range n.nodes { + // Whenever an update is sent to all nodes, there is a chance that the node + // has disconnected and the goroutine that was supposed to consume the update + // has shut down the channel and is waiting for the lock held here in RemoveNode. 
+ // This means that there is potential for a deadlock which would stop all updates + // going out to clients. This timeout prevents that from happening by moving on to the + // next node if the context is cancelled. Afther sendAll releases the lock, the add/remove + // call will succeed and the update will go to the correct nodes on the next call. + ctx, cancel := context.WithTimeout(context.Background(), n.cfg.Tuning.NotifierSendTimeout) + defer cancel() select { case <-ctx.Done(): log.Error(). Err(ctx.Err()). - Str("mkey", mKey.String()). - Any("origin", ctx.Value("origin")). - Any("hostname", ctx.Value("hostname")). + Uint64("node.id", id.Uint64()). Msgf("update not sent, context cancelled") + if debugHighCardinalityMetrics { + notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), "send-all", id.String()).Inc() + } else { + notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), "send-all").Inc() + } return case c <- update: - log.Trace(). - Str("mkey", mKey.String()). - Any("origin", ctx.Value("origin")). - Any("hostname", ctx.Value("hostname")). 
- Msgf("update successfully sent on chan") + if debugHighCardinalityMetrics { + notifierUpdateSent.WithLabelValues("ok", update.Type.String(), "send-all", id.String()).Inc() + } else { + notifierUpdateSent.WithLabelValues("ok", update.Type.String(), "send-all").Inc() + } } } } func (n *Notifier) String() string { - n.l.RLock() - defer n.l.RUnlock() + notifierWaitersForLock.WithLabelValues("lock", "string").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "string").Dec() - str := []string{"Notifier, in map:\n"} + var b strings.Builder + fmt.Fprintf(&b, "chans (%d):\n", len(n.nodes)) - for k, v := range n.nodes { - str = append(str, fmt.Sprintf("\t%s: %v\n", k, v)) + var keys []types.NodeID + n.connected.Range(func(key types.NodeID, value bool) bool { + keys = append(keys, key) + return true + }) + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + + for _, key := range keys { + fmt.Fprintf(&b, "\t%d: %p\n", key, n.nodes[key]) } - return strings.Join(str, "") + b.WriteString("\n") + fmt.Fprintf(&b, "connected (%d):\n", len(n.nodes)) + + for _, key := range keys { + val, _ := n.connected.Load(key) + fmt.Fprintf(&b, "\t%d: %t\n", key, val) + } + + return b.String() +} + +type batcher struct { + tick *time.Ticker + + mu sync.Mutex + + cancelCh chan struct{} + + changedNodeIDs set.Slice[types.NodeID] + nodesChanged bool + patches map[types.NodeID]tailcfg.PeerChange + patchesChanged bool + + n *Notifier +} + +func newBatcher(batchTime time.Duration, n *Notifier) *batcher { + return &batcher{ + tick: time.NewTicker(batchTime), + cancelCh: make(chan struct{}), + patches: make(map[types.NodeID]tailcfg.PeerChange), + n: n, + } +} + +func (b *batcher) close() { + b.cancelCh <- struct{}{} +} + +// addOrPassthrough adds the update to the batcher, if it is not a +// type that is currently batched, it will be sent immediately. 
+func (b *batcher) addOrPassthrough(update types.StateUpdate) { + notifierBatcherWaitersForLock.WithLabelValues("lock", "add").Inc() + b.mu.Lock() + defer b.mu.Unlock() + notifierBatcherWaitersForLock.WithLabelValues("lock", "add").Dec() + + switch update.Type { + case types.StatePeerChanged: + b.changedNodeIDs.Add(update.ChangeNodes...) + b.nodesChanged = true + notifierBatcherChanges.WithLabelValues().Set(float64(b.changedNodeIDs.Len())) + + case types.StatePeerChangedPatch: + for _, newPatch := range update.ChangePatches { + if curr, ok := b.patches[types.NodeID(newPatch.NodeID)]; ok { + overwritePatch(&curr, newPatch) + b.patches[types.NodeID(newPatch.NodeID)] = curr + } else { + b.patches[types.NodeID(newPatch.NodeID)] = *newPatch + } + } + b.patchesChanged = true + notifierBatcherPatches.WithLabelValues().Set(float64(len(b.patches))) + + default: + b.n.sendAll(update) + } +} + +// flush sends all the accumulated patches to all +// nodes in the notifier. +func (b *batcher) flush() { + notifierBatcherWaitersForLock.WithLabelValues("lock", "flush").Inc() + b.mu.Lock() + defer b.mu.Unlock() + notifierBatcherWaitersForLock.WithLabelValues("lock", "flush").Dec() + + if b.nodesChanged || b.patchesChanged { + var patches []*tailcfg.PeerChange + // If a node is getting a full update from a change + // node update, then the patch can be dropped. 
+ for nodeID, patch := range b.patches { + if b.changedNodeIDs.Contains(nodeID) { + delete(b.patches, nodeID) + } else { + patches = append(patches, &patch) + } + } + + changedNodes := b.changedNodeIDs.Slice().AsSlice() + sort.Slice(changedNodes, func(i, j int) bool { + return changedNodes[i] < changedNodes[j] + }) + + if b.changedNodeIDs.Slice().Len() > 0 { + update := types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: changedNodes, + } + + b.n.sendAll(update) + } + + if len(patches) > 0 { + patchUpdate := types.StateUpdate{ + Type: types.StatePeerChangedPatch, + ChangePatches: patches, + } + + b.n.sendAll(patchUpdate) + } + + b.changedNodeIDs = set.Slice[types.NodeID]{} + notifierBatcherChanges.WithLabelValues().Set(0) + b.nodesChanged = false + b.patches = make(map[types.NodeID]tailcfg.PeerChange, len(b.patches)) + notifierBatcherPatches.WithLabelValues().Set(0) + b.patchesChanged = false + } +} + +func (b *batcher) doWork() { + for { + select { + case <-b.cancelCh: + return + case <-b.tick.C: + b.flush() + } + } +} + +// overwritePatch takes the current patch and a newer patch +// and override any field that has changed. 
+func overwritePatch(currPatch, newPatch *tailcfg.PeerChange) { + if newPatch.DERPRegion != 0 { + currPatch.DERPRegion = newPatch.DERPRegion + } + + if newPatch.Cap != 0 { + currPatch.Cap = newPatch.Cap + } + + if newPatch.CapMap != nil { + currPatch.CapMap = newPatch.CapMap + } + + if newPatch.Endpoints != nil { + currPatch.Endpoints = newPatch.Endpoints + } + + if newPatch.Key != nil { + currPatch.Key = newPatch.Key + } + + if newPatch.KeySignature != nil { + currPatch.KeySignature = newPatch.KeySignature + } + + if newPatch.DiscoKey != nil { + currPatch.DiscoKey = newPatch.DiscoKey + } + + if newPatch.Online != nil { + currPatch.Online = newPatch.Online + } + + if newPatch.LastSeen != nil { + currPatch.LastSeen = newPatch.LastSeen + } + + if newPatch.KeyExpiry != nil { + currPatch.KeyExpiry = newPatch.KeyExpiry + } } diff --git a/hscontrol/notifier/notifier_test.go b/hscontrol/notifier/notifier_test.go new file mode 100644 index 00000000..c41e0039 --- /dev/null +++ b/hscontrol/notifier/notifier_test.go @@ -0,0 +1,265 @@ +package notifier + +import ( + "context" + "net/netip" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/juanfont/headscale/hscontrol/types" + "github.com/juanfont/headscale/hscontrol/util" + "tailscale.com/tailcfg" +) + +func TestBatcher(t *testing.T) { + tests := []struct { + name string + updates []types.StateUpdate + want []types.StateUpdate + }{ + { + name: "full-passthrough", + updates: []types.StateUpdate{ + { + Type: types.StateFullUpdate, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StateFullUpdate, + }, + }, + }, + { + name: "derp-passthrough", + updates: []types.StateUpdate{ + { + Type: types.StateDERPUpdated, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StateDERPUpdated, + }, + }, + }, + { + name: "single-node-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: 
types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, + }, + }, + }, + }, + { + name: "merge-node-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, 4, + }, + }, + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, 3, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, 3, 4, + }, + }, + }, + }, + { + name: "single-patch-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 5, + }, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 5, + }, + }, + }, + }, + }, + { + name: "merge-patch-to-same-node-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 5, + }, + }, + }, + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 6, + }, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 6, + }, + }, + }, + }, + }, + { + name: "merge-patch-to-multiple-node-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 3, + Endpoints: []netip.AddrPort{ + netip.MustParseAddrPort("1.1.1.1:9090"), + }, + }, + }, + }, + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 3, + Endpoints: []netip.AddrPort{ + netip.MustParseAddrPort("1.1.1.1:9090"), + netip.MustParseAddrPort("2.2.2.2:8080"), + }, + }, + }, + }, + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 4, + DERPRegion: 6, + }, + }, + }, + { + Type: 
types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 4, + Cap: tailcfg.CapabilityVersion(54), + }, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 3, + Endpoints: []netip.AddrPort{ + netip.MustParseAddrPort("1.1.1.1:9090"), + netip.MustParseAddrPort("2.2.2.2:8080"), + }, + }, + { + NodeID: 4, + DERPRegion: 6, + Cap: tailcfg.CapabilityVersion(54), + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := NewNotifier(&types.Config{ + Tuning: types.Tuning{ + // We will call flush manually for the tests, + // so do not run the worker. + BatchChangeDelay: time.Hour, + + // Since we do not load the config, we wont get the + // default, so set it manually so we dont time out + // and have flakes. + NotifierSendTimeout: time.Second, + }, + }) + + ch := make(chan types.StateUpdate, 30) + defer close(ch) + n.AddNode(1, ch) + defer n.RemoveNode(1, ch) + + for _, u := range tt.updates { + n.NotifyAll(context.Background(), u) + } + + n.b.flush() + + var got []types.StateUpdate + for len(ch) > 0 { + out := <-ch + got = append(got, out) + } + + // Make the inner order stable for comparison. 
+ for _, u := range got { + sort.Slice(u.ChangeNodes, func(i, j int) bool { + return u.ChangeNodes[i] < u.ChangeNodes[j] + }) + sort.Slice(u.ChangePatches, func(i, j int) bool { + return u.ChangePatches[i].NodeID < u.ChangePatches[j].NodeID + }) + } + + if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" { + t.Errorf("batcher() unexpected result (-want +got):\n%s", diff) + } + }) + } +} diff --git a/hscontrol/oidc.go b/hscontrol/oidc.go index f3c1c171..58aa0e77 100644 --- a/hscontrol/oidc.go +++ b/hscontrol/oidc.go @@ -10,6 +10,7 @@ import ( "fmt" "html/template" "net/http" + "slices" "strings" "time" @@ -56,14 +57,8 @@ func (h *Headscale) initOIDC() error { // grab oidc config if it hasn't been already if h.oauth2Config == nil { h.oidcProvider, err = oidc.NewProvider(context.Background(), h.cfg.OIDC.Issuer) - if err != nil { - log.Error(). - Err(err). - Caller(). - Msgf("Could not retrieve OIDC Config: %s", err.Error()) - - return err + return fmt.Errorf("creating OIDC provider from issuer config: %w", err) } h.oauth2Config = &oauth2.Config{ @@ -403,7 +398,7 @@ func validateOIDCAllowedDomains( ) error { if len(allowedDomains) > 0 { if at := strings.LastIndex(claims.Email, "@"); at < 0 || - !util.IsStringInSlice(allowedDomains, claims.Email[at+1:]) { + !slices.Contains(allowedDomains, claims.Email[at+1:]) { log.Trace().Msg("authenticated principal does not match any allowed domain") writer.Header().Set("Content-Type", "text/plain; charset=utf-8") @@ -431,7 +426,7 @@ func validateOIDCAllowedGroups( ) error { if len(allowedGroups) > 0 { for _, group := range allowedGroups { - if util.IsStringInSlice(claims.Groups, group) { + if slices.Contains(claims.Groups, group) { return nil } } @@ -458,7 +453,7 @@ func validateOIDCAllowedUsers( claims *IDTokenClaims, ) error { if len(allowedUsers) > 0 && - !util.IsStringInSlice(allowedUsers, claims.Email) { + !slices.Contains(allowedUsers, claims.Email) { log.Trace().Msg("authenticated principal does not match any 
allowed user") writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusBadRequest) @@ -518,12 +513,6 @@ func (h *Headscale) validateNodeForOIDCCallback( User: claims.Email, Verb: "Reauthenticated", }); err != nil { - log.Error(). - Str("func", "OIDCCallback"). - Str("type", "reauthenticate"). - Err(err). - Msg("Could not render OIDC callback template") - writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("Could not render OIDC callback template")) @@ -531,7 +520,7 @@ func (h *Headscale) validateNodeForOIDCCallback( util.LogErr(err, "Failed to write response") } - return nil, true, err + return nil, true, fmt.Errorf("rendering OIDC callback template: %w", err) } writer.Header().Set("Content-Type", "text/html; charset=utf-8") @@ -541,11 +530,18 @@ func (h *Headscale) validateNodeForOIDCCallback( util.LogErr(err, "Failed to write response") } - stateUpdate := types.StateUpdateExpire(node.ID, expiry) - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "oidc-expiry", "na") - h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + ctx := types.NotifyCtx(context.Background(), "oidc-expiry-self", node.Hostname) + h.nodeNotifier.NotifyByNodeID( + ctx, + types.StateUpdate{ + Type: types.StateSelfUpdate, + ChangeNodes: []types.NodeID{node.ID}, + }, + node.ID, + ) + + ctx = types.NotifyCtx(context.Background(), "oidc-expiry-peers", node.Hostname) + h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, expiry), node.ID) return nil, true, nil } @@ -586,10 +582,6 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback( if errors.Is(err, db.ErrUserNotFound) { user, err = h.db.CreateUser(userName) if err != nil { - log.Error(). - Err(err). - Caller(). 
- Msgf("could not create new user '%s'", userName) writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("could not create user")) @@ -597,14 +589,9 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback( util.LogErr(err, "Failed to write response") } - return nil, err + return nil, fmt.Errorf("creating new user: %w", err) } } else if err != nil { - log.Error(). - Caller(). - Err(err). - Str("user", userName). - Msg("could not find or create user") writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("could not find or create user")) @@ -612,7 +599,7 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback( util.LogErr(err, "Failed to write response") } - return nil, err + return nil, fmt.Errorf("find or create user: %w", err) } return user, nil @@ -624,12 +611,12 @@ func (h *Headscale) registerNodeForOIDCCallback( machineKey *key.MachinePublic, expiry time.Time, ) error { - addrs, err := h.ipAlloc.Next() + ipv4, ipv6, err := h.ipAlloc.Next() if err != nil { return err } - if err := h.db.DB.Transaction(func(tx *gorm.DB) error { + if err := h.db.Write(func(tx *gorm.DB) error { if _, err := db.RegisterNodeFromAuthCallback( // TODO(kradalby): find a better way to use the cache across modules tx, @@ -638,7 +625,7 @@ func (h *Headscale) registerNodeForOIDCCallback( user.Name, &expiry, util.RegisterMethodOIDC, - addrs, + ipv4, ipv6, ); err != nil { return err } @@ -668,12 +655,6 @@ func renderOIDCCallbackTemplate( User: claims.Email, Verb: "Authenticated", }); err != nil { - log.Error(). - Str("func", "OIDCCallback"). - Str("type", "authenticate"). - Err(err). 
- Msg("Could not render OIDC callback template") - writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("Could not render OIDC callback template")) @@ -681,7 +662,7 @@ func renderOIDCCallbackTemplate( util.LogErr(err, "Failed to write response") } - return nil, err + return nil, fmt.Errorf("rendering OIDC callback template: %w", err) } return &content, nil diff --git a/hscontrol/platform_config.go b/hscontrol/platform_config.go index 0404f546..9844a606 100644 --- a/hscontrol/platform_config.go +++ b/hscontrol/platform_config.go @@ -59,46 +59,6 @@ func (h *Headscale) WindowsConfigMessage( } } -// WindowsRegConfig generates and serves a .reg file configured with the Headscale server address. -func (h *Headscale) WindowsRegConfig( - writer http.ResponseWriter, - req *http.Request, -) { - config := WindowsRegistryConfig{ - URL: h.cfg.ServerURL, - } - - var content bytes.Buffer - if err := windowsRegTemplate.Execute(&content, config); err != nil { - log.Error(). - Str("handler", "WindowsRegConfig"). - Err(err). - Msg("Could not render Apple macOS template") - - writer.Header().Set("Content-Type", "text/plain; charset=utf-8") - writer.WriteHeader(http.StatusInternalServerError) - _, err := writer.Write([]byte("Could not render Windows registry template")) - if err != nil { - log.Error(). - Caller(). - Err(err). - Msg("Failed to write response") - } - - return - } - - writer.Header().Set("Content-Type", "text/x-ms-regedit; charset=utf-8") - writer.WriteHeader(http.StatusOK) - _, err := writer.Write(content.Bytes()) - if err != nil { - log.Error(). - Caller(). - Err(err). - Msg("Failed to write response") - } -} - // AppleConfigMessage shows a simple message in the browser to point the user to the iOS/MacOS profile and instructions for how to install it. 
func (h *Headscale) AppleConfigMessage( writer http.ResponseWriter, @@ -305,10 +265,6 @@ func (h *Headscale) ApplePlatformConfig( } } -type WindowsRegistryConfig struct { - URL string -} - type AppleMobileConfig struct { UUID uuid.UUID URL string @@ -320,14 +276,6 @@ type AppleMobilePlatformConfig struct { URL string } -var windowsRegTemplate = textTemplate.Must( - textTemplate.New("windowsconfig").Parse(`Windows Registry Editor Version 5.00 - -[HKEY_LOCAL_MACHINE\SOFTWARE\Tailscale IPN] -"UnattendedMode"="always" -"LoginURL"="{{.URL}}" -`)) - var commonTemplate = textTemplate.Must( textTemplate.New("mobileconfig").Parse(` diff --git a/hscontrol/policy/acls.go b/hscontrol/policy/acls.go index 2ccc56b4..2b3a50f7 100644 --- a/hscontrol/policy/acls.go +++ b/hscontrol/policy/acls.go @@ -7,7 +7,6 @@ import ( "io" "net/netip" "os" - "path/filepath" "strconv" "strings" "time" @@ -17,7 +16,6 @@ import ( "github.com/rs/zerolog/log" "github.com/tailscale/hujson" "go4.org/netipx" - "gopkg.in/yaml.v3" "tailscale.com/tailcfg" ) @@ -36,6 +34,38 @@ const ( expectedTokenItems = 2 ) +var theInternetSet *netipx.IPSet + +// theInternet returns the IPSet for the Internet. 
+// https://www.youtube.com/watch?v=iDbyYGrswtg +func theInternet() *netipx.IPSet { + if theInternetSet != nil { + return theInternetSet + } + + var internetBuilder netipx.IPSetBuilder + internetBuilder.AddPrefix(netip.MustParsePrefix("2000::/3")) + internetBuilder.AddPrefix(netip.MustParsePrefix("0.0.0.0/0")) + + // Delete Private network addresses + // https://datatracker.ietf.org/doc/html/rfc1918 + internetBuilder.RemovePrefix(netip.MustParsePrefix("fc00::/7")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("10.0.0.0/8")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("172.16.0.0/12")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("192.168.0.0/16")) + + // Delete Tailscale networks + internetBuilder.RemovePrefix(netip.MustParsePrefix("fd7a:115c:a1e0::/48")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("100.64.0.0/10")) + + // Delete "cant find DHCP networks" + internetBuilder.RemovePrefix(netip.MustParsePrefix("fe80::/10")) // link-loca + internetBuilder.RemovePrefix(netip.MustParsePrefix("169.254.0.0/16")) + + theInternetSet, _ := internetBuilder.IPSet() + return theInternetSet +} + // For some reason golang.org/x/net/internal/iana is an internal package. const ( protocolICMP = 1 // Internet Control Message @@ -76,35 +106,22 @@ func LoadACLPolicyFromPath(path string) (*ACLPolicy, error) { Bytes("file", policyBytes). 
Msg("Loading ACLs") - switch filepath.Ext(path) { - case ".yml", ".yaml": - return LoadACLPolicyFromBytes(policyBytes, "yaml") - } - - return LoadACLPolicyFromBytes(policyBytes, "hujson") + return LoadACLPolicyFromBytes(policyBytes) } -func LoadACLPolicyFromBytes(acl []byte, format string) (*ACLPolicy, error) { +func LoadACLPolicyFromBytes(acl []byte) (*ACLPolicy, error) { var policy ACLPolicy - switch format { - case "yaml": - err := yaml.Unmarshal(acl, &policy) - if err != nil { - return nil, err - } - default: - ast, err := hujson.Parse(acl) - if err != nil { - return nil, err - } + ast, err := hujson.Parse(acl) + if err != nil { + return nil, fmt.Errorf("parsing hujson, err: %w", err) + } - ast.Standardize() - acl = ast.Pack() - err = json.Unmarshal(acl, &policy) - if err != nil { - return nil, err - } + ast.Standardize() + acl = ast.Pack() + + if err := json.Unmarshal(acl, &policy); err != nil { + return nil, fmt.Errorf("unmarshalling policy, err: %w", err) } if policy.IsZero() { @@ -114,7 +131,7 @@ func LoadACLPolicyFromBytes(acl []byte, format string) (*ACLPolicy, error) { return &policy, nil } -func GenerateFilterAndSSHRules( +func GenerateFilterAndSSHRulesForTests( policy *ACLPolicy, node *types.Node, peers types.Nodes, @@ -124,67 +141,49 @@ func GenerateFilterAndSSHRules( return tailcfg.FilterAllowAll, &tailcfg.SSHPolicy{}, nil } - rules, err := policy.generateFilterRules(node, peers) + rules, err := policy.CompileFilterRules(append(peers, node)) if err != nil { return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err } log.Trace().Interface("ACL", rules).Str("node", node.GivenName).Msg("ACL rules") - var sshPolicy *tailcfg.SSHPolicy - sshRules, err := policy.generateSSHRules(node, peers) + sshPolicy, err := policy.CompileSSHPolicy(node, peers) if err != nil { return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err } - log.Trace(). - Interface("SSH", sshRules). - Str("node", node.GivenName). 
- Msg("SSH rules") - - if sshPolicy == nil { - sshPolicy = &tailcfg.SSHPolicy{} - } - sshPolicy.Rules = sshRules - return rules, sshPolicy, nil } -// generateFilterRules takes a set of nodes and an ACLPolicy and generates a +// CompileFilterRules takes a set of nodes and an ACLPolicy and generates a // set of Tailscale compatible FilterRules used to allow traffic on clients. -func (pol *ACLPolicy) generateFilterRules( - node *types.Node, - peers types.Nodes, +func (pol *ACLPolicy) CompileFilterRules( + nodes types.Nodes, ) ([]tailcfg.FilterRule, error) { - rules := []tailcfg.FilterRule{} - nodes := append(peers, node) + if pol == nil { + return tailcfg.FilterAllowAll, nil + } + + var rules []tailcfg.FilterRule for index, acl := range pol.ACLs { if acl.Action != "accept" { return nil, ErrInvalidAction } - srcIPs := []string{} + var srcIPs []string for srcIndex, src := range acl.Sources { srcs, err := pol.expandSource(src, nodes) if err != nil { - log.Error(). - Interface("src", src). - Int("ACL index", index). - Int("Src index", srcIndex). - Msgf("Error parsing ACL") - - return nil, err + return nil, fmt.Errorf("parsing policy, acl index: %d->%d: %w", index, srcIndex, err) } srcIPs = append(srcIPs, srcs...) } protocols, isWildcard, err := parseProtocol(acl.Protocol) if err != nil { - log.Error(). - Msgf("Error parsing ACL %d. protocol unknown %s", index, acl.Protocol) - - return nil, err + return nil, fmt.Errorf("parsing policy, protocol err: %w ", err) } destPorts := []tailcfg.NetPortRange{} @@ -207,7 +206,7 @@ func (pol *ACLPolicy) generateFilterRules( return nil, err } - dests := []tailcfg.NetPortRange{} + var dests []tailcfg.NetPortRange for _, dest := range expanded.Prefixes() { for _, port := range *ports { pr := tailcfg.NetPortRange{ @@ -237,30 +236,29 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F for _, rule := range rules { // record if the rule is actually relevant for the given node. 
- dests := []tailcfg.NetPortRange{} - + var dests []tailcfg.NetPortRange + DEST_LOOP: for _, dest := range rule.DstPorts { expanded, err := util.ParseIPSet(dest.IP, nil) // Fail closed, if we cant parse it, then we should not allow // access. if err != nil { - continue + continue DEST_LOOP } - if node.IPAddresses.InIPSet(expanded) { + if node.InIPSet(expanded) { dests = append(dests, dest) + continue DEST_LOOP } // If the node exposes routes, ensure they are note removed // when the filters are reduced. if node.Hostinfo != nil { - // TODO(kradalby): Evaluate if we should only keep - // the routes if the route is enabled. This will - // require database access in this part of the code. if len(node.Hostinfo.RoutableIPs) > 0 { for _, routableIP := range node.Hostinfo.RoutableIPs { - if expanded.ContainsPrefix(routableIP) { + if expanded.OverlapsPrefix(routableIP) { dests = append(dests, dest) + continue DEST_LOOP } } } @@ -279,11 +277,15 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F return ret } -func (pol *ACLPolicy) generateSSHRules( +func (pol *ACLPolicy) CompileSSHPolicy( node *types.Node, peers types.Nodes, -) ([]*tailcfg.SSHRule, error) { - rules := []*tailcfg.SSHRule{} +) (*tailcfg.SSHPolicy, error) { + if pol == nil { + return nil, nil + } + + var rules []*tailcfg.SSHRule acceptAction := tailcfg.SSHAction{ Message: "", @@ -320,7 +322,7 @@ func (pol *ACLPolicy) generateSSHRules( return nil, err } - if !node.IPAddresses.InIPSet(destSet) { + if !node.InIPSet(destSet) { continue } @@ -331,16 +333,12 @@ func (pol *ACLPolicy) generateSSHRules( case "check": checkAction, err := sshCheckAction(sshACL.CheckPeriod) if err != nil { - log.Error(). - Msgf("Error parsing SSH %d, check action with unparsable duration '%s'", index, sshACL.CheckPeriod) + return nil, fmt.Errorf("parsing SSH policy, parsing check duration, index: %d: %w", index, err) } else { action = *checkAction } default: - log.Error(). 
- Msgf("Error parsing SSH %d, unknown action '%s', skipping", index, sshACL.Action) - - continue + return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", sshACL.Action, index, err) } principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources)) @@ -352,10 +350,7 @@ func (pol *ACLPolicy) generateSSHRules( } else if isGroup(rawSrc) { users, err := pol.expandUsersFromGroup(rawSrc) if err != nil { - log.Error(). - Msgf("Error parsing SSH %d, Source %d", index, innerIndex) - - return nil, err + return nil, fmt.Errorf("parsing SSH policy, expanding user from group, index: %d->%d: %w", index, innerIndex, err) } for _, user := range users { @@ -369,10 +364,7 @@ func (pol *ACLPolicy) generateSSHRules( rawSrc, ) if err != nil { - log.Error(). - Msgf("Error parsing SSH %d, Source %d", index, innerIndex) - - return nil, err + return nil, fmt.Errorf("parsing SSH policy, expanding alias, index: %d->%d: %w", index, innerIndex, err) } for _, expandedSrc := range expandedSrcs.Prefixes() { principals = append(principals, &tailcfg.SSHPrincipal{ @@ -393,7 +385,9 @@ func (pol *ACLPolicy) generateSSHRules( }) } - return rules, nil + return &tailcfg.SSHPolicy{ + Rules: rules, + }, nil } func sshCheckAction(duration string) (*tailcfg.SSHAction, error) { @@ -502,7 +496,7 @@ func parseProtocol(protocol string) ([]int, bool, error) { default: protocolNumber, err := strconv.Atoi(protocol) if err != nil { - return nil, false, err + return nil, false, fmt.Errorf("parsing protocol number: %w", err) } needsWildcard := protocolNumber != protocolTCP && protocolNumber != protocolUDP && @@ -523,8 +517,7 @@ func (pol *ACLPolicy) expandSource( return []string{}, err } - prefixes := []string{} - + var prefixes []string for _, prefix := range ipSet.Prefixes() { prefixes = append(prefixes, prefix.String()) } @@ -539,6 +532,7 @@ func (pol *ACLPolicy) expandSource( // - a host // - an ip // - a cidr +// - an autogroup // and transform these in IPAddresses. 
func (pol *ACLPolicy) ExpandAlias( nodes types.Nodes, @@ -564,6 +558,10 @@ func (pol *ACLPolicy) ExpandAlias( return pol.expandIPsFromTag(alias, nodes) } + if isAutoGroup(alias) { + return expandAutoGroup(alias) + } + // if alias is a user if ips, err := pol.expandIPsFromUser(alias, nodes); ips != nil { return ips, err @@ -600,8 +598,8 @@ func excludeCorrectlyTaggedNodes( nodes types.Nodes, user string, ) types.Nodes { - out := types.Nodes{} - tags := []string{} + var out types.Nodes + var tags []string for tag := range aclPolicy.TagOwners { owners, _ := expandOwnersFromTag(aclPolicy, user) ns := append(owners, user) @@ -646,7 +644,7 @@ func expandPorts(portsStr string, isWild bool) (*[]tailcfg.PortRange, error) { return nil, ErrWildcardIsNeeded } - ports := []tailcfg.PortRange{} + var ports []tailcfg.PortRange for _, portStr := range strings.Split(portsStr, ",") { log.Trace().Msgf("parsing portstring: %s", portStr) rang := strings.Split(portStr, "-") @@ -722,7 +720,7 @@ func expandOwnersFromTag( func (pol *ACLPolicy) expandUsersFromGroup( group string, ) ([]string, error) { - users := []string{} + var users []string log.Trace().Caller().Interface("pol", pol).Msg("test") aclGroups, ok := pol.Groups[group] if !ok { @@ -757,7 +755,7 @@ func (pol *ACLPolicy) expandIPsFromGroup( group string, nodes types.Nodes, ) (*netipx.IPSet, error) { - build := netipx.IPSetBuilder{} + var build netipx.IPSetBuilder users, err := pol.expandUsersFromGroup(group) if err != nil { @@ -766,7 +764,7 @@ func (pol *ACLPolicy) expandIPsFromGroup( for _, user := range users { filteredNodes := filterNodesByUser(nodes, user) for _, node := range filteredNodes { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } } @@ -777,12 +775,12 @@ func (pol *ACLPolicy) expandIPsFromTag( alias string, nodes types.Nodes, ) (*netipx.IPSet, error) { - build := netipx.IPSetBuilder{} + var build netipx.IPSetBuilder // check for forced tags for _, node := range nodes { if 
util.StringOrPrefixListContains(node.ForcedTags, alias) { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } } @@ -814,7 +812,7 @@ func (pol *ACLPolicy) expandIPsFromTag( } if util.StringOrPrefixListContains(node.Hostinfo.RequestTags, alias) { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } } } @@ -826,18 +824,18 @@ func (pol *ACLPolicy) expandIPsFromUser( user string, nodes types.Nodes, ) (*netipx.IPSet, error) { - build := netipx.IPSetBuilder{} + var build netipx.IPSetBuilder filteredNodes := filterNodesByUser(nodes, user) filteredNodes = excludeCorrectlyTaggedNodes(pol, filteredNodes, user) // shortcurcuit if we have no nodes to get ips from. if len(filteredNodes) == 0 { - return nil, nil //nolint + return nil, nil // nolint } for _, node := range filteredNodes { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } return build.IPSet() @@ -851,11 +849,11 @@ func (pol *ACLPolicy) expandIPsFromSingleIP( matches := nodes.FilterByIP(ip) - build := netipx.IPSetBuilder{} + var build netipx.IPSetBuilder build.Add(ip) for _, node := range matches { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } return build.IPSet() @@ -866,17 +864,17 @@ func (pol *ACLPolicy) expandIPsFromIPPrefix( nodes types.Nodes, ) (*netipx.IPSet, error) { log.Trace().Str("prefix", prefix.String()).Msg("expandAlias got prefix") - build := netipx.IPSetBuilder{} + var build netipx.IPSetBuilder build.AddPrefix(prefix) // This is suboptimal and quite expensive, but if we only add the prefix, we will miss all the relevant IPv6 // addresses for the hosts that belong to tailscale. This doesnt really affect stuff like subnet routers. for _, node := range nodes { - for _, ip := range node.IPAddresses { + for _, ip := range node.IPs() { // log.Trace(). 
// Msgf("checking if node ip (%s) is part of prefix (%s): %v, is single ip prefix (%v), addr: %s", ip.String(), prefix.String(), prefix.Contains(ip), prefix.IsSingleIP(), prefix.Addr().String()) if prefix.Contains(ip) { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } } } @@ -884,6 +882,16 @@ func (pol *ACLPolicy) expandIPsFromIPPrefix( return build.IPSet() } +func expandAutoGroup(alias string) (*netipx.IPSet, error) { + switch { + case strings.HasPrefix(alias, "autogroup:internet"): + return theInternet(), nil + + default: + return nil, fmt.Errorf("unknown autogroup %q", alias) + } +} + func isWildcard(str string) bool { return str == "*" } @@ -896,14 +904,18 @@ func isTag(str string) bool { return strings.HasPrefix(str, "tag:") } +func isAutoGroup(str string) bool { + return strings.HasPrefix(str, "autogroup:") +} + // TagsOfNode will return the tags of the current node. // Invalid tags are tags added by a user on a node, and that user doesn't have authority to add this tag. // Valid tags are tags added by a user that is allowed in the ACL policy to add this tag. func (pol *ACLPolicy) TagsOfNode( node *types.Node, ) ([]string, []string) { - validTags := make([]string, 0) - invalidTags := make([]string, 0) + var validTags []string + var invalidTags []string // TODO(kradalby): Why is this sometimes nil? coming from tailNode? 
if node == nil { @@ -944,7 +956,7 @@ func (pol *ACLPolicy) TagsOfNode( } func filterNodesByUser(nodes types.Nodes, user string) types.Nodes { - out := types.Nodes{} + var out types.Nodes for _, node := range nodes { if node.User.Name == user { out = append(out, node) @@ -960,7 +972,7 @@ func FilterNodesByACL( nodes types.Nodes, filter []tailcfg.FilterRule, ) types.Nodes { - result := types.Nodes{} + var result types.Nodes for index, peer := range nodes { if peer.ID == node.ID { diff --git a/hscontrol/policy/acls_test.go b/hscontrol/policy/acls_test.go index ff18dd05..6b2e0f97 100644 --- a/hscontrol/policy/acls_test.go +++ b/hscontrol/policy/acls_test.go @@ -16,6 +16,11 @@ import ( "tailscale.com/tailcfg" ) +var iap = func(ipStr string) *netip.Addr { + ip := netip.MustParseAddr(ipStr) + return &ip +} + func Test(t *testing.T) { check.TestingT(t) } @@ -316,44 +321,27 @@ func TestParsing(t *testing.T) { wantErr: false, }, { - name: "port-wildcard-yaml", - format: "yaml", + name: "ipv6", + format: "hujson", acl: ` ---- -hosts: - host-1: 100.100.100.100/32 - subnet-1: 100.100.101.100/24 -acls: - - action: accept - src: - - "*" - dst: - - host-1:* -`, - want: []tailcfg.FilterRule{ - { - SrcIPs: []string{"0.0.0.0/0", "::/0"}, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, - }, - }, - }, - wantErr: false, - }, +{ + "hosts": { + "host-1": "100.100.100.100/32", + "subnet-1": "100.100.101.100/24", + }, + + "acls": [ { - name: "ipv6-yaml", - format: "yaml", - acl: ` ---- -hosts: - host-1: 100.100.100.100/32 - subnet-1: 100.100.101.100/24 -acls: - - action: accept - src: - - "*" - dst: - - host-1:* + "action": "accept", + "src": [ + "*", + ], + "dst": [ + "host-1:*", + ], + }, + ], +} `, want: []tailcfg.FilterRule{ { @@ -369,7 +357,7 @@ acls: for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - pol, err := LoadACLPolicyFromBytes([]byte(tt.acl), tt.format) + pol, err := LoadACLPolicyFromBytes([]byte(tt.acl)) if 
tt.wantErr && err == nil { t.Errorf("parsing() error = %v, wantErr %v", err, tt.wantErr) @@ -385,15 +373,12 @@ acls: return } - rules, err := pol.generateFilterRules(&types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.100.100.100"), - }, - }, types.Nodes{ + rules, err := pol.CompileFilterRules(types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("200.200.200.200"), - }, + IPv4: iap("100.100.100.100"), + }, + &types.Node{ + IPv4: iap("200.200.200.200"), User: types.User{ Name: "testuser", }, @@ -530,7 +515,7 @@ func (s *Suite) TestRuleInvalidGeneration(c *check.C) { "example-host-2:80" ], "deny": [ - "exapmle-host-2:100" + "example-host-2:100" ], }, { @@ -542,11 +527,11 @@ func (s *Suite) TestRuleInvalidGeneration(c *check.C) { ], } `) - pol, err := LoadACLPolicyFromBytes(acl, "hujson") + pol, err := LoadACLPolicyFromBytes(acl) c.Assert(pol.ACLs, check.HasLen, 6) c.Assert(err, check.IsNil) - rules, err := pol.generateFilterRules(&types.Node{}, types.Nodes{}) + rules, err := pol.CompileFilterRules(types.Nodes{}) c.Assert(err, check.NotNil) c.Assert(rules, check.IsNil) } @@ -562,7 +547,7 @@ func (s *Suite) TestInvalidAction(c *check.C) { }, }, } - _, _, err := GenerateFilterAndSSHRules(pol, &types.Node{}, types.Nodes{}) + _, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}) c.Assert(errors.Is(err, ErrInvalidAction), check.Equals, true) } @@ -581,7 +566,7 @@ func (s *Suite) TestInvalidGroupInGroup(c *check.C) { }, }, } - _, _, err := GenerateFilterAndSSHRules(pol, &types.Node{}, types.Nodes{}) + _, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}) c.Assert(errors.Is(err, ErrInvalidGroup), check.Equals, true) } @@ -597,7 +582,7 @@ func (s *Suite) TestInvalidTagOwners(c *check.C) { }, } - _, _, err := GenerateFilterAndSSHRules(pol, &types.Node{}, types.Nodes{}) + _, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}) 
c.Assert(errors.Is(err, ErrInvalidTag), check.Equals, true) } @@ -633,7 +618,7 @@ func Test_expandGroup(t *testing.T) { wantErr: false, }, { - name: "InexistantGroup", + name: "InexistentGroup", field: field{ pol: ACLPolicy{ Groups: Groups{ @@ -941,7 +926,7 @@ func Test_listNodesInUser(t *testing.T) { }, user: "mickael", }, - want: types.Nodes{}, + want: nil, }, } for _, test := range tests { @@ -996,12 +981,10 @@ func Test_expandAlias(t *testing.T) { alias: "*", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, + IPv4: iap("100.64.0.1"), }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.78.84.227"), - }, + IPv4: iap("100.78.84.227"), }, }, }, @@ -1022,27 +1005,19 @@ func Test_expandAlias(t *testing.T) { alias: "group:accountant", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1063,27 +1038,19 @@ func Test_expandAlias(t *testing.T) { alias: "group:hr", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: 
types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1128,9 +1095,7 @@ func Test_expandAlias(t *testing.T) { alias: "10.0.0.1", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("10.0.0.1"), - }, + IPv4: iap("10.0.0.1"), User: types.User{Name: "mickael"}, }, }, @@ -1149,10 +1114,8 @@ func Test_expandAlias(t *testing.T) { alias: "10.0.0.1", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("10.0.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - }, + IPv4: iap("10.0.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), User: types.User{Name: "mickael"}, }, }, @@ -1171,10 +1134,8 @@ func Test_expandAlias(t *testing.T) { alias: "fd7a:115c:a1e0:ab12:4843:2222:6273:2222", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("10.0.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - }, + IPv4: iap("10.0.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), User: types.User{Name: "mickael"}, }, }, @@ -1240,9 +1201,7 @@ func Test_expandAlias(t *testing.T) { alias: "tag:hr-webserver", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1251,9 +1210,7 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1262,15 +1219,11 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: 
"marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, }, }, @@ -1294,27 +1247,19 @@ func Test_expandAlias(t *testing.T) { alias: "tag:hr-webserver", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1331,29 +1276,21 @@ func Test_expandAlias(t *testing.T) { alias: "tag:hr-webserver", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, ForcedTags: []string{"tag:hr-webserver"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, ForcedTags: []string{"tag:hr-webserver"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1374,16 +1311,12 @@ func Test_expandAlias(t *testing.T) { alias: "tag:hr-webserver", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, ForcedTags: 
[]string{"tag:hr-webserver"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1392,15 +1325,11 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1419,9 +1348,7 @@ func Test_expandAlias(t *testing.T) { alias: "joe", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1430,9 +1357,7 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1441,16 +1366,12 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, Hostinfo: &tailcfg.Hostinfo{}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1498,9 +1419,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1509,9 +1428,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: 
types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1520,9 +1437,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1531,9 +1446,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, want: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, - User: types.User{Name: "joe"}, - Hostinfo: &tailcfg.Hostinfo{}, + IPv4: iap("100.64.0.4"), + User: types.User{Name: "joe"}, + Hostinfo: &tailcfg.Hostinfo{}, }, }, }, @@ -1550,9 +1465,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1561,9 +1474,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1572,9 +1483,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1583,9 +1492,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, want: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, - User: types.User{Name: "joe"}, - Hostinfo: &tailcfg.Hostinfo{}, + IPv4: iap("100.64.0.4"), + User: types.User{Name: "joe"}, + Hostinfo: &tailcfg.Hostinfo{}, }, }, }, @@ -1597,9 +1506,7 @@ func 
Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1608,17 +1515,13 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, ForcedTags: []string{"tag:accountant-webserver"}, Hostinfo: &tailcfg.Hostinfo{}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1627,9 +1530,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, want: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, - User: types.User{Name: "joe"}, - Hostinfo: &tailcfg.Hostinfo{}, + IPv4: iap("100.64.0.4"), + User: types.User{Name: "joe"}, + Hostinfo: &tailcfg.Hostinfo{}, }, }, }, @@ -1641,9 +1544,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1652,9 +1553,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1663,9 +1562,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1674,9 +1571,7 @@ func 
Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, want: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1685,9 +1580,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1696,9 +1589,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1724,8 +1615,7 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { pol ACLPolicy } type args struct { - node *types.Node - peers types.Nodes + nodes types.Nodes } tests := []struct { name string @@ -1738,7 +1628,7 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { name: "no-policy", field: field{}, args: args{}, - want: []tailcfg.FilterRule{}, + want: nil, wantErr: false, }, { @@ -1755,13 +1645,12 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { }, }, args: args{ - node: &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), + nodes: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), }, }, - peers: types.Nodes{}, }, want: []tailcfg.FilterRule{ { @@ -1800,19 +1689,15 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { }, }, args: args{ - node: &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), - }, - User: types.User{Name: "mickael"}, - }, - peers: types.Nodes{ + nodes: types.Nodes{ &types.Node{ 
- IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - }, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), + User: types.User{Name: "mickael"}, + }, + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), User: types.User{Name: "mickael"}, }, }, @@ -1846,9 +1731,8 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tt.field.pol.generateFilterRules( - tt.args.node, - tt.args.peers, + got, err := tt.field.pol.CompileFilterRules( + tt.args.nodes, ) if (err != nil) != tt.wantErr { t.Errorf("ACLgenerateFilterRules() error = %v, wantErr %v", err, tt.wantErr) @@ -1864,6 +1748,108 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { } } +// tsExitNodeDest is the list of destination IP ranges that are allowed when +// you dump the filter list from a Tailscale node connected to Tailscale SaaS. +var tsExitNodeDest = []tailcfg.NetPortRange{ + { + IP: "0.0.0.0-9.255.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "11.0.0.0-100.63.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "100.128.0.0-169.253.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "169.255.0.0-172.15.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "172.32.0.0-192.167.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "192.169.0.0-255.255.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "2000::-3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + Ports: tailcfg.PortRangeAny, + }, +} + +// hsExitNodeDest is the list of destination IP ranges that are allowed when +// we use headscale "autogroup:internet". 
+var hsExitNodeDest = []tailcfg.NetPortRange{ + {IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny}, + {IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny}, 
+ {IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "2000::/3", Ports: tailcfg.PortRangeAny}, +} + +func TestTheInternet(t *testing.T) { + internetSet := theInternet() + + internetPrefs := internetSet.Prefixes() + + for i := range internetPrefs { + if internetPrefs[i].String() != hsExitNodeDest[i].IP { + t.Errorf("prefix from internet set %q != hsExit list %q", internetPrefs[i].String(), hsExitNodeDest[i].IP) + } + } + + if len(internetPrefs) != len(hsExitNodeDest) { + t.Fatalf("expected same length of prefixes, internet: %d, hsExit: %d", len(internetPrefs), len(hsExitNodeDest)) + } +} + func TestReduceFilterRules(t *testing.T) { tests := []struct { name string @@ -1884,18 +1870,14 @@ func TestReduceFilterRules(t *testing.T) { }, }, node: &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), - }, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), User: types.User{Name: "mickael"}, }, peers: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - }, + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), User: types.User{Name: "mickael"}, }, }, @@ -1921,10 +1903,8 @@ func TestReduceFilterRules(t *testing.T) { }, }, node: &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - 
netip.MustParseAddr("fd7a:115c:a1e0::1"), - }, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), User: types.User{Name: "user1"}, Hostinfo: &tailcfg.Hostinfo{ RoutableIPs: []netip.Prefix{ @@ -1934,10 +1914,8 @@ func TestReduceFilterRules(t *testing.T) { }, peers: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0::2"), - }, + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), User: types.User{Name: "user1"}, }, }, @@ -1976,16 +1954,473 @@ func TestReduceFilterRules(t *testing.T) { }, }, }, + { + name: "1786-reducing-breaks-exit-nodes-the-client", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "autogroup:internet:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + // "internal" exit node + &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{types.ExitRouteV4, types.ExitRouteV6}, + }, + }, + }, + want: []tailcfg.FilterRule{}, + }, + { + name: "1786-reducing-breaks-exit-nodes-the-exit", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + 
}, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "autogroup:internet:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{types.ExitRouteV4, types.ExitRouteV6}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: hsExitNodeDest, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-the-example-from-issue", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "0.0.0.0/5:*", + "8.0.0.0/7:*", + "11.0.0.0/8:*", + "12.0.0.0/6:*", + "16.0.0.0/4:*", + "32.0.0.0/3:*", + "64.0.0.0/2:*", + "128.0.0.0/3:*", + "160.0.0.0/5:*", + "168.0.0.0/6:*", + "172.0.0.0/12:*", + "172.32.0.0/11:*", + "172.64.0.0/10:*", + "172.128.0.0/9:*", + "173.0.0.0/8:*", + "174.0.0.0/7:*", + "176.0.0.0/4:*", + "192.0.0.0/9:*", + "192.128.0.0/11:*", + "192.160.0.0/13:*", + "192.169.0.0/16:*", + "192.170.0.0/15:*", + 
"192.172.0.0/14:*", + "192.176.0.0/12:*", + "192.192.0.0/10:*", + "193.0.0.0/8:*", + "194.0.0.0/7:*", + "196.0.0.0/6:*", + "200.0.0.0/5:*", + "208.0.0.0/4:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{types.ExitRouteV4, types.ExitRouteV6}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::1/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::2/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::100/128", Ports: tailcfg.PortRangeAny}, + {IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny}, + 
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny}, + {IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-app-connector-like", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "8.0.0.0/8:*", + "16.0.0.0/8:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + 
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "8.0.0.0/8", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "16.0.0.0/8", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-app-connector-like2", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "8.0.0.0/16:*", + "16.0.0.0/16:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", 
"fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "8.0.0.0/16", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "16.0.0.0/16", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, + { + name: "1817-reduce-breaks-32-mask", + pol: ACLPolicy{ + Hosts: Hosts{ + "vlan1": netip.MustParsePrefix("172.16.0.0/24"), + "dns1": netip.MustParsePrefix("172.16.0.21/32"), + }, + Groups: Groups{ + "group:access": {"user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:access"}, + Destinations: []string{ + "tag:access-servers:*", + "dns1:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")}, + }, + ForcedTags: types.StringList{"tag:access-servers"}, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "172.16.0.21/32", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - rules, _ := tt.pol.generateFilterRules( - tt.node, - tt.peers, + got, _ := tt.pol.CompileFilterRules( + append(tt.peers, tt.node), ) - got := ReduceFilterRules(tt.node, rules) + got = ReduceFilterRules(tt.node, got) if diff := cmp.Diff(tt.want, got); diff != "" { log.Trace().Interface("got", got).Msg("result") @@ -2152,26 +2587,20 @@ func Test_getFilteredByACLPeers(t *testing.T) { { name: "all hosts can talk to each other", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ 
// list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2184,47 +2613,41 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 1, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - User: types.User{Name: "joe"}, + ID: 1, + IPv4: iap("100.64.0.1"), + User: types.User{Name: "joe"}, }, }, want: types.Nodes{ &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.3")}, - User: types.User{Name: "mickael"}, + ID: 3, + IPv4: iap("100.64.0.3"), + User: types.User{Name: "mickael"}, }, }, }, { name: "One host can talk to another, but not all hosts", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ 
-2237,42 +2660,36 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 1, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - User: types.User{Name: "joe"}, + ID: 1, + IPv4: iap("100.64.0.1"), + User: types.User{Name: "joe"}, }, }, want: types.Nodes{ &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, }, }, { name: "host cannot directly talk to destination, but return path is authorized", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2285,42 +2702,36 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, }, want: types.Nodes{ &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.3")}, - User: types.User{Name: "mickael"}, + ID: 3, + IPv4: iap("100.64.0.3"), + User: types.User{Name: "mickael"}, }, }, }, { name: "rules allows all hosts to reach one destination", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 
1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2333,19 +2744,15 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, }, want: types.Nodes{ &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, }, @@ -2353,26 +2760,20 @@ func Test_getFilteredByACLPeers(t *testing.T) { { name: "rules allows all hosts to reach one destination, destination can reach all hosts", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2385,26 +2786,20 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: 
iap("100.64.0.2"), User: types.User{Name: "marc"}, }, }, want: types.Nodes{ &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2412,26 +2807,20 @@ func Test_getFilteredByACLPeers(t *testing.T) { { name: "rule allows all hosts to reach all destinations", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2444,61 +2833,53 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, }, want: types.Nodes{ &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.3")}, - User: types.User{Name: "mickael"}, + ID: 3, + IPv4: iap("100.64.0.3"), + User: types.User{Name: "mickael"}, }, }, }, { name: "without rule all communications are forbidden", args: args{ - nodes: types.Nodes{ 
// list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, rules: []tailcfg.FilterRule{ // list of all ACLRules registered }, node: &types.Node{ // current nodes - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, }, - want: types.Nodes{}, + want: nil, }, { // Investigating 699 @@ -2511,38 +2892,30 @@ func Test_getFilteredByACLPeers(t *testing.T) { &types.Node{ ID: 1, Hostname: "ts-head-upcrmb", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - netip.MustParseAddr("fd7a:115c:a1e0::3"), - }, - User: types.User{Name: "user1"}, + IPv4: iap("100.64.0.3"), + IPv6: iap("fd7a:115c:a1e0::3"), + User: types.User{Name: "user1"}, }, &types.Node{ ID: 2, Hostname: "ts-unstable-rlwpvr", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - netip.MustParseAddr("fd7a:115c:a1e0::4"), - }, - User: types.User{Name: "user1"}, + IPv4: iap("100.64.0.4"), + IPv6: iap("fd7a:115c:a1e0::4"), + User: types.User{Name: "user1"}, }, &types.Node{ ID: 3, Hostname: "ts-head-8w6paa", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0::1"), - }, - User: types.User{Name: "user2"}, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user2"}, }, &types.Node{ ID: 4, Hostname: 
"ts-unstable-lys2ib", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0::2"), - }, - User: types.User{Name: "user2"}, + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, }, }, rules: []tailcfg.FilterRule{ // list of all ACLRules registered @@ -2562,31 +2935,25 @@ func Test_getFilteredByACLPeers(t *testing.T) { node: &types.Node{ // current nodes ID: 3, Hostname: "ts-head-8w6paa", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0::1"), - }, - User: types.User{Name: "user2"}, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user2"}, }, }, want: types.Nodes{ &types.Node{ ID: 1, Hostname: "ts-head-upcrmb", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - netip.MustParseAddr("fd7a:115c:a1e0::3"), - }, - User: types.User{Name: "user1"}, + IPv4: iap("100.64.0.3"), + IPv6: iap("fd7a:115c:a1e0::3"), + User: types.User{Name: "user1"}, }, &types.Node{ ID: 2, Hostname: "ts-unstable-rlwpvr", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - netip.MustParseAddr("fd7a:115c:a1e0::4"), - }, - User: types.User{Name: "user1"}, + IPv4: iap("100.64.0.4"), + IPv6: iap("fd7a:115c:a1e0::4"), + User: types.User{Name: "user1"}, }, }, }, @@ -2595,16 +2962,16 @@ func Test_getFilteredByACLPeers(t *testing.T) { args: args{ nodes: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "peer1", - User: types.User{Name: "mini"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "peer1", + User: types.User{Name: "mini"}, }, { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "peer2", - User: types.User{Name: "peer2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "peer2", + User: types.User{Name: "peer2"}, }, }, rules: []tailcfg.FilterRule{ @@ -2617,18 +2984,18 @@ 
func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ - ID: 0, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "mini", - User: types.User{Name: "mini"}, + ID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "mini", + User: types.User{Name: "mini"}, }, }, want: []*types.Node{ { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "peer2", - User: types.User{Name: "peer2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "peer2", + User: types.User{Name: "peer2"}, }, }, }, @@ -2637,22 +3004,22 @@ func Test_getFilteredByACLPeers(t *testing.T) { args: args{ nodes: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "user1-2", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, }, { - ID: 0, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1-1", - User: types.User{Name: "user1"}, + ID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, }, { - ID: 3, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")}, - Hostname: "user2-2", - User: types.User{Name: "user2"}, + ID: 3, + IPv4: iap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, }, }, rules: []tailcfg.FilterRule{ @@ -2686,30 +3053,30 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "user-2-1", - User: types.User{Name: "user2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, }, }, want: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "user1-2", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, }, { - ID: 0, - IPAddresses: 
[]netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1-1", - User: types.User{Name: "user1"}, + ID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, }, { - ID: 3, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")}, - Hostname: "user2-2", - User: types.User{Name: "user2"}, + ID: 3, + IPv4: iap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, }, }, }, @@ -2718,22 +3085,22 @@ func Test_getFilteredByACLPeers(t *testing.T) { args: args{ nodes: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "user1-2", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, }, { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "user-2-1", - User: types.User{Name: "user2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, }, { - ID: 3, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")}, - Hostname: "user2-2", - User: types.User{Name: "user2"}, + ID: 3, + IPv4: iap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, }, }, rules: []tailcfg.FilterRule{ @@ -2767,30 +3134,30 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ - ID: 0, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1-1", - User: types.User{Name: "user1"}, + ID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, }, }, want: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "user1-2", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, }, { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "user-2-1", - User: types.User{Name: "user2"}, + ID: 2, + IPv4: 
iap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, }, { - ID: 3, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")}, - Hostname: "user2-2", - User: types.User{Name: "user2"}, + ID: 3, + IPv4: iap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, }, }, }, @@ -2800,16 +3167,16 @@ func Test_getFilteredByACLPeers(t *testing.T) { args: args{ nodes: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.1"), + Hostname: "user1", + User: types.User{Name: "user1"}, }, { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "router", - User: types.User{Name: "router"}, + ID: 2, + IPv4: iap("100.64.0.2"), + Hostname: "router", + User: types.User{Name: "router"}, Routes: types.Routes{ types.Route{ NodeID: 2, @@ -2831,18 +3198,18 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.1"), + Hostname: "user1", + User: types.User{Name: "user1"}, }, }, want: []*types.Node{ { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "router", - User: types.User{Name: "router"}, + ID: 2, + IPv4: iap("100.64.0.2"), + Hostname: "router", + User: types.User{Name: "router"}, Routes: types.Routes{ types.Route{ NodeID: 2, @@ -2883,23 +3250,23 @@ func TestSSHRules(t *testing.T) { node types.Node peers types.Nodes pol ACLPolicy - want []*tailcfg.SSHRule + want *tailcfg.SSHPolicy }{ { name: "peers-can-connect", node: types.Node{ - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.99.42")}, - UserID: 0, + Hostname: "testnodes", + IPv4: iap("100.64.99.42"), + UserID: 0, User: types.User{ Name: "user1", }, }, peers: types.Nodes{ &types.Node{ - 
Hostname: "testnodes2", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 0, + Hostname: "testnodes2", + IPv4: iap("100.64.0.1"), + UserID: 0, User: types.User{ Name: "user1", }, @@ -2946,7 +3313,7 @@ func TestSSHRules(t *testing.T) { }, }, }, - want: []*tailcfg.SSHRule{ + want: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{ { Principals: []*tailcfg.SSHPrincipal{ { @@ -2991,23 +3358,23 @@ func TestSSHRules(t *testing.T) { }, Action: &tailcfg.SSHAction{Accept: true, AllowLocalPortForwarding: true}, }, - }, + }}, }, { name: "peers-cannot-connect", node: types.Node{ - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 0, + Hostname: "testnodes", + IPv4: iap("100.64.0.1"), + UserID: 0, User: types.User{ Name: "user1", }, }, peers: types.Nodes{ &types.Node{ - Hostname: "testnodes2", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.99.42")}, - UserID: 0, + Hostname: "testnodes2", + IPv4: iap("100.64.99.42"), + UserID: 0, User: types.User{ Name: "user1", }, @@ -3042,13 +3409,13 @@ func TestSSHRules(t *testing.T) { }, }, }, - want: []*tailcfg.SSHRule{}, + want: &tailcfg.SSHPolicy{Rules: nil}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tt.pol.generateSSHRules(&tt.node, tt.peers) + got, err := tt.pol.CompileSSHPolicy(&tt.node, tt.peers) assert.NoError(t, err) if diff := cmp.Diff(tt.want, got); diff != "" { @@ -3132,10 +3499,10 @@ func TestValidExpandTagOwnersInSources(t *testing.T) { } node := &types.Node{ - ID: 0, - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 0, + ID: 0, + Hostname: "testnodes", + IPv4: iap("100.64.0.1"), + UserID: 0, User: types.User{ Name: "user1", }, @@ -3155,7 +3522,7 @@ func TestValidExpandTagOwnersInSources(t *testing.T) { }, } - got, _, err := GenerateFilterAndSSHRules(pol, node, types.Nodes{}) + got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, 
types.Nodes{}) assert.NoError(t, err) want := []tailcfg.FilterRule{ @@ -3184,10 +3551,10 @@ func TestInvalidTagValidUser(t *testing.T) { } node := &types.Node{ - ID: 1, - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 1, + ID: 1, + Hostname: "testnodes", + IPv4: iap("100.64.0.1"), + UserID: 1, User: types.User{ Name: "user1", }, @@ -3206,7 +3573,7 @@ func TestInvalidTagValidUser(t *testing.T) { }, } - got, _, err := GenerateFilterAndSSHRules(pol, node, types.Nodes{}) + got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{}) assert.NoError(t, err) want := []tailcfg.FilterRule{ @@ -3235,10 +3602,10 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) { } node := &types.Node{ - ID: 1, - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 1, + ID: 1, + Hostname: "testnodes", + IPv4: iap("100.64.0.1"), + UserID: 1, User: types.User{ Name: "user1", }, @@ -3265,7 +3632,7 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) { // c.Assert(rules[0].DstPorts, check.HasLen, 1) // c.Assert(rules[0].DstPorts[0].IP, check.Equals, "100.64.0.1/32") - got, _, err := GenerateFilterAndSSHRules(pol, node, types.Nodes{}) + got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{}) assert.NoError(t, err) want := []tailcfg.FilterRule{ @@ -3296,10 +3663,10 @@ func TestValidTagInvalidUser(t *testing.T) { } node := &types.Node{ - ID: 1, - Hostname: "webserver", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 1, + ID: 1, + Hostname: "webserver", + IPv4: iap("100.64.0.1"), + UserID: 1, User: types.User{ Name: "user1", }, @@ -3313,10 +3680,10 @@ func TestValidTagInvalidUser(t *testing.T) { } nodes2 := &types.Node{ - ID: 2, - Hostname: "user", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - UserID: 1, + ID: 2, + Hostname: "user", + IPv4: iap("100.64.0.2"), + UserID: 1, User: 
types.User{ Name: "user1", }, @@ -3335,7 +3702,7 @@ func TestValidTagInvalidUser(t *testing.T) { }, } - got, _, err := GenerateFilterAndSSHRules(pol, node, types.Nodes{nodes2}) + got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{nodes2}) assert.NoError(t, err) want := []tailcfg.FilterRule{ diff --git a/hscontrol/policy/acls_types.go b/hscontrol/policy/acls_types.go index e9c44909..5b5d1838 100644 --- a/hscontrol/policy/acls_types.go +++ b/hscontrol/policy/acls_types.go @@ -6,26 +6,25 @@ import ( "strings" "github.com/tailscale/hujson" - "gopkg.in/yaml.v3" ) // ACLPolicy represents a Tailscale ACL Policy. type ACLPolicy struct { - Groups Groups `json:"groups" yaml:"groups"` - Hosts Hosts `json:"hosts" yaml:"hosts"` - TagOwners TagOwners `json:"tagOwners" yaml:"tagOwners"` - ACLs []ACL `json:"acls" yaml:"acls"` - Tests []ACLTest `json:"tests" yaml:"tests"` - AutoApprovers AutoApprovers `json:"autoApprovers" yaml:"autoApprovers"` - SSHs []SSH `json:"ssh" yaml:"ssh"` + Groups Groups `json:"groups"` + Hosts Hosts `json:"hosts"` + TagOwners TagOwners `json:"tagOwners"` + ACLs []ACL `json:"acls"` + Tests []ACLTest `json:"tests"` + AutoApprovers AutoApprovers `json:"autoApprovers"` + SSHs []SSH `json:"ssh"` } // ACL is a basic rule for the ACL Policy. type ACL struct { - Action string `json:"action" yaml:"action"` - Protocol string `json:"proto" yaml:"proto"` - Sources []string `json:"src" yaml:"src"` - Destinations []string `json:"dst" yaml:"dst"` + Action string `json:"action"` + Protocol string `json:"proto"` + Sources []string `json:"src"` + Destinations []string `json:"dst"` } // Groups references a series of alias in the ACL rules. @@ -37,27 +36,27 @@ type Hosts map[string]netip.Prefix // TagOwners specify what users (users?) are allow to use certain tags. type TagOwners map[string][]string -// ACLTest is not implemented, but should be use to check if a certain rule is allowed. 
+// ACLTest is not implemented, but should be used to check if a certain rule is allowed. type ACLTest struct { - Source string `json:"src" yaml:"src"` - Accept []string `json:"accept" yaml:"accept"` - Deny []string `json:"deny,omitempty" yaml:"deny,omitempty"` + Source string `json:"src"` + Accept []string `json:"accept"` + Deny []string `json:"deny,omitempty"` } // AutoApprovers specify which users (users?), groups or tags have their advertised routes // or exit node status automatically enabled. type AutoApprovers struct { - Routes map[string][]string `json:"routes" yaml:"routes"` - ExitNode []string `json:"exitNode" yaml:"exitNode"` + Routes map[string][]string `json:"routes"` + ExitNode []string `json:"exitNode"` } // SSH controls who can ssh into which machines. type SSH struct { - Action string `json:"action" yaml:"action"` - Sources []string `json:"src" yaml:"src"` - Destinations []string `json:"dst" yaml:"dst"` - Users []string `json:"users" yaml:"users"` - CheckPeriod string `json:"checkPeriod,omitempty" yaml:"checkPeriod,omitempty"` + Action string `json:"action"` + Sources []string `json:"src"` + Destinations []string `json:"dst"` + Users []string `json:"users"` + CheckPeriod string `json:"checkPeriod,omitempty"` } // UnmarshalJSON allows to parse the Hosts directly into netip objects. @@ -89,27 +88,6 @@ func (hosts *Hosts) UnmarshalJSON(data []byte) error { return nil } -// UnmarshalYAML allows to parse the Hosts directly into netip objects. -func (hosts *Hosts) UnmarshalYAML(data []byte) error { - newHosts := Hosts{} - hostIPPrefixMap := make(map[string]string) - - err := yaml.Unmarshal(data, &hostIPPrefixMap) - if err != nil { - return err - } - for host, prefixStr := range hostIPPrefixMap { - prefix, err := netip.ParsePrefix(prefixStr) - if err != nil { - return err - } - newHosts[host] = prefix - } - *hosts = newHosts - - return nil -} - // IsZero is perhaps a bit naive here. 
func (pol ACLPolicy) IsZero() bool { if len(pol.Groups) == 0 && len(pol.Hosts) == 0 && len(pol.ACLs) == 0 { @@ -119,7 +97,7 @@ func (pol ACLPolicy) IsZero() bool { return false } -// Returns the list of autoApproving users, groups or tags for a given IPPrefix. +// GetRouteApprovers returns the list of autoApproving users, groups or tags for a given IPPrefix. func (autoApprovers *AutoApprovers) GetRouteApprovers( prefix netip.Prefix, ) ([]string, error) { @@ -127,7 +105,7 @@ func (autoApprovers *AutoApprovers) GetRouteApprovers( return autoApprovers.ExitNode, nil // 0.0.0.0/0, ::/0 or equivalent } - approverAliases := []string{} + approverAliases := make([]string, 0) for autoApprovedPrefix, autoApproverAliases := range autoApprovers.Routes { autoApprovedPrefix, err := netip.ParsePrefix(autoApprovedPrefix) diff --git a/hscontrol/poll.go b/hscontrol/poll.go index bf48cc05..82a5295f 100644 --- a/hscontrol/poll.go +++ b/hscontrol/poll.go @@ -3,78 +3,154 @@ package hscontrol import ( "context" "fmt" + "math/rand/v2" "net/http" + "slices" + "sort" "strings" "time" "github.com/juanfont/headscale/hscontrol/db" "github.com/juanfont/headscale/hscontrol/mapper" "github.com/juanfont/headscale/hscontrol/types" + "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" + "github.com/sasha-s/go-deadlock" xslices "golang.org/x/exp/slices" "gorm.io/gorm" - "tailscale.com/envknob" "tailscale.com/tailcfg" ) const ( - keepAliveInterval = 60 * time.Second + keepAliveInterval = 50 * time.Second ) type contextKey string const nodeNameContextKey = contextKey("nodeName") -type UpdateNode func() +type mapSession struct { + h *Headscale + req tailcfg.MapRequest + ctx context.Context + capVer tailcfg.CapabilityVersion + mapper *mapper.Mapper -func logPollFunc( - mapRequest tailcfg.MapRequest, - node *types.Node, -) (func(string), func(string), func(error, string)) { - return func(msg string) { - log.Trace(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). 
- Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("node", node.Hostname). - Msg(msg) - }, - func(msg string) { - log.Warn(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("node", node.Hostname). - Msg(msg) - }, - func(err error, msg string) { - log.Error(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("node", node.Hostname). - Err(err). - Msg(msg) - } + cancelChMu deadlock.Mutex + + ch chan types.StateUpdate + cancelCh chan struct{} + cancelChOpen bool + + keepAlive time.Duration + keepAliveTicker *time.Ticker + + node *types.Node + w http.ResponseWriter + + warnf func(string, ...any) + infof func(string, ...any) + tracef func(string, ...any) + errf func(error, string, ...any) } -// handlePoll ensures the node gets the appropriate updates from either -// polling or immediate responses. -// -//nolint:gocyclo -func (h *Headscale) handlePoll( - writer http.ResponseWriter, +func (h *Headscale) newMapSession( ctx context.Context, + req tailcfg.MapRequest, + w http.ResponseWriter, node *types.Node, - mapRequest tailcfg.MapRequest, -) { - logTrace, logWarn, logErr := logPollFunc(mapRequest, node) +) *mapSession { + warnf, infof, tracef, errf := logPollFunc(req, node) + + var updateChan chan types.StateUpdate + if req.Stream { + // Use a buffered channel in case a node is not fully ready + // to receive a message to make sure we dont block the entire + // notifier. 
+ updateChan = make(chan types.StateUpdate, h.cfg.Tuning.NodeMapSessionBufferedChanSize) + updateChan <- types.StateUpdate{ + Type: types.StateFullUpdate, + } + } + + ka := keepAliveInterval + (time.Duration(rand.IntN(9000)) * time.Millisecond) + + return &mapSession{ + h: h, + ctx: ctx, + req: req, + w: w, + node: node, + capVer: req.Version, + mapper: h.mapper, + + ch: updateChan, + cancelCh: make(chan struct{}), + cancelChOpen: true, + + keepAlive: ka, + keepAliveTicker: nil, + + // Loggers + warnf: warnf, + infof: infof, + tracef: tracef, + errf: errf, + } +} + +func (m *mapSession) close() { + m.cancelChMu.Lock() + defer m.cancelChMu.Unlock() + + if !m.cancelChOpen { + mapResponseClosed.WithLabelValues("chanclosed").Inc() + return + } + + m.tracef("mapSession (%p) sending message on cancel chan", m) + select { + case m.cancelCh <- struct{}{}: + mapResponseClosed.WithLabelValues("sent").Inc() + m.tracef("mapSession (%p) sent message on cancel chan", m) + case <-time.After(30 * time.Second): + mapResponseClosed.WithLabelValues("timeout").Inc() + m.tracef("mapSession (%p) timed out sending close message", m) + } +} + +func (m *mapSession) isStreaming() bool { + return m.req.Stream && !m.req.ReadOnly +} + +func (m *mapSession) isEndpointUpdate() bool { + return !m.req.Stream && !m.req.ReadOnly && m.req.OmitPeers +} + +func (m *mapSession) isReadOnlyUpdate() bool { + return !m.req.Stream && m.req.OmitPeers && m.req.ReadOnly +} + +func (m *mapSession) resetKeepAlive() { + m.keepAliveTicker.Reset(m.keepAlive) +} + +func (m *mapSession) beforeServeLongPoll() { + if m.node.IsEphemeral() { + m.h.ephemeralGC.Cancel(m.node.ID) + } +} + +func (m *mapSession) afterServeLongPoll() { + if m.node.IsEphemeral() { + m.h.ephemeralGC.Schedule(m.node.ID, m.h.cfg.EphemeralNodeInactivityTimeout) + } +} + +// serve handles non-streaming requests. 
+func (m *mapSession) serve() { + // TODO(kradalby): A set todos to harden: + // - func to tell the stream to die, readonly -> false, !stream && omitpeers -> false, true // This is the mechanism where the node gives us information about its // current configuration. @@ -84,473 +160,259 @@ func (h *Headscale) handlePoll( // breaking existing long-polling (Stream == true) connections. // In this case, the server can omit the entire response; the client // only checks the HTTP response status code. + // + // This is what Tailscale calls a Lite update, the client ignores + // the response and just wants a 200. + // !req.stream && !req.ReadOnly && req.OmitPeers + // // TODO(kradalby): remove ReadOnly when we only support capVer 68+ - if mapRequest.OmitPeers && !mapRequest.Stream && !mapRequest.ReadOnly { - log.Info(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("node", node.Hostname). - Int("cap_ver", int(mapRequest.Version)). - Msg("Received update") + if m.isEndpointUpdate() { + m.handleEndpointUpdate() - change := node.PeerChangeFromMapRequest(mapRequest) + return + } - online := h.nodeNotifier.IsConnected(node.MachineKey) - change.Online = &online + // ReadOnly is whether the client just wants to fetch the + // MapResponse, without updating their Endpoints. The + // Endpoints field will be ignored and LastSeen will not be + // updated and peers will not be notified of changes. + // + // The intended use is for clients to discover the DERP map at + // start-up before their first real endpoint update. + if m.isReadOnlyUpdate() { + m.handleReadOnlyRequest() - node.ApplyPeerChange(&change) + return + } +} - hostInfoChange := node.Hostinfo.Equal(mapRequest.Hostinfo) +// serveLongPoll ensures the node gets the appropriate updates from either +// polling or immediate responses. 
+// +//nolint:gocyclo +func (m *mapSession) serveLongPoll() { + m.beforeServeLongPoll() - logTracePeerChange(node.Hostname, hostInfoChange, &change) + // Clean up the session when the client disconnects + defer func() { + m.cancelChMu.Lock() + m.cancelChOpen = false + close(m.cancelCh) + m.cancelChMu.Unlock() - // Check if the Hostinfo of the node has changed. - // If it has changed, check if there has been a change tod - // the routable IPs of the host and update update them in - // the database. Then send a Changed update - // (containing the whole node object) to peers to inform about - // the route change. - // If the hostinfo has changed, but not the routes, just update - // hostinfo and let the function continue. - if !hostInfoChange { - oldRoutes := node.Hostinfo.RoutableIPs - newRoutes := mapRequest.Hostinfo.RoutableIPs - - oldServicesCount := len(node.Hostinfo.Services) - newServicesCount := len(mapRequest.Hostinfo.Services) - - node.Hostinfo = mapRequest.Hostinfo - - sendUpdate := false - - // Route changes come as part of Hostinfo, which means that - // when an update comes, the Node Route logic need to run. - // This will require a "change" in comparison to a "patch", - // which is more costly. - if !xslices.Equal(oldRoutes, newRoutes) { - var err error - sendUpdate, err = h.db.SaveNodeRoutes(node) - if err != nil { - logErr(err, "Error processing node routes") - http.Error(writer, "", http.StatusInternalServerError) - - return - } - - if h.ACLPolicy != nil { - // update routes with peer information - update, err := h.db.EnableAutoApprovedRoutes(h.ACLPolicy, node) - if err != nil { - logErr(err, "Error running auto approved routes") - } - - if update != nil { - sendUpdate = true - } - } - } - - // Services is mostly useful for discovery and not critical, - // except for peerapi, which is how nodes talk to eachother. - // If peerapi was not part of the initial mapresponse, we - // need to make sure its sent out later as it is needed for - // Taildrop. 
- // TODO(kradalby): Length comparison is a bit naive, replace. - if oldServicesCount != newServicesCount { - sendUpdate = true - } - - if sendUpdate { - if err := h.db.DB.Save(node).Error; err != nil { - logErr(err, "Failed to persist/update node in the database") - http.Error(writer, "", http.StatusInternalServerError) - - return - } - - // Send an update to all peers to propagate the new routes - // available. - stateUpdate := types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, - Message: "called from handlePoll -> update -> new hostinfo", - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-peers-hostinfochange", node.Hostname) - h.nodeNotifier.NotifyWithIgnore( - ctx, - stateUpdate, - node.MachineKey.String()) - } - - // Send an update to the node itself with to ensure it - // has an updated packetfilter allowing the new route - // if it is defined in the ACL. - selfUpdate := types.StateUpdate{ - Type: types.StateSelfUpdate, - ChangeNodes: types.Nodes{node}, - } - if selfUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-self-hostinfochange", node.Hostname) - h.nodeNotifier.NotifyByMachineKey( - ctx, - selfUpdate, - node.MachineKey) - } - - return - } + // only update node status if the node channel was removed. + // in principal, it will be removed, but the client rapidly + // reconnects, the channel might be of another connection. + // In that case, it is not closed and the node is still online. + if m.h.nodeNotifier.RemoveNode(m.node.ID, m.ch) { + // Failover the node's routes if any. 
+ m.h.updateNodeOnlineStatus(false, m.node) + m.pollFailoverRoutes("node closing connection", m.node) } - if err := h.db.DB.Save(node).Error; err != nil { - logErr(err, "Failed to persist/update node in the database") - http.Error(writer, "", http.StatusInternalServerError) + m.afterServeLongPoll() + m.infof("node has disconnected, mapSession: %p, chan: %p", m, m.ch) + }() + // From version 68, all streaming requests can be treated as read only. + // TODO: Remove when we drop support for 1.48 + if m.capVer < 68 { + // Error has been handled/written to client in the func + // return + err := m.handleSaveNode() + if err != nil { + mapResponseWriteUpdatesInStream.WithLabelValues("error").Inc() + + m.close() return } - - stateUpdate := types.StateUpdate{ - Type: types.StatePeerChangedPatch, - ChangePatches: []*tailcfg.PeerChange{&change}, - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-peers-patch", node.Hostname) - h.nodeNotifier.NotifyWithIgnore( - ctx, - stateUpdate, - node.MachineKey.String()) - } - - writer.WriteHeader(http.StatusOK) - if f, ok := writer.(http.Flusher); ok { - f.Flush() - } - - return - } else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly { - // ReadOnly is whether the client just wants to fetch the - // MapResponse, without updating their Endpoints. The - // Endpoints field will be ignored and LastSeen will not be - // updated and peers will not be notified of changes. - // - // The intended use is for clients to discover the DERP map at - // start-up before their first real endpoint update. 
- } else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly { - h.handleLiteRequest(writer, node, mapRequest) - - return - } else if mapRequest.OmitPeers && mapRequest.Stream { - logErr(nil, "Ignoring request, don't know how to handle it") - - return - } - - change := node.PeerChangeFromMapRequest(mapRequest) - - // A stream is being set up, the node is Online - online := true - change.Online = &online - - node.ApplyPeerChange(&change) - - // Only save HostInfo if changed, update routes if changed - // TODO(kradalby): Remove when capver is over 68 - if !node.Hostinfo.Equal(mapRequest.Hostinfo) { - oldRoutes := node.Hostinfo.RoutableIPs - newRoutes := mapRequest.Hostinfo.RoutableIPs - - node.Hostinfo = mapRequest.Hostinfo - - if !xslices.Equal(oldRoutes, newRoutes) { - _, err := h.db.SaveNodeRoutes(node) - if err != nil { - logErr(err, "Error processing node routes") - http.Error(writer, "", http.StatusInternalServerError) - - return - } - } - } - - if err := h.db.DB.Save(node).Error; err != nil { - logErr(err, "Failed to persist/update node in the database") - http.Error(writer, "", http.StatusInternalServerError) - - return + mapResponseWriteUpdatesInStream.WithLabelValues("ok").Inc() } // Set up the client stream - h.pollNetMapStreamWG.Add(1) - defer h.pollNetMapStreamWG.Done() + m.h.pollNetMapStreamWG.Add(1) + defer m.h.pollNetMapStreamWG.Done() - // Use a buffered channel in case a node is not fully ready - // to receive a message to make sure we dont block the entire - // notifier. - // 12 is arbitrarily chosen. 
- chanSize := 3 - if size, ok := envknob.LookupInt("HEADSCALE_TUNING_POLL_QUEUE_SIZE"); ok { - chanSize = size - } - updateChan := make(chan types.StateUpdate, chanSize) - defer closeChanWithLog(updateChan, node.Hostname, "updateChan") + m.pollFailoverRoutes("node connected", m.node) - // Register the node's update channel - h.nodeNotifier.AddNode(node.MachineKey, updateChan) - defer h.nodeNotifier.RemoveNode(node.MachineKey) + // Upgrade the writer to a ResponseController + rc := http.NewResponseController(m.w) - // When a node connects to control, list the peers it has at - // that given point, further updates are kept in memory in - // the Mapper, which lives for the duration of the polling - // session. - peers, err := h.db.ListPeers(node) - if err != nil { - logErr(err, "Failed to list peers when opening poller") - http.Error(writer, "", http.StatusInternalServerError) + // Longpolling will break if there is a write timeout, + // so it needs to be disabled. + rc.SetWriteDeadline(time.Time{}) - return - } - - isConnected := h.nodeNotifier.ConnectedMap() - for _, peer := range peers { - online := isConnected[peer.MachineKey] - peer.IsOnline = &online - } - - mapp := mapper.NewMapper( - node, - peers, - h.DERPMap, - h.cfg.BaseDomain, - h.cfg.DNSConfig, - h.cfg.LogTail.Enabled, - h.cfg.RandomizeClientPort, - ) - - // update ACLRules with peer informations (to update server tags if necessary) - if h.ACLPolicy != nil { - // update routes with peer information - // This state update is ignored as it will be sent - // as part of the whole node - // TODO(kradalby): figure out if that is actually correct - _, err = h.db.EnableAutoApprovedRoutes(h.ACLPolicy, node) - if err != nil { - logErr(err, "Error running auto approved routes") - } - } - - logTrace("Sending initial map") - - mapResp, err := mapp.FullMapResponse(mapRequest, node, h.ACLPolicy) - if err != nil { - logErr(err, "Failed to create MapResponse") - http.Error(writer, "", http.StatusInternalServerError) - - 
return - } - - // Send the client an update to make sure we send an initial mapresponse - _, err = writer.Write(mapResp) - if err != nil { - logErr(err, "Could not write the map response") - - return - } - - if flusher, ok := writer.(http.Flusher); ok { - flusher.Flush() - } else { - return - } - - stateUpdate := types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, - Message: "called from handlePoll -> new node added", - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-newnode-peers", node.Hostname) - h.nodeNotifier.NotifyWithIgnore( - ctx, - stateUpdate, - node.MachineKey.String()) - } - - if len(node.Routes) > 0 { - go h.pollFailoverRoutes(logErr, "new node", node) - } - - keepAliveTicker := time.NewTicker(keepAliveInterval) - - ctx, cancel := context.WithCancel(context.WithValue(ctx, nodeNameContextKey, node.Hostname)) + ctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname)) defer cancel() + m.keepAliveTicker = time.NewTicker(m.keepAlive) + + m.h.nodeNotifier.AddNode(m.node.ID, m.ch) + go m.h.updateNodeOnlineStatus(true, m.node) + + m.infof("node has connected, mapSession: %p, chan: %p", m, m.ch) + + // Loop through updates and continuously send them to the + // client. 
for { - logTrace("Waiting for update on stream channel") + // consume channels with update, keep alives or "batch" blocking signals select { - case <-keepAliveTicker.C: - data, err := mapp.KeepAliveResponse(mapRequest, node) - if err != nil { - logErr(err, "Error generating the keep alive msg") + case <-m.cancelCh: + m.tracef("poll cancelled received") + mapResponseEnded.WithLabelValues("cancelled").Inc() + return - return - } - _, err = writer.Write(data) - if err != nil { - logErr(err, "Cannot write keep alive message") - - return - } - if flusher, ok := writer.(http.Flusher); ok { - flusher.Flush() - } else { - log.Error().Msg("Failed to create http flusher") + case <-ctx.Done(): + m.tracef("poll context done") + mapResponseEnded.WithLabelValues("done").Inc() + return + // Consume updates sent to node + case update, ok := <-m.ch: + if !ok { + m.tracef("update channel closed, streaming session is likely being replaced") return } - // This goroutine is not ideal, but we have a potential issue here - // where it blocks too long and that holds up updates. - // One alternative is to split these different channels into - // goroutines, but then you might have a problem without a lock - // if a keepalive is written at the same time as an update. - go h.updateNodeOnlineStatus(true, node) + // If the node has been removed from headscale, close the stream + if slices.Contains(update.Removed, m.node.ID) { + m.tracef("node removed, closing stream") + return + } - case update := <-updateChan: - logTrace("Received update") - now := time.Now() + m.tracef("received stream update: %s %s", update.Type.String(), update.Message) + mapResponseUpdateReceived.WithLabelValues(update.Type.String()).Inc() var data []byte var err error + var lastMessage string // Ensure the node object is updated, for example, there // might have been a hostinfo update in a sidechannel // which contains data needed to generate a map response. 
- node, err = h.db.GetNodeByMachineKey(node.MachineKey) + m.node, err = m.h.db.GetNodeByID(m.node.ID) if err != nil { - logErr(err, "Could not get machine from db") + m.errf(err, "Could not get machine from db") return } - startMapResp := time.Now() + updateType := "full" switch update.Type { case types.StateFullUpdate: - logTrace("Sending Full MapResponse") - - data, err = mapp.FullMapResponse(mapRequest, node, h.ACLPolicy) + m.tracef("Sending Full MapResponse") + data, err = m.mapper.FullMapResponse(m.req, m.node, m.h.ACLPolicy, fmt.Sprintf("from mapSession: %p, stream: %t", m, m.isStreaming())) case types.StatePeerChanged: - logTrace(fmt.Sprintf("Sending Changed MapResponse: %s", update.Message)) + changed := make(map[types.NodeID]bool, len(update.ChangeNodes)) - isConnectedMap := h.nodeNotifier.ConnectedMap() - for _, node := range update.ChangeNodes { - // If a node is not reported to be online, it might be - // because the value is outdated, check with the notifier. - // However, if it is set to Online, and not in the notifier, - // this might be because it has announced itself, but not - // reached the stage to actually create the notifier channel. 
- if node.IsOnline != nil && !*node.IsOnline { - isOnline := isConnectedMap[node.MachineKey] - node.IsOnline = &isOnline - } + for _, nodeID := range update.ChangeNodes { + changed[nodeID] = true } - data, err = mapp.PeerChangedResponse(mapRequest, node, update.ChangeNodes, h.ACLPolicy, update.Message) + lastMessage = update.Message + m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage)) + data, err = m.mapper.PeerChangedResponse(m.req, m.node, changed, update.ChangePatches, m.h.ACLPolicy, lastMessage) + updateType = "change" + case types.StatePeerChangedPatch: - logTrace("Sending PeerChangedPatch MapResponse") - data, err = mapp.PeerChangedPatchResponse(mapRequest, node, update.ChangePatches, h.ACLPolicy) + m.tracef(fmt.Sprintf("Sending Changed Patch MapResponse: %v", lastMessage)) + data, err = m.mapper.PeerChangedPatchResponse(m.req, m.node, update.ChangePatches, m.h.ACLPolicy) + updateType = "patch" case types.StatePeerRemoved: - logTrace("Sending PeerRemoved MapResponse") - data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed) - case types.StateSelfUpdate: - if len(update.ChangeNodes) == 1 { - logTrace("Sending SelfUpdate MapResponse") - node = update.ChangeNodes[0] - data, err = mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy, types.SelfUpdateIdentifier) - } else { - logWarn("SelfUpdate contained too many nodes, this is likely a bug in the code, please report.") + changed := make(map[types.NodeID]bool, len(update.Removed)) + + for _, nodeID := range update.Removed { + changed[nodeID] = false } + m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage)) + data, err = m.mapper.PeerChangedResponse(m.req, m.node, changed, update.ChangePatches, m.h.ACLPolicy, lastMessage) + updateType = "remove" + case types.StateSelfUpdate: + lastMessage = update.Message + m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage)) + // create the map so an empty (self) update is sent + data, err = 
m.mapper.PeerChangedResponse(m.req, m.node, make(map[types.NodeID]bool), update.ChangePatches, m.h.ACLPolicy, lastMessage) + updateType = "remove" case types.StateDERPUpdated: - logTrace("Sending DERPUpdate MapResponse") - data, err = mapp.DERPMapResponse(mapRequest, node, update.DERPMap) + m.tracef("Sending DERPUpdate MapResponse") + data, err = m.mapper.DERPMapResponse(m.req, m.node, m.h.DERPMap) + updateType = "derp" } if err != nil { - logErr(err, "Could not get the create map update") + m.errf(err, "Could not get the create map update") return } - log.Trace().Str("node", node.Hostname).TimeDiff("timeSpent", time.Now(), startMapResp).Str("mkey", node.MachineKey.String()).Int("type", int(update.Type)).Msg("finished making map response") - // Only send update if there is change if data != nil { startWrite := time.Now() - _, err = writer.Write(data) + _, err = m.w.Write(data) if err != nil { - logErr(err, "Could not write the map response") - - updateRequestsSentToNode.WithLabelValues(node.User.Name, node.Hostname, "failed"). - Inc() - + mapResponseSent.WithLabelValues("error", updateType).Inc() + m.errf(err, "could not write the map response(%s), for mapSession: %p", update.Type.String(), m) return } - if flusher, ok := writer.(http.Flusher); ok { - flusher.Flush() - } else { - log.Error().Msg("Failed to create http flusher") - + err = rc.Flush() + if err != nil { + mapResponseSent.WithLabelValues("error", updateType).Inc() + m.errf(err, "flushing the map response to client, for mapSession: %p", m) return } - log.Trace().Str("node", node.Hostname).TimeDiff("timeSpent", time.Now(), startWrite).Str("mkey", node.MachineKey.String()).Int("type", int(update.Type)).Msg("finished writing mapresp to node") - log.Info(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("machine_key", node.MachineKey.ShortString()). 
- Str("node", node.Hostname). - TimeDiff("timeSpent", time.Now(), now). - Msg("update sent") + log.Trace().Str("node", m.node.Hostname).TimeDiff("timeSpent", time.Now(), startWrite).Str("mkey", m.node.MachineKey.String()).Msg("finished writing mapresp to node") + + if debugHighCardinalityMetrics { + mapResponseLastSentSeconds.WithLabelValues(updateType, m.node.ID.String()).Set(float64(time.Now().Unix())) + } + mapResponseSent.WithLabelValues("ok", updateType).Inc() + m.tracef("update sent") + m.resetKeepAlive() } - case <-ctx.Done(): - logTrace("The client has closed the connection") + case <-m.keepAliveTicker.C: + data, err := m.mapper.KeepAliveResponse(m.req, m.node) + if err != nil { + m.errf(err, "Error generating the keep alive msg") + mapResponseSent.WithLabelValues("error", "keepalive").Inc() + return + } + _, err = m.w.Write(data) + if err != nil { + m.errf(err, "Cannot write keep alive message") + mapResponseSent.WithLabelValues("error", "keepalive").Inc() + return + } + err = rc.Flush() + if err != nil { + m.errf(err, "flushing keep alive to client, for mapSession: %p", m) + mapResponseSent.WithLabelValues("error", "keepalive").Inc() + return + } - go h.updateNodeOnlineStatus(false, node) - - // Failover the node's routes if any. - go h.pollFailoverRoutes(logErr, "node closing connection", node) - - // The connection has been closed, so we can stop polling. 
- return - - case <-h.shutdownChan: - logTrace("The long-poll handler is shutting down") - - return + if debugHighCardinalityMetrics { + mapResponseLastSentSeconds.WithLabelValues("keepalive", m.node.ID.String()).Set(float64(time.Now().Unix())) + } + mapResponseSent.WithLabelValues("ok", "keepalive").Inc() } } } -func (h *Headscale) pollFailoverRoutes(logErr func(error, string), where string, node *types.Node) { - update, err := db.Write(h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { - return db.EnsureFailoverRouteIsAvailable(tx, h.nodeNotifier.ConnectedMap(), node) +func (m *mapSession) pollFailoverRoutes(where string, node *types.Node) { + update, err := db.Write(m.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { + return db.FailoverNodeRoutesIfNeccessary(tx, m.h.nodeNotifier.LikelyConnectedMap(), node) }) if err != nil { - logErr(err, fmt.Sprintf("failed to ensure failover routes, %s", where)) + m.errf(err, fmt.Sprintf("failed to ensure failover routes, %s", where)) return } - if update != nil && !update.Empty() && update.Valid() { + if update != nil && !update.Empty() { ctx := types.NotifyCtx(context.Background(), fmt.Sprintf("poll-%s-routes-ensurefailover", strings.ReplaceAll(where, " ", "-")), node.Hostname) - h.nodeNotifier.NotifyWithIgnore(ctx, *update, node.MachineKey.String()) + m.h.nodeNotifier.NotifyWithIgnore(ctx, *update, node.ID) } } @@ -558,82 +420,229 @@ func (h *Headscale) pollFailoverRoutes(logErr func(error, string), where string, // about change in their online/offline status. // It takes a StateUpdateType of either StatePeerOnlineChanged or StatePeerOfflineChanged. func (h *Headscale) updateNodeOnlineStatus(online bool, node *types.Node) { - now := time.Now() + change := &tailcfg.PeerChange{ + NodeID: tailcfg.NodeID(node.ID), + Online: &online, + } - node.LastSeen = &now + if !online { + now := time.Now() - statusUpdate := types.StateUpdate{ + // lastSeen is only relevant if the node is disconnected. 
+ node.LastSeen = &now + change.LastSeen = &now + + err := h.db.Write(func(tx *gorm.DB) error { + return db.SetLastSeen(tx, node.ID, *node.LastSeen) + }) + if err != nil { + log.Error().Err(err).Msg("Cannot update node LastSeen") + + return + } + } + + ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-onlinestatus", node.Hostname) + h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{ Type: types.StatePeerChangedPatch, ChangePatches: []*tailcfg.PeerChange{ - { - NodeID: tailcfg.NodeID(node.ID), - Online: &online, - LastSeen: &now, - }, + change, }, - } - if statusUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-onlinestatus", node.Hostname) - h.nodeNotifier.NotifyWithIgnore(ctx, statusUpdate, node.MachineKey.String()) - } + }, node.ID) +} - err := h.db.DB.Transaction(func(tx *gorm.DB) error { - return db.UpdateLastSeen(tx, node.ID, *node.LastSeen) - }) - if err != nil { - log.Error().Err(err).Msg("Cannot update node LastSeen") +func (m *mapSession) handleEndpointUpdate() { + m.tracef("received endpoint update") + change := m.node.PeerChangeFromMapRequest(m.req) + + online := m.h.nodeNotifier.IsLikelyConnected(m.node.ID) + change.Online = &online + + m.node.ApplyPeerChange(&change) + + sendUpdate, routesChanged := hostInfoChanged(m.node.Hostinfo, m.req.Hostinfo) + + // The node might not set NetInfo if it has not changed and if + // the full HostInfo object is overrwritten, the information is lost. + // If there is no NetInfo, keep the previous one. + // From 1.66 the client only sends it if changed: + // https://github.com/tailscale/tailscale/commit/e1011f138737286ecf5123ff887a7a5800d129a2 + // TODO(kradalby): evaulate if we need better comparing of hostinfo + // before we take the changes. 
+ if m.req.Hostinfo.NetInfo == nil { + m.req.Hostinfo.NetInfo = m.node.Hostinfo.NetInfo + } + m.node.Hostinfo = m.req.Hostinfo + + logTracePeerChange(m.node.Hostname, sendUpdate, &change) + + // If there is no changes and nothing to save, + // return early. + if peerChangeEmpty(change) && !sendUpdate { + mapResponseEndpointUpdates.WithLabelValues("noop").Inc() return } -} -func closeChanWithLog[C chan []byte | chan struct{} | chan types.StateUpdate](channel C, node, name string) { - log.Trace(). - Str("handler", "PollNetMap"). - Str("node", node). - Str("channel", "Done"). - Msg(fmt.Sprintf("Closing %s channel", name)) + // Check if the Hostinfo of the node has changed. + // If it has changed, check if there has been a change to + // the routable IPs of the host and update update them in + // the database. Then send a Changed update + // (containing the whole node object) to peers to inform about + // the route change. + // If the hostinfo has changed, but not the routes, just update + // hostinfo and let the function continue. 
+ if routesChanged { + var err error + _, err = m.h.db.SaveNodeRoutes(m.node) + if err != nil { + m.errf(err, "Error processing node routes") + http.Error(m.w, "", http.StatusInternalServerError) + mapResponseEndpointUpdates.WithLabelValues("error").Inc() - close(channel) -} + return + } -func (h *Headscale) handleLiteRequest( - writer http.ResponseWriter, - node *types.Node, - mapRequest tailcfg.MapRequest, -) { - logTrace, _, logErr := logPollFunc(mapRequest, node) + if m.h.ACLPolicy != nil { + // update routes with peer information + err := m.h.db.EnableAutoApprovedRoutes(m.h.ACLPolicy, m.node) + if err != nil { + m.errf(err, "Error running auto approved routes") + mapResponseEndpointUpdates.WithLabelValues("error").Inc() + } + } - mapp := mapper.NewMapper( - node, - types.Nodes{}, - h.DERPMap, - h.cfg.BaseDomain, - h.cfg.DNSConfig, - h.cfg.LogTail.Enabled, - h.cfg.RandomizeClientPort, - ) + // Send an update to the node itself with to ensure it + // has an updated packetfilter allowing the new route + // if it is defined in the ACL. 
+ ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-self-hostinfochange", m.node.Hostname) + m.h.nodeNotifier.NotifyByNodeID( + ctx, + types.StateUpdate{ + Type: types.StateSelfUpdate, + ChangeNodes: []types.NodeID{m.node.ID}, + }, + m.node.ID) + } - logTrace("Client asked for a lite update, responding without peers") - - mapResp, err := mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy) - if err != nil { - logErr(err, "Failed to create MapResponse") - http.Error(writer, "", http.StatusInternalServerError) + if err := m.h.db.DB.Save(m.node).Error; err != nil { + m.errf(err, "Failed to persist/update node in the database") + http.Error(m.w, "", http.StatusInternalServerError) + mapResponseEndpointUpdates.WithLabelValues("error").Inc() return } - writer.Header().Set("Content-Type", "application/json; charset=utf-8") - writer.WriteHeader(http.StatusOK) - _, err = writer.Write(mapResp) - if err != nil { - logErr(err, "Failed to write response") + ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-peers-patch", m.node.Hostname) + m.h.nodeNotifier.NotifyWithIgnore( + ctx, + types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{m.node.ID}, + Message: "called from handlePoll -> update", + }, + m.node.ID) + + m.w.WriteHeader(http.StatusOK) + mapResponseEndpointUpdates.WithLabelValues("ok").Inc() + + return +} + +// handleSaveNode saves node updates in the maprequest _streaming_ +// path and is mostly the same code as in handleEndpointUpdate. +// It is not attempted to be deduplicated since it will go away +// when we stop supporting older than 68 which removes updates +// when the node is streaming. 
+func (m *mapSession) handleSaveNode() error { + m.tracef("saving node update from stream session") + + change := m.node.PeerChangeFromMapRequest(m.req) + + // A stream is being set up, the node is Online + online := true + change.Online = &online + + m.node.ApplyPeerChange(&change) + + sendUpdate, routesChanged := hostInfoChanged(m.node.Hostinfo, m.req.Hostinfo) + m.node.Hostinfo = m.req.Hostinfo + + // If there is no changes and nothing to save, + // return early. + if peerChangeEmpty(change) || !sendUpdate { + return nil } + + // Check if the Hostinfo of the node has changed. + // If it has changed, check if there has been a change to + // the routable IPs of the host and update update them in + // the database. Then send a Changed update + // (containing the whole node object) to peers to inform about + // the route change. + // If the hostinfo has changed, but not the routes, just update + // hostinfo and let the function continue. + if routesChanged { + var err error + _, err = m.h.db.SaveNodeRoutes(m.node) + if err != nil { + return err + } + + if m.h.ACLPolicy != nil { + // update routes with peer information + err := m.h.db.EnableAutoApprovedRoutes(m.h.ACLPolicy, m.node) + if err != nil { + return err + } + } + } + + if err := m.h.db.DB.Save(m.node).Error; err != nil { + return err + } + + ctx := types.NotifyCtx(context.Background(), "pre-68-update-while-stream", m.node.Hostname) + m.h.nodeNotifier.NotifyWithIgnore( + ctx, + types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{m.node.ID}, + Message: "called from handlePoll -> pre-68-update-while-stream", + }, + m.node.ID) + + return nil +} + +func (m *mapSession) handleReadOnlyRequest() { + m.tracef("Client asked for a lite update, responding without peers") + + mapResp, err := m.mapper.ReadOnlyMapResponse(m.req, m.node, m.h.ACLPolicy) + if err != nil { + m.errf(err, "Failed to create MapResponse") + http.Error(m.w, "", http.StatusInternalServerError) + 
mapResponseReadOnly.WithLabelValues("error").Inc() + return + } + + m.w.Header().Set("Content-Type", "application/json; charset=utf-8") + m.w.WriteHeader(http.StatusOK) + _, err = m.w.Write(mapResp) + if err != nil { + m.errf(err, "Failed to write response") + mapResponseReadOnly.WithLabelValues("error").Inc() + return + } + + m.w.WriteHeader(http.StatusOK) + mapResponseReadOnly.WithLabelValues("ok").Inc() + + return } func logTracePeerChange(hostname string, hostinfoChange bool, change *tailcfg.PeerChange) { - trace := log.Trace().Str("node_id", change.NodeID.String()).Str("hostname", hostname) + trace := log.Trace().Uint64("node.id", uint64(change.NodeID)).Str("hostname", hostname) if change.Key != nil { trace = trace.Str("node_key", change.Key.ShortString()) @@ -666,3 +675,98 @@ func logTracePeerChange(hostname string, hostinfoChange bool, change *tailcfg.Pe trace.Time("last_seen", *change.LastSeen).Msg("PeerChange received") } + +func peerChangeEmpty(chng tailcfg.PeerChange) bool { + return chng.Key == nil && + chng.DiscoKey == nil && + chng.Online == nil && + chng.Endpoints == nil && + chng.DERPRegion == 0 && + chng.LastSeen == nil && + chng.KeyExpiry == nil +} + +func logPollFunc( + mapRequest tailcfg.MapRequest, + node *types.Node, +) (func(string, ...any), func(string, ...any), func(string, ...any), func(error, string, ...any)) { + return func(msg string, a ...any) { + log.Warn(). + Caller(). + Bool("readOnly", mapRequest.ReadOnly). + Bool("omitPeers", mapRequest.OmitPeers). + Bool("stream", mapRequest.Stream). + Uint64("node.id", node.ID.Uint64()). + Str("node", node.Hostname). + Msgf(msg, a...) + }, + func(msg string, a ...any) { + log.Info(). + Caller(). + Bool("readOnly", mapRequest.ReadOnly). + Bool("omitPeers", mapRequest.OmitPeers). + Bool("stream", mapRequest.Stream). + Uint64("node.id", node.ID.Uint64()). + Str("node", node.Hostname). + Msgf(msg, a...) + }, + func(msg string, a ...any) { + log.Trace(). + Caller(). 
+ Bool("readOnly", mapRequest.ReadOnly). + Bool("omitPeers", mapRequest.OmitPeers). + Bool("stream", mapRequest.Stream). + Uint64("node.id", node.ID.Uint64()). + Str("node", node.Hostname). + Msgf(msg, a...) + }, + func(err error, msg string, a ...any) { + log.Error(). + Caller(). + Bool("readOnly", mapRequest.ReadOnly). + Bool("omitPeers", mapRequest.OmitPeers). + Bool("stream", mapRequest.Stream). + Uint64("node.id", node.ID.Uint64()). + Str("node", node.Hostname). + Err(err). + Msgf(msg, a...) + } +} + +// hostInfoChanged reports if hostInfo has changed in two ways, +// - first bool reports if an update needs to be sent to nodes +// - second reports if there has been changes to routes +// the caller can then use this info to save and update nodes +// and routes as needed. +func hostInfoChanged(old, new *tailcfg.Hostinfo) (bool, bool) { + if old.Equal(new) { + return false, false + } + + // Routes + oldRoutes := old.RoutableIPs + newRoutes := new.RoutableIPs + + sort.Slice(oldRoutes, func(i, j int) bool { + return util.ComparePrefix(oldRoutes[i], oldRoutes[j]) > 0 + }) + sort.Slice(newRoutes, func(i, j int) bool { + return util.ComparePrefix(newRoutes[i], newRoutes[j]) > 0 + }) + + if !xslices.Equal(oldRoutes, newRoutes) { + return true, true + } + + // Services is mostly useful for discovery and not critical, + // except for peerapi, which is how nodes talk to eachother. + // If peerapi was not part of the initial mapresponse, we + // need to make sure its sent out later as it is needed for + // Taildrop. + // TODO(kradalby): Length comparison is a bit naive, replace. 
+ if len(old.Services) != len(new.Services) { + return true, false + } + + return false, false +} diff --git a/hscontrol/poll_noise.go b/hscontrol/poll_noise.go deleted file mode 100644 index 53b1d47e..00000000 --- a/hscontrol/poll_noise.go +++ /dev/null @@ -1,96 +0,0 @@ -package hscontrol - -import ( - "encoding/json" - "errors" - "io" - "net/http" - - "github.com/rs/zerolog/log" - "gorm.io/gorm" - "tailscale.com/tailcfg" - "tailscale.com/types/key" -) - -const ( - MinimumCapVersion tailcfg.CapabilityVersion = 58 -) - -// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol -// -// This is the busiest endpoint, as it keeps the HTTP long poll that updates -// the clients when something in the network changes. -// -// The clients POST stuff like HostInfo and their Endpoints here, but -// only after their first request (marked with the ReadOnly field). -// -// At this moment the updates are sent in a quite horrendous way, but they kinda work. -func (ns *noiseServer) NoisePollNetMapHandler( - writer http.ResponseWriter, - req *http.Request, -) { - log.Trace(). - Str("handler", "NoisePollNetMap"). - Msg("PollNetMapHandler called") - - log.Trace(). - Any("headers", req.Header). - Caller(). - Msg("Headers") - - body, _ := io.ReadAll(req.Body) - - mapRequest := tailcfg.MapRequest{} - if err := json.Unmarshal(body, &mapRequest); err != nil { - log.Error(). - Caller(). - Err(err). - Msg("Cannot parse MapRequest") - http.Error(writer, "Internal error", http.StatusInternalServerError) - - return - } - - // Reject unsupported versions - if mapRequest.Version < MinimumCapVersion { - log.Info(). - Caller(). - Int("min_version", int(MinimumCapVersion)). - Int("client_version", int(mapRequest.Version)). 
- Msg("unsupported client connected") - http.Error(writer, "Internal error", http.StatusBadRequest) - - return - } - - ns.nodeKey = mapRequest.NodeKey - - node, err := ns.headscale.db.GetNodeByAnyKey( - ns.conn.Peer(), - mapRequest.NodeKey, - key.NodePublic{}, - ) - if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - log.Warn(). - Str("handler", "NoisePollNetMap"). - Msgf("Ignoring request, cannot find node with key %s", mapRequest.NodeKey.String()) - http.Error(writer, "Internal error", http.StatusNotFound) - - return - } - log.Error(). - Str("handler", "NoisePollNetMap"). - Msgf("Failed to fetch node from the database with node key: %s", mapRequest.NodeKey.String()) - http.Error(writer, "Internal error", http.StatusInternalServerError) - - return - } - log.Debug(). - Str("handler", "NoisePollNetMap"). - Str("node", node.Hostname). - Int("cap_ver", int(mapRequest.Version)). - Msg("A node sending a MapRequest with Noise protocol") - - ns.headscale.handlePoll(writer, req.Context(), node, mapRequest) -} diff --git a/hscontrol/templates/apple.html b/hscontrol/templates/apple.html index 4064dced..9582594a 100644 --- a/hscontrol/templates/apple.html +++ b/hscontrol/templates/apple.html @@ -25,17 +25,48 @@ +

headscale: iOS configuration

+

GUI

+
    +
  1. + Install the official Tailscale iOS client from the + App store +
  2. +
  3. + Open Tailscale and make sure you are not logged in to any account +
  4. +
  5. Open Settings on the iOS device
  6. +
  7. + Scroll down to the "third party apps" section, under "Game Center" or + "TV Provider" +
  8. +
  9. + Find Tailscale and select it + +
  10. +
  11. Enter "{{.URL}}" under "Alternate Coordination Server URL"
  12. +
  13. + Restart the app by closing it from the iOS app switcher, open the app + and select the regular sign in option (non-SSO). It should open + up to the headscale authentication page. +
  14. +
  15. + Enter your credentials and log in. Headscale should now be working on + your iOS device +
  16. +

headscale: macOS configuration

-

Recent Tailscale versions (1.34.0 and higher)

-

- Tailscale added Fast User Switching in version 1.34 and you can now use - the new login command to connect to one or more headscale (and Tailscale) - servers. The previously used profiles does not have an effect anymore. -

-

Command line

+

Command line

Use Tailscale's login command to add your profile:

tailscale login --login-server {{.URL}}
-

GUI

+

GUI

  1. ALT + Click the Tailscale icon in the menu and hover over the Debug menu @@ -46,44 +77,7 @@
  2. Follow the login procedure in the browser
-

Apple configuration profiles (1.32.0 and lower)

-

- This page provides - configuration profiles - for the official Tailscale clients for -

- -

- The profiles will configure Tailscale.app to use {{.URL}} as - its control server. -

-

Caution

-

- You should always download and inspect the profile before installing it: -

-

Profiles

-

macOS

Headscale can be set to the default server by installing a Headscale configuration profile: @@ -121,50 +115,17 @@

Restart Tailscale.app and log in.

-

headscale: iOS configuration

-

Recent Tailscale versions (1.38.1 and higher)

+

Caution

- Tailscale 1.38.1 on - iOS - added a configuration option to allow user to set an "Alternate - Coordination server". This can be used to connect to your headscale - server. + You should always download and inspect the profile before installing it:

-

GUI

-
    +
+ diff --git a/hscontrol/templates/windows.html b/hscontrol/templates/windows.html index c590494f..34aaa0ae 100644 --- a/hscontrol/templates/windows.html +++ b/hscontrol/templates/windows.html @@ -25,75 +25,21 @@

headscale: Windows configuration

-

Recent Tailscale versions (1.34.0 and higher)

- Tailscale added Fast User Switching in version 1.34 and you can now use - the new login command to connect to one or more headscale (and Tailscale) - servers. The previously used profiles does not have an effect anymore. -

-

Use Tailscale's login command to add your profile:

-
tailscale login --login-server {{.URL}}
- -

Windows registry configuration (1.32.0 and lower)

-

- This page provides Windows registry information for the official Windows - Tailscale client. -

- -

-

- The registry file will configure Tailscale to use {{.URL}} as - its control server. -

- -

-

Caution

-

- You should always download and inspect the registry file before installing - it: -

-
curl {{.URL}}/windows/tailscale.reg
- -

Installation

-

- Headscale can be set to the default server by running the registry file: -

- -

- Windows registry fileTailscale for Windows + and install it.

-
    -
  1. Download the registry file, then run it
  2. -
  3. Follow the prompts
  4. -
  5. Install and run the official windows Tailscale client
  6. -
  7. - When the installation has finished, start Tailscale, and log in by - clicking the icon in the system tray -
  8. -
-

Or using REG:

- Open command prompt with Administrator rights. Issue the following - commands to add the required registry entries: + Open a Command Prompt or Powershell and use Tailscale's login command to + connect with headscale:

-
-    REG ADD "HKLM\Software\Tailscale IPN" /v UnattendedMode /t REG_SZ /d always
-      REG ADD "HKLM\Software\Tailscale IPN" /v LoginURL /t REG_SZ /d "{{.URL}}"
-  
-

Or using Powershell

-

- Open Powershell with Administrator rights. Issue the following commands to - add the required registry entries: -

-
-    New-ItemProperty -Path 'HKLM:\Software\Tailscale IPN' -Name UnattendedMode -PropertyType String -Value always
-      New-ItemProperty -Path 'HKLM:\Software\Tailscale IPN' -Name LoginURL -PropertyType String -Value "{{.URL}}"
-  
-

Finally, restart Tailscale and log in.

- -

+
tailscale login --login-server {{.URL}}
diff --git a/hscontrol/types/common.go b/hscontrol/types/common.go index ceeceea0..35f5e5e4 100644 --- a/hscontrol/types/common.go +++ b/hscontrol/types/common.go @@ -10,6 +10,7 @@ import ( "time" "tailscale.com/tailcfg" + "tailscale.com/util/ctxkey" ) const ( @@ -90,6 +91,25 @@ func (i StringList) Value() (driver.Value, error) { type StateUpdateType int +func (su StateUpdateType) String() string { + switch su { + case StateFullUpdate: + return "StateFullUpdate" + case StatePeerChanged: + return "StatePeerChanged" + case StatePeerChangedPatch: + return "StatePeerChangedPatch" + case StatePeerRemoved: + return "StatePeerRemoved" + case StateSelfUpdate: + return "StateSelfUpdate" + case StateDERPUpdated: + return "StateDERPUpdated" + } + + return "unknown state update type" +} + const ( StateFullUpdate StateUpdateType = iota // StatePeerChanged is used for updates that needs @@ -118,7 +138,7 @@ type StateUpdate struct { // ChangeNodes must be set when Type is StatePeerAdded // and StatePeerChanged and contains the full node // object for added nodes. - ChangeNodes Nodes + ChangeNodes []NodeID // ChangePatches must be set when Type is StatePeerChangedPatch // and contains a populated PeerChange object. @@ -127,7 +147,7 @@ type StateUpdate struct { // Removed must be set when Type is StatePeerRemoved and // contain a list of the nodes that has been removed from // the network. - Removed []tailcfg.NodeID + Removed []NodeID // DERPMap must be set when Type is StateDERPUpdated and // contain the new DERP Map. @@ -138,39 +158,6 @@ type StateUpdate struct { Message string } -// Valid reports if a StateUpdate is correctly filled and -// panics if the mandatory fields for a type is not -// filled. -// Reports true if valid. 
-func (su *StateUpdate) Valid() bool { - switch su.Type { - case StatePeerChanged: - if su.ChangeNodes == nil { - panic("Mandatory field ChangeNodes is not set on StatePeerChanged update") - } - case StatePeerChangedPatch: - if su.ChangePatches == nil { - panic("Mandatory field ChangePatches is not set on StatePeerChangedPatch update") - } - case StatePeerRemoved: - if su.Removed == nil { - panic("Mandatory field Removed is not set on StatePeerRemove update") - } - case StateSelfUpdate: - if su.ChangeNodes == nil || len(su.ChangeNodes) != 1 { - panic( - "Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node", - ) - } - case StateDERPUpdated: - if su.DERPMap == nil { - panic("Mandatory field DERPMap is not set on StateDERPUpdated update") - } - } - - return true -} - // Empty reports if there are any updates in the StateUpdate. func (su *StateUpdate) Empty() bool { switch su.Type { @@ -185,22 +172,26 @@ func (su *StateUpdate) Empty() bool { return false } -func StateUpdateExpire(nodeID uint64, expiry time.Time) StateUpdate { +func StateUpdateExpire(nodeID NodeID, expiry time.Time) StateUpdate { return StateUpdate{ Type: StatePeerChangedPatch, ChangePatches: []*tailcfg.PeerChange{ { - NodeID: tailcfg.NodeID(nodeID), + NodeID: nodeID.NodeID(), KeyExpiry: &expiry, }, }, } } +var ( + NotifyOriginKey = ctxkey.New("notify.origin", "") + NotifyHostnameKey = ctxkey.New("notify.hostname", "") +) + func NotifyCtx(ctx context.Context, origin, hostname string) context.Context { - ctx2, _ := context.WithTimeout( - context.WithValue(context.WithValue(ctx, "hostname", hostname), "origin", origin), - 3*time.Second, - ) + ctx2, _ := context.WithTimeout(ctx, 3*time.Second) + ctx2 = NotifyOriginKey.WithValue(ctx2, origin) + ctx2 = NotifyHostnameKey.WithValue(ctx2, hostname) return ctx2 } diff --git a/hscontrol/types/config.go b/hscontrol/types/config.go index 022d1279..50ce2f07 100644 --- a/hscontrol/types/config.go +++ b/hscontrol/types/config.go @@ 
-20,6 +20,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/dnstype" + "tailscale.com/util/set" ) const ( @@ -31,6 +32,20 @@ var errOidcMutuallyExclusive = errors.New( "oidc_client_secret and oidc_client_secret_path are mutually exclusive", ) +type IPAllocationStrategy string + +const ( + IPAllocationStrategySequential IPAllocationStrategy = "sequential" + IPAllocationStrategyRandom IPAllocationStrategy = "random" +) + +type PolicyMode string + +const ( + PolicyModeDB = "database" + PolicyModeFile = "file" +) + // Config contains the initial Headscale configuration. type Config struct { ServerURL string @@ -39,9 +54,9 @@ type Config struct { GRPCAddr string GRPCAllowInsecure bool EphemeralNodeInactivityTimeout time.Duration - NodeUpdateCheckInterval time.Duration PrefixV4 *netip.Prefix PrefixV6 *netip.Prefix + IPAllocation IPAllocationStrategy NoisePrivateKeyPath string BaseDomain string Log LogConfig @@ -56,7 +71,8 @@ type Config struct { ACMEURL string ACMEEmail string - DNSConfig *tailcfg.DNSConfig + DNSConfig *tailcfg.DNSConfig + DNSUserNameInMagicDNS bool UnixSocket string UnixSocketPermission fs.FileMode @@ -68,11 +84,28 @@ type Config struct { CLI CLIConfig - ACL ACLConfig + Policy PolicyConfig + + Tuning Tuning +} + +type DNSConfig struct { + MagicDNS bool `mapstructure:"magic_dns"` + BaseDomain string `mapstructure:"base_domain"` + Nameservers Nameservers + SearchDomains []string `mapstructure:"search_domains"` + ExtraRecords []tailcfg.DNSRecord `mapstructure:"extra_records"` + UserNameInMagicDNS bool `mapstructure:"use_username_in_magic_dns"` +} + +type Nameservers struct { + Global []string + Split map[string][]string } type SqliteConfig struct { - Path string + Path string + WriteAheadLog bool } type PostgresConfig struct { @@ -87,11 +120,22 @@ type PostgresConfig struct { ConnMaxIdleTimeSecs int } +type GormConfig struct { + Debug bool + SlowThreshold time.Duration + SkipErrRecordNotFound bool + ParameterizedQueries 
bool + PrepareStmt bool +} + type DatabaseConfig struct { // Type sets the database type, either "sqlite3" or "postgres" Type string Debug bool + // Type sets the gorm configuration + Gorm GormConfig + Sqlite SqliteConfig Postgres PostgresConfig } @@ -152,8 +196,9 @@ type CLIConfig struct { Insecure bool } -type ACLConfig struct { - PolicyPath string +type PolicyConfig struct { + Path string + Mode PolicyMode } type LogConfig struct { @@ -161,6 +206,18 @@ type LogConfig struct { Level zerolog.Level } +type Tuning struct { + NotifierSendTimeout time.Duration + BatchChangeDelay time.Duration + NodeMapSessionBufferedChanSize int +} + +// LoadConfig prepares and loads the Headscale configuration into Viper. +// This means it sets the default values, reads the configuration file and +// environment variables, and handles deprecated configuration options. +// It has to be called before LoadServerConfig and LoadCLIConfig. +// The configuration is not validated and the caller should check for errors +// using a validation function. 
func LoadConfig(path string, isFile bool) error { if isFile { viper.SetConfigFile(path) @@ -176,18 +233,25 @@ func LoadConfig(path string, isFile bool) error { } } - viper.SetEnvPrefix("headscale") + envPrefix := "headscale" + viper.SetEnvPrefix(envPrefix) viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) viper.AutomaticEnv() + viper.SetDefault("policy.mode", "file") + viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache") viper.SetDefault("tls_letsencrypt_challenge_type", HTTP01ChallengeType) viper.SetDefault("log.level", "info") viper.SetDefault("log.format", TextLogFormat) - viper.SetDefault("dns_config", nil) - viper.SetDefault("dns_config.override_local_dns", true) + viper.SetDefault("dns.magic_dns", true) + viper.SetDefault("dns.base_domain", "") + viper.SetDefault("dns.nameservers.global", []string{}) + viper.SetDefault("dns.nameservers.split", map[string]string{}) + viper.SetDefault("dns.search_domains", []string{}) + viper.SetDefault("dns.extra_records", []tailcfg.DNSRecord{}) viper.SetDefault("derp.server.enabled", false) viper.SetDefault("derp.server.stun.enabled", true) @@ -207,6 +271,8 @@ func LoadConfig(path string, isFile bool) error { viper.SetDefault("database.postgres.max_idle_conns", 10) viper.SetDefault("database.postgres.conn_max_idle_time_secs", 3600) + viper.SetDefault("database.sqlite.write_ahead_log", true) + viper.SetDefault("oidc.scope", []string{oidc.ScopeOpenID, "profile", "email"}) viper.SetDefault("oidc.strip_email_domain", true) viper.SetDefault("oidc.only_start_if_oidc_is_available", true) @@ -218,18 +284,45 @@ func LoadConfig(path string, isFile bool) error { viper.SetDefault("ephemeral_node_inactivity_timeout", "120s") - viper.SetDefault("node_update_check_interval", "10s") + viper.SetDefault("tuning.notifier_send_timeout", "800ms") + viper.SetDefault("tuning.batch_change_delay", "800ms") + viper.SetDefault("tuning.node_mapsession_buffered_chan_size", 30) - if IsCLIConfigured() { - return nil - } + 
viper.SetDefault("prefixes.allocation", string(IPAllocationStrategySequential)) if err := viper.ReadInConfig(); err != nil { - log.Warn().Err(err).Msg("Failed to read configuration from disk") - return fmt.Errorf("fatal error reading config file: %w", err) } + return nil +} + +func validateServerConfig() error { + depr := deprecator{ + warns: make(set.Set[string]), + fatals: make(set.Set[string]), + } + + // Register aliases for backward compatibility + // Has to be called _after_ viper.ReadInConfig() + // https://github.com/spf13/viper/issues/560 + + // Alias the old ACL Policy path with the new configuration option. + depr.fatalIfNewKeyIsNotUsed("policy.path", "acl_policy_path") + + // Move dns_config -> dns + depr.warn("dns_config.override_local_dns") + depr.fatalIfNewKeyIsNotUsed("dns.magic_dns", "dns_config.magic_dns") + depr.fatalIfNewKeyIsNotUsed("dns.base_domain", "dns_config.base_domain") + depr.fatalIfNewKeyIsNotUsed("dns.nameservers.global", "dns_config.nameservers") + depr.fatalIfNewKeyIsNotUsed("dns.nameservers.split", "dns_config.restricted_nameservers") + depr.fatalIfNewKeyIsNotUsed("dns.search_domains", "dns_config.domains") + depr.fatalIfNewKeyIsNotUsed("dns.extra_records", "dns_config.extra_records") + depr.warn("dns_config.use_username_in_magic_dns") + depr.warn("dns.use_username_in_magic_dns") + + depr.Log() + // Collect any validation errors and return them all at once var errorText string if (viper.GetString("tls_letsencrypt_hostname") != "") && @@ -270,24 +363,15 @@ func LoadConfig(path string, isFile bool) error { ) } - maxNodeUpdateCheckInterval, _ := time.ParseDuration("60s") - if viper.GetDuration("node_update_check_interval") > maxNodeUpdateCheckInterval { - errorText += fmt.Sprintf( - "Fatal config error: node_update_check_interval (%s) is set too high, must be less than %s", - viper.GetString("node_update_check_interval"), - maxNodeUpdateCheckInterval, - ) - } - if errorText != "" { // nolint return 
errors.New(strings.TrimSuffix(errorText, "\n")) - } else { - return nil } + + return nil } -func GetTLSConfig() TLSConfig { +func tlsConfig() TLSConfig { return TLSConfig{ LetsEncrypt: LetsEncryptConfig{ Hostname: viper.GetString("tls_letsencrypt_hostname"), @@ -306,7 +390,7 @@ func GetTLSConfig() TLSConfig { } } -func GetDERPConfig() DERPConfig { +func derpConfig() DERPConfig { serverEnabled := viper.GetBool("derp.server.enabled") serverRegionID := viper.GetInt("derp.server.region_id") serverRegionCode := viper.GetString("derp.server.region_code") @@ -367,7 +451,7 @@ func GetDERPConfig() DERPConfig { } } -func GetLogTailConfig() LogTailConfig { +func logtailConfig() LogTailConfig { enabled := viper.GetBool("logtail.enabled") return LogTailConfig{ @@ -375,15 +459,17 @@ func GetLogTailConfig() LogTailConfig { } } -func GetACLConfig() ACLConfig { - policyPath := viper.GetString("acl_policy_path") +func policyConfig() PolicyConfig { + policyPath := viper.GetString("policy.path") + policyMode := viper.GetString("policy.mode") - return ACLConfig{ - PolicyPath: policyPath, + return PolicyConfig{ + Path: policyPath, + Mode: PolicyMode(policyMode), } } -func GetLogConfig() LogConfig { +func logConfig() LogConfig { logLevelStr := viper.GetString("log.level") logLevel, err := zerolog.ParseLevel(logLevelStr) if err != nil { @@ -393,9 +479,9 @@ func GetLogConfig() LogConfig { logFormatOpt := viper.GetString("log.format") var logFormat string switch logFormatOpt { - case "json": + case JSONLogFormat: logFormat = JSONLogFormat - case "text": + case TextLogFormat: logFormat = TextLogFormat case "": logFormat = TextLogFormat @@ -411,11 +497,16 @@ func GetLogConfig() LogConfig { } } -func GetDatabaseConfig() DatabaseConfig { +func databaseConfig() DatabaseConfig { debug := viper.GetBool("database.debug") type_ := viper.GetString("database.type") + skipErrRecordNotFound := viper.GetBool("database.gorm.skip_err_record_not_found") + slowThreshold := 
viper.GetDuration("database.gorm.slow_threshold") * time.Millisecond + parameterizedQueries := viper.GetBool("database.gorm.parameterized_queries") + prepareStmt := viper.GetBool("database.gorm.prepare_stmt") + switch type_ { case DatabaseSqlite, DatabasePostgres: break @@ -429,10 +520,18 @@ func GetDatabaseConfig() DatabaseConfig { return DatabaseConfig{ Type: type_, Debug: debug, + Gorm: GormConfig{ + Debug: debug, + SkipErrRecordNotFound: skipErrRecordNotFound, + SlowThreshold: slowThreshold, + ParameterizedQueries: parameterizedQueries, + PrepareStmt: prepareStmt, + }, Sqlite: SqliteConfig{ Path: util.AbsolutePathFromConfigPath( viper.GetString("database.sqlite.path"), ), + WriteAheadLog: viper.GetBool("database.sqlite.write_ahead_log"), }, Postgres: PostgresConfig{ Host: viper.GetString("database.postgres.host"), @@ -450,142 +549,151 @@ func GetDatabaseConfig() DatabaseConfig { } } -func GetDNSConfig() (*tailcfg.DNSConfig, string) { - if viper.IsSet("dns_config") { - dnsConfig := &tailcfg.DNSConfig{} +func dns() (DNSConfig, error) { + var dns DNSConfig - overrideLocalDNS := viper.GetBool("dns_config.override_local_dns") + // TODO: Use this instead of manually getting settings when + // UnmarshalKey is compatible with Environment Variables. 
+ // err := viper.UnmarshalKey("dns", &dns) + // if err != nil { + // return DNSConfig{}, fmt.Errorf("unmarshaling dns config: %w", err) + // } - if viper.IsSet("dns_config.nameservers") { - nameserversStr := viper.GetStringSlice("dns_config.nameservers") + dns.MagicDNS = viper.GetBool("dns.magic_dns") + dns.BaseDomain = viper.GetString("dns.base_domain") + dns.Nameservers.Global = viper.GetStringSlice("dns.nameservers.global") + dns.Nameservers.Split = viper.GetStringMapStringSlice("dns.nameservers.split") + dns.SearchDomains = viper.GetStringSlice("dns.search_domains") - nameservers := []netip.Addr{} - resolvers := []*dnstype.Resolver{} + if viper.IsSet("dns.extra_records") { + var extraRecords []tailcfg.DNSRecord - for _, nameserverStr := range nameserversStr { - // Search for explicit DNS-over-HTTPS resolvers - if strings.HasPrefix(nameserverStr, "https://") { - resolvers = append(resolvers, &dnstype.Resolver{ - Addr: nameserverStr, - }) - - // This nameserver can not be parsed as an IP address - continue - } - - // Parse nameserver as a regular IP - nameserver, err := netip.ParseAddr(nameserverStr) - if err != nil { - log.Error(). - Str("func", "getDNSConfig"). - Err(err). 
- Msgf("Could not parse nameserver IP: %s", nameserverStr) - } - - nameservers = append(nameservers, nameserver) - resolvers = append(resolvers, &dnstype.Resolver{ - Addr: nameserver.String(), - }) - } - - dnsConfig.Nameservers = nameservers - - if overrideLocalDNS { - dnsConfig.Resolvers = resolvers - } else { - dnsConfig.FallbackResolvers = resolvers - } + err := viper.UnmarshalKey("dns.extra_records", &extraRecords) + if err != nil { + return DNSConfig{}, fmt.Errorf("unmarshaling dns extra records: %w", err) } - if viper.IsSet("dns_config.restricted_nameservers") { - dnsConfig.Routes = make(map[string][]*dnstype.Resolver) - domains := []string{} - restrictedDNS := viper.GetStringMapStringSlice( - "dns_config.restricted_nameservers", - ) - for domain, restrictedNameservers := range restrictedDNS { - restrictedResolvers := make( - []*dnstype.Resolver, - len(restrictedNameservers), - ) - for index, nameserverStr := range restrictedNameservers { - nameserver, err := netip.ParseAddr(nameserverStr) - if err != nil { - log.Error(). - Str("func", "getDNSConfig"). - Err(err). - Msgf("Could not parse restricted nameserver IP: %s", nameserverStr) - } - restrictedResolvers[index] = &dnstype.Resolver{ - Addr: nameserver.String(), - } - } - dnsConfig.Routes[domain] = restrictedResolvers - domains = append(domains, domain) - } - dnsConfig.Domains = domains - } - - if viper.IsSet("dns_config.domains") { - domains := viper.GetStringSlice("dns_config.domains") - if len(dnsConfig.Resolvers) > 0 { - dnsConfig.Domains = domains - } else if domains != nil { - log.Warn(). - Msg("Warning: dns_config.domains is set, but no nameservers are configured. Ignoring domains.") - } - } - - if viper.IsSet("dns_config.extra_records") { - var extraRecords []tailcfg.DNSRecord - - err := viper.UnmarshalKey("dns_config.extra_records", &extraRecords) - if err != nil { - log.Error(). - Str("func", "getDNSConfig"). - Err(err). 
- Msgf("Could not parse dns_config.extra_records") - } - - dnsConfig.ExtraRecords = extraRecords - } - - if viper.IsSet("dns_config.magic_dns") { - dnsConfig.Proxied = viper.GetBool("dns_config.magic_dns") - } - - var baseDomain string - if viper.IsSet("dns_config.base_domain") { - baseDomain = viper.GetString("dns_config.base_domain") - } else { - baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled - } - - log.Trace().Interface("dns_config", dnsConfig).Msg("DNS configuration loaded") - - return dnsConfig, baseDomain + dns.ExtraRecords = extraRecords } - return nil, "" + dns.UserNameInMagicDNS = viper.GetBool("dns.use_username_in_magic_dns") + + return dns, nil } -func Prefixes() (*netip.Prefix, *netip.Prefix, error) { +// globalResolvers returns the global DNS resolvers +// defined in the config file. +// If a nameserver is a valid IP, it will be used as a regular resolver. +// If a nameserver is a valid URL, it will be used as a DoH resolver. +// If a nameserver is neither a valid URL nor a valid IP, it will be ignored. +func (d *DNSConfig) globalResolvers() []*dnstype.Resolver { + var resolvers []*dnstype.Resolver + + for _, nsStr := range d.Nameservers.Global { + warn := "" + if _, err := netip.ParseAddr(nsStr); err == nil { + resolvers = append(resolvers, &dnstype.Resolver{ + Addr: nsStr, + }) + + continue + } else { + warn = fmt.Sprintf("Invalid global nameserver %q. Parsing error: %s ignoring", nsStr, err) + } + + if _, err := url.Parse(nsStr); err == nil { + resolvers = append(resolvers, &dnstype.Resolver{ + Addr: nsStr, + }) + + continue + } else { + warn = fmt.Sprintf("Invalid global nameserver %q. Parsing error: %s ignoring", nsStr, err) + } + + if warn != "" { + log.Warn().Msg(warn) + } + } + + return resolvers +} + +// splitResolvers returns a map of domain to DNS resolvers. +// If a nameserver is a valid IP, it will be used as a regular resolver. +// If a nameserver is a valid URL, it will be used as a DoH resolver. 
+// If a nameserver is neither a valid URL nor a valid IP, it will be ignored. +func (d *DNSConfig) splitResolvers() map[string][]*dnstype.Resolver { + routes := make(map[string][]*dnstype.Resolver) + for domain, nameservers := range d.Nameservers.Split { + var resolvers []*dnstype.Resolver + for _, nsStr := range nameservers { + warn := "" + if _, err := netip.ParseAddr(nsStr); err == nil { + resolvers = append(resolvers, &dnstype.Resolver{ + Addr: nsStr, + }) + + continue + } else { + warn = fmt.Sprintf("Invalid split dns nameserver %q. Parsing error: %s ignoring", nsStr, err) + } + + if _, err := url.Parse(nsStr); err == nil { + resolvers = append(resolvers, &dnstype.Resolver{ + Addr: nsStr, + }) + + continue + } else { + warn = fmt.Sprintf("Invalid split dns nameserver %q. Parsing error: %s ignoring", nsStr, err) + } + + if warn != "" { + log.Warn().Msg(warn) + } + } + routes[domain] = resolvers + } + + return routes +} + +func dnsToTailcfgDNS(dns DNSConfig) *tailcfg.DNSConfig { + cfg := tailcfg.DNSConfig{} + + if dns.BaseDomain == "" && dns.MagicDNS { + log.Fatal().Msg("dns.base_domain must be set when using MagicDNS (dns.magic_dns)") + } + + cfg.Proxied = dns.MagicDNS + cfg.ExtraRecords = dns.ExtraRecords + cfg.Resolvers = dns.globalResolvers() + + routes := dns.splitResolvers() + cfg.Routes = routes + if dns.BaseDomain != "" { + cfg.Domains = []string{dns.BaseDomain} + } + cfg.Domains = append(cfg.Domains, dns.SearchDomains...) 
+ + return &cfg +} + +func prefixV4() (*netip.Prefix, error) { prefixV4Str := viper.GetString("prefixes.v4") - prefixV6Str := viper.GetString("prefixes.v6") + + if prefixV4Str == "" { + return nil, nil + } prefixV4, err := netip.ParsePrefix(prefixV4Str) if err != nil { - return nil, nil, err - } - - prefixV6, err := netip.ParsePrefix(prefixV6Str) - if err != nil { - return nil, nil, err + return nil, fmt.Errorf("parsing IPv4 prefix from config: %w", err) } builder := netipx.IPSetBuilder{} builder.AddPrefix(tsaddr.CGNATRange()) - builder.AddPrefix(tsaddr.TailscaleULARange()) ipSet, _ := builder.IPSet() if !ipSet.ContainsPrefix(prefixV4) { log.Warn(). @@ -593,35 +701,95 @@ func Prefixes() (*netip.Prefix, *netip.Prefix, error) { prefixV4Str, tsaddr.CGNATRange()) } + return &prefixV4, nil +} + +func prefixV6() (*netip.Prefix, error) { + prefixV6Str := viper.GetString("prefixes.v6") + + if prefixV6Str == "" { + return nil, nil + } + + prefixV6, err := netip.ParsePrefix(prefixV6Str) + if err != nil { + return nil, fmt.Errorf("parsing IPv6 prefix from config: %w", err) + } + + builder := netipx.IPSetBuilder{} + builder.AddPrefix(tsaddr.TailscaleULARange()) + ipSet, _ := builder.IPSet() + if !ipSet.ContainsPrefix(prefixV6) { log.Warn(). Msgf("Prefix %s is not in the %s range. This is an unsupported configuration.", prefixV6Str, tsaddr.TailscaleULARange()) } - return &prefixV4, &prefixV6, nil + return &prefixV6, nil } -func GetHeadscaleConfig() (*Config, error) { - if IsCLIConfigured() { - return &Config{ - CLI: CLIConfig{ - Address: viper.GetString("cli.address"), - APIKey: viper.GetString("cli.api_key"), - Timeout: viper.GetDuration("cli.timeout"), - Insecure: viper.GetBool("cli.insecure"), - }, - }, nil +// LoadCLIConfig returns the needed configuration for the CLI client +// of Headscale to connect to a Headscale server. 
+func LoadCLIConfig() (*Config, error) { + logConfig := logConfig() + zerolog.SetGlobalLevel(logConfig.Level) + + return &Config{ + DisableUpdateCheck: viper.GetBool("disable_check_updates"), + UnixSocket: viper.GetString("unix_socket"), + CLI: CLIConfig{ + Address: viper.GetString("cli.address"), + APIKey: viper.GetString("cli.api_key"), + Timeout: viper.GetDuration("cli.timeout"), + Insecure: viper.GetBool("cli.insecure"), + }, + Log: logConfig, + }, nil +} + +// LoadServerConfig returns the full Headscale configuration to +// host a Headscale server. This is called as part of `headscale serve`. +func LoadServerConfig() (*Config, error) { + if err := validateServerConfig(); err != nil { + return nil, err } - prefix4, prefix6, err := Prefixes() + logConfig := logConfig() + zerolog.SetGlobalLevel(logConfig.Level) + + prefix4, err := prefixV4() if err != nil { return nil, err } - dnsConfig, baseDomain := GetDNSConfig() - derpConfig := GetDERPConfig() - logConfig := GetLogTailConfig() + prefix6, err := prefixV6() + if err != nil { + return nil, err + } + + if prefix4 == nil && prefix6 == nil { + return nil, fmt.Errorf("no IPv4 or IPv6 prefix configured, minimum one prefix is required") + } + + allocStr := viper.GetString("prefixes.allocation") + var alloc IPAllocationStrategy + switch allocStr { + case string(IPAllocationStrategySequential): + alloc = IPAllocationStrategySequential + case string(IPAllocationStrategyRandom): + alloc = IPAllocationStrategyRandom + default: + return nil, fmt.Errorf("config error, prefixes.allocation is set to %s, which is not a valid strategy, allowed options: %s, %s", allocStr, IPAllocationStrategySequential, IPAllocationStrategyRandom) + } + + dnsConfig, err := dns() + if err != nil { + return nil, err + } + + derpConfig := derpConfig() + logTailConfig := logtailConfig() randomizeClientPort := viper.GetBool("randomize_client_port") oidcClientSecret := viper.GetString("oidc.client_secret") @@ -637,21 +805,37 @@ func 
GetHeadscaleConfig() (*Config, error) { oidcClientSecret = strings.TrimSpace(string(secretBytes)) } + serverURL := viper.GetString("server_url") + + // BaseDomain cannot be the same as the server URL. + // This is because Tailscale takes over the domain in BaseDomain, + // causing the headscale server and DERP to be unreachable. + // For Tailscale upstream, the following is true: + // - DERP run on their own domains + // - Control plane runs on login.tailscale.com/controlplane.tailscale.com + // - MagicDNS (BaseDomain) for users is on a *.ts.net domain per tailnet (e.g. tail-scale.ts.net) + // + // TODO(kradalby): remove dnsConfig.UserNameInMagicDNS check when removed. + if !dnsConfig.UserNameInMagicDNS && dnsConfig.BaseDomain != "" && strings.Contains(serverURL, dnsConfig.BaseDomain) { + return nil, errors.New("server_url cannot contain the base_domain, this will cause the headscale server and embedded DERP to become unreachable from the Tailscale node.") + } + return &Config{ - ServerURL: viper.GetString("server_url"), + ServerURL: serverURL, Addr: viper.GetString("listen_addr"), MetricsAddr: viper.GetString("metrics_listen_addr"), GRPCAddr: viper.GetString("grpc_listen_addr"), GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"), - DisableUpdateCheck: viper.GetBool("disable_check_updates"), + DisableUpdateCheck: false, - PrefixV4: prefix4, - PrefixV6: prefix6, + PrefixV4: prefix4, + PrefixV6: prefix6, + IPAllocation: IPAllocationStrategy(alloc), NoisePrivateKeyPath: util.AbsolutePathFromConfigPath( viper.GetString("noise.private_key_path"), ), - BaseDomain: baseDomain, + BaseDomain: dnsConfig.BaseDomain, DERP: derpConfig, @@ -659,15 +843,12 @@ func GetHeadscaleConfig() (*Config, error) { "ephemeral_node_inactivity_timeout", ), - NodeUpdateCheckInterval: viper.GetDuration( - "node_update_check_interval", - ), + Database: databaseConfig(), - Database: GetDatabaseConfig(), + TLS: tlsConfig(), - TLS: GetTLSConfig(), - - DNSConfig: dnsConfig, + DNSConfig: 
dnsToTailcfgDNS(dnsConfig), + DNSUserNameInMagicDNS: dnsConfig.UserNameInMagicDNS, ACMEEmail: viper.GetString("acme_email"), ACMEURL: viper.GetString("acme_url"), @@ -706,10 +887,10 @@ func GetHeadscaleConfig() (*Config, error) { UseExpiryFromToken: viper.GetBool("oidc.use_expiry_from_token"), }, - LogTail: logConfig, + LogTail: logTailConfig, RandomizeClientPort: randomizeClientPort, - ACL: GetACLConfig(), + Policy: policyConfig(), CLI: CLIConfig{ Address: viper.GetString("cli.address"), @@ -718,10 +899,81 @@ func GetHeadscaleConfig() (*Config, error) { Insecure: viper.GetBool("cli.insecure"), }, - Log: GetLogConfig(), + Log: logConfig, + + // TODO(kradalby): Document these settings when more stable + Tuning: Tuning{ + NotifierSendTimeout: viper.GetDuration("tuning.notifier_send_timeout"), + BatchChangeDelay: viper.GetDuration("tuning.batch_change_delay"), + NodeMapSessionBufferedChanSize: viper.GetInt("tuning.node_mapsession_buffered_chan_size"), + }, }, nil } -func IsCLIConfigured() bool { - return viper.GetString("cli.address") != "" && viper.GetString("cli.api_key") != "" +type deprecator struct { + warns set.Set[string] + fatals set.Set[string] +} + +// warnWithAlias will register an alias between the newKey and the oldKey, +// and log a deprecation warning if the oldKey is set. +func (d *deprecator) warnWithAlias(newKey, oldKey string) { + // NOTE: RegisterAlias is called with NEW KEY -> OLD KEY + viper.RegisterAlias(newKey, oldKey) + if viper.IsSet(oldKey) { + d.warns.Add(fmt.Sprintf("The %q configuration key is deprecated. Please use %q instead. %q will be removed in the future.", oldKey, newKey, oldKey)) + } +} + +// fatal deprecates and adds an entry to the fatal list of options if the oldKey is set. +func (d *deprecator) fatal(newKey, oldKey string) { + if viper.IsSet(oldKey) { + d.fatals.Add(fmt.Sprintf("The %q configuration key is deprecated. Please use %q instead. 
%q has been removed.", oldKey, newKey, oldKey)) + } +} + +// fatalIfNewKeyIsNotUsed deprecates and adds an entry to the fatal list of options if the oldKey is set and the new key is _not_ set. +// If the new key is set, a warning is emitted instead. +func (d *deprecator) fatalIfNewKeyIsNotUsed(newKey, oldKey string) { + if viper.IsSet(oldKey) && !viper.IsSet(newKey) { + d.fatals.Add(fmt.Sprintf("The %q configuration key is deprecated. Please use %q instead. %q has been removed.", oldKey, newKey, oldKey)) + } else if viper.IsSet(oldKey) { + d.warns.Add(fmt.Sprintf("The %q configuration key is deprecated. Please use %q instead. %q has been removed.", oldKey, newKey, oldKey)) + } +} + +// warn deprecates and adds an option to log a warning if the oldKey is set. +func (d *deprecator) warnNoAlias(newKey, oldKey string) { + if viper.IsSet(oldKey) { + d.warns.Add(fmt.Sprintf("The %q configuration key is deprecated. Please use %q instead. %q has been removed.", oldKey, newKey, oldKey)) + } +} + +// warn deprecates and adds an entry to the warn list of options if the oldKey is set. +func (d *deprecator) warn(oldKey string) { + if viper.IsSet(oldKey) { + d.warns.Add(fmt.Sprintf("The %q configuration key is deprecated and has been removed. 
Please see the changelog for more details.", oldKey)) + } +} + +func (d *deprecator) String() string { + var b strings.Builder + + for _, w := range d.warns.Slice() { + fmt.Fprintf(&b, "WARN: %s\n", w) + } + + for _, f := range d.fatals.Slice() { + fmt.Fprintf(&b, "FATAL: %s\n", f) + } + + return b.String() +} + +func (d *deprecator) Log() { + if len(d.fatals) > 0 { + log.Fatal().Msg("\n" + d.String()) + } else if len(d.warns) > 0 { + log.Warn().Msg("\n" + d.String()) + } } diff --git a/hscontrol/types/config_test.go b/hscontrol/types/config_test.go new file mode 100644 index 00000000..e6e8d6c2 --- /dev/null +++ b/hscontrol/types/config_test.go @@ -0,0 +1,339 @@ +package types + +import ( + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "tailscale.com/tailcfg" + "tailscale.com/types/dnstype" +) + +func TestReadConfig(t *testing.T) { + tests := []struct { + name string + configPath string + setup func(*testing.T) (any, error) + want any + wantErr string + }{ + { + name: "unmarshal-dns-full-config", + configPath: "testdata/dns_full.yaml", + setup: func(t *testing.T) (any, error) { + dns, err := dns() + if err != nil { + return nil, err + } + + return dns, nil + }, + want: DNSConfig{ + MagicDNS: true, + BaseDomain: "example.com", + Nameservers: Nameservers{ + Global: []string{"1.1.1.1", "1.0.0.1", "2606:4700:4700::1111", "2606:4700:4700::1001", "https://dns.nextdns.io/abc123"}, + Split: map[string][]string{"darp.headscale.net": {"1.1.1.1", "8.8.8.8"}, "foo.bar.com": {"1.1.1.1"}}, + }, + ExtraRecords: []tailcfg.DNSRecord{ + {Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"}, + {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, + }, + SearchDomains: []string{"test.com", "bar.com"}, + UserNameInMagicDNS: true, + }, + }, + { + name: "dns-to-tailcfg.DNSConfig", + configPath: "testdata/dns_full.yaml", + setup: func(t *testing.T) (any, error) { + 
dns, err := dns() + if err != nil { + return nil, err + } + + return dnsToTailcfgDNS(dns), nil + }, + want: &tailcfg.DNSConfig{ + Proxied: true, + Domains: []string{"example.com", "test.com", "bar.com"}, + Resolvers: []*dnstype.Resolver{ + {Addr: "1.1.1.1"}, + {Addr: "1.0.0.1"}, + {Addr: "2606:4700:4700::1111"}, + {Addr: "2606:4700:4700::1001"}, + {Addr: "https://dns.nextdns.io/abc123"}, + }, + Routes: map[string][]*dnstype.Resolver{ + "darp.headscale.net": {{Addr: "1.1.1.1"}, {Addr: "8.8.8.8"}}, + "foo.bar.com": {{Addr: "1.1.1.1"}}, + }, + ExtraRecords: []tailcfg.DNSRecord{ + {Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"}, + {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, + }, + }, + }, + { + name: "unmarshal-dns-full-no-magic", + configPath: "testdata/dns_full_no_magic.yaml", + setup: func(t *testing.T) (any, error) { + dns, err := dns() + if err != nil { + return nil, err + } + + return dns, nil + }, + want: DNSConfig{ + MagicDNS: false, + BaseDomain: "example.com", + Nameservers: Nameservers{ + Global: []string{"1.1.1.1", "1.0.0.1", "2606:4700:4700::1111", "2606:4700:4700::1001", "https://dns.nextdns.io/abc123"}, + Split: map[string][]string{"darp.headscale.net": {"1.1.1.1", "8.8.8.8"}, "foo.bar.com": {"1.1.1.1"}}, + }, + ExtraRecords: []tailcfg.DNSRecord{ + {Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"}, + {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, + }, + SearchDomains: []string{"test.com", "bar.com"}, + UserNameInMagicDNS: true, + }, + }, + { + name: "dns-to-tailcfg.DNSConfig", + configPath: "testdata/dns_full_no_magic.yaml", + setup: func(t *testing.T) (any, error) { + dns, err := dns() + if err != nil { + return nil, err + } + + return dnsToTailcfgDNS(dns), nil + }, + want: &tailcfg.DNSConfig{ + Proxied: false, + Domains: []string{"example.com", "test.com", "bar.com"}, + Resolvers: []*dnstype.Resolver{ + {Addr: "1.1.1.1"}, + {Addr: "1.0.0.1"}, + {Addr: 
"2606:4700:4700::1111"}, + {Addr: "2606:4700:4700::1001"}, + {Addr: "https://dns.nextdns.io/abc123"}, + }, + Routes: map[string][]*dnstype.Resolver{ + "darp.headscale.net": {{Addr: "1.1.1.1"}, {Addr: "8.8.8.8"}}, + "foo.bar.com": {{Addr: "1.1.1.1"}}, + }, + ExtraRecords: []tailcfg.DNSRecord{ + {Name: "grafana.myvpn.example.com", Type: "A", Value: "100.64.0.3"}, + {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, + }, + }, + }, + { + name: "base-domain-in-server-url-err", + configPath: "testdata/base-domain-in-server-url.yaml", + setup: func(t *testing.T) (any, error) { + return LoadServerConfig() + }, + want: nil, + wantErr: "server_url cannot contain the base_domain, this will cause the headscale server and embedded DERP to become unreachable from the Tailscale node.", + }, + { + name: "base-domain-not-in-server-url", + configPath: "testdata/base-domain-not-in-server-url.yaml", + setup: func(t *testing.T) (any, error) { + cfg, err := LoadServerConfig() + if err != nil { + return nil, err + } + + return map[string]string{ + "server_url": cfg.ServerURL, + "base_domain": cfg.BaseDomain, + }, err + }, + want: map[string]string{ + "server_url": "https://derp.no", + "base_domain": "clients.derp.no", + }, + wantErr: "", + }, + { + name: "policy-path-is-loaded", + configPath: "testdata/policy-path-is-loaded.yaml", + setup: func(t *testing.T) (any, error) { + cfg, err := LoadServerConfig() + if err != nil { + return nil, err + } + + return map[string]string{ + "policy.mode": string(cfg.Policy.Mode), + "policy.path": cfg.Policy.Path, + }, err + }, + want: map[string]string{ + "policy.mode": "file", + "policy.path": "/etc/policy.hujson", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + viper.Reset() + err := LoadConfig(tt.configPath, true) + assert.NoError(t, err) + + conf, err := tt.setup(t) + + if tt.wantErr != "" { + assert.Equal(t, tt.wantErr, err.Error()) + + return + } + + assert.NoError(t, err) + + if diff := 
cmp.Diff(tt.want, conf); diff != "" { + t.Errorf("ReadConfig() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestReadConfigFromEnv(t *testing.T) { + tests := []struct { + name string + configEnv map[string]string + setup func(*testing.T) (any, error) + want any + }{ + { + name: "test-random-base-settings-with-env", + configEnv: map[string]string{ + "HEADSCALE_LOG_LEVEL": "trace", + "HEADSCALE_DATABASE_SQLITE_WRITE_AHEAD_LOG": "false", + "HEADSCALE_PREFIXES_V4": "100.64.0.0/10", + }, + setup: func(t *testing.T) (any, error) { + t.Logf("all settings: %#v", viper.AllSettings()) + + assert.Equal(t, "trace", viper.GetString("log.level")) + assert.Equal(t, "100.64.0.0/10", viper.GetString("prefixes.v4")) + assert.False(t, viper.GetBool("database.sqlite.write_ahead_log")) + return nil, nil + }, + want: nil, + }, + { + name: "unmarshal-dns-full-config", + configEnv: map[string]string{ + "HEADSCALE_DNS_MAGIC_DNS": "true", + "HEADSCALE_DNS_BASE_DOMAIN": "example.com", + "HEADSCALE_DNS_NAMESERVERS_GLOBAL": `1.1.1.1 8.8.8.8`, + "HEADSCALE_DNS_SEARCH_DOMAINS": "test.com bar.com", + "HEADSCALE_DNS_USE_USERNAME_IN_MAGIC_DNS": "true", + + // TODO(kradalby): Figure out how to pass these as env vars + // "HEADSCALE_DNS_NAMESERVERS_SPLIT": `{foo.bar.com: ["1.1.1.1"]}`, + // "HEADSCALE_DNS_EXTRA_RECORDS": `[{ name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.4" }]`, + }, + setup: func(t *testing.T) (any, error) { + t.Logf("all settings: %#v", viper.AllSettings()) + + dns, err := dns() + if err != nil { + return nil, err + } + + return dns, nil + }, + want: DNSConfig{ + MagicDNS: true, + BaseDomain: "example.com", + Nameservers: Nameservers{ + Global: []string{"1.1.1.1", "8.8.8.8"}, + Split: map[string][]string{ + // "foo.bar.com": {"1.1.1.1"}, + }, + }, + ExtraRecords: []tailcfg.DNSRecord{ + // {Name: "prometheus.myvpn.example.com", Type: "A", Value: "100.64.0.4"}, + }, + SearchDomains: []string{"test.com", "bar.com"}, + UserNameInMagicDNS: true, + }, + }, 
+ } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.configEnv { + t.Setenv(k, v) + } + + viper.Reset() + err := LoadConfig("testdata/minimal.yaml", true) + assert.NoError(t, err) + + conf, err := tt.setup(t) + assert.NoError(t, err) + + if diff := cmp.Diff(tt.want, conf); diff != "" { + t.Errorf("ReadConfig() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestTLSConfigValidation(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "headscale") + if err != nil { + t.Fatal(err) + } + // defer os.RemoveAll(tmpDir) + configYaml := []byte(`--- +tls_letsencrypt_hostname: example.com +tls_letsencrypt_challenge_type: "" +tls_cert_path: abc.pem +noise: + private_key_path: noise_private.key`) + + // Populate a custom config file + configFilePath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configFilePath, configYaml, 0o600) + if err != nil { + t.Fatalf("Couldn't write file %s", configFilePath) + } + + // Check configuration validation errors (1) + err = LoadConfig(tmpDir, false) + assert.NoError(t, err) + + err = validateServerConfig() + assert.Error(t, err) + assert.Contains(t, err.Error(), "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both") + assert.Contains(t, err.Error(), "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are") + assert.Contains(t, err.Error(), "Fatal config error: server_url must start with https:// or http://") + + // Check configuration validation errors (2) + configYaml = []byte(`--- +noise: + private_key_path: noise_private.key +server_url: http://127.0.0.1:8080 +tls_letsencrypt_hostname: example.com +tls_letsencrypt_challenge_type: TLS-ALPN-01 +`) + err = os.WriteFile(configFilePath, configYaml, 0o600) + if err != nil { + t.Fatalf("Couldn't write file %s", configFilePath) + } + err = LoadConfig(tmpDir, false) + assert.NoError(t, err) +} diff --git a/hscontrol/types/const.go b/hscontrol/types/const.go 
index e718eb2e..019c14b6 100644 --- a/hscontrol/types/const.go +++ b/hscontrol/types/const.go @@ -3,7 +3,7 @@ package types import "time" const ( - HTTPReadTimeout = 30 * time.Second + HTTPTimeout = 30 * time.Second HTTPShutdownTimeout = 3 * time.Second TLSALPN01ChallengeType = "TLS-ALPN-01" HTTP01ChallengeType = "HTTP-01" diff --git a/hscontrol/types/node.go b/hscontrol/types/node.go index 8b7917de..b19020e0 100644 --- a/hscontrol/types/node.go +++ b/hscontrol/types/node.go @@ -1,18 +1,18 @@ package types import ( - "database/sql/driver" + "database/sql" "encoding/json" "errors" "fmt" "net/netip" - "sort" + "strconv" "strings" "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/policy/matcher" - "github.com/rs/zerolog/log" + "github.com/juanfont/headscale/hscontrol/util" "go4.org/netipx" "google.golang.org/protobuf/types/known/timestamppb" "gorm.io/gorm" @@ -27,9 +27,29 @@ var ( ErrNodeUserHasNoName = errors.New("node user has no name") ) +type NodeID uint64 + +// type NodeConnectedMap *xsync.MapOf[NodeID, bool] + +func (id NodeID) StableID() tailcfg.StableNodeID { + return tailcfg.StableNodeID(strconv.FormatUint(uint64(id), util.Base10)) +} + +func (id NodeID) NodeID() tailcfg.NodeID { + return tailcfg.NodeID(id) +} + +func (id NodeID) Uint64() uint64 { + return uint64(id) +} + +func (id NodeID) String() string { + return strconv.FormatUint(id.Uint64(), util.Base10) +} + // Node is a Headscale client. 
type Node struct { - ID uint64 `gorm:"primary_key"` + ID NodeID `gorm:"primary_key"` // MachineKeyDatabaseField is the string representation of MachineKey // it is _only_ used for reading and writing the key to the @@ -66,7 +86,19 @@ type Node struct { HostinfoDatabaseField string `gorm:"column:host_info"` Hostinfo *tailcfg.Hostinfo `gorm:"-"` - IPAddresses NodeAddresses + // IPv4DatabaseField is the string representation of v4 address, + // it is _only_ used for reading and writing the key to the + // database and should not be used. + // Use V4 instead. + IPv4DatabaseField sql.NullString `gorm:"column:ipv4"` + IPv4 *netip.Addr `gorm:"-"` + + // IPv6DatabaseField is the string representation of v4 address, + // it is _only_ used for reading and writing the key to the + // database and should not be used. + // Use V6 instead. + IPv6DatabaseField sql.NullString `gorm:"column:ipv6"` + IPv6 *netip.Addr `gorm:"-"` // Hostname represents the name given by the Tailscale // client during registration @@ -80,20 +112,20 @@ type Node struct { // parts of headscale. GivenName string `gorm:"type:varchar(63);unique_index"` UserID uint - User User `gorm:"foreignKey:UserID"` + User User `gorm:"constraint:OnDelete:CASCADE;"` RegisterMethod string ForcedTags StringList // TODO(kradalby): This seems like irrelevant information? 
- AuthKeyID uint - AuthKey *PreAuthKey + AuthKeyID *uint64 `sql:"DEFAULT:NULL"` + AuthKey *PreAuthKey `gorm:"constraint:OnDelete:SET NULL;"` LastSeen *time.Time Expiry *time.Time - Routes []Route + Routes []Route `gorm:"constraint:OnDelete:CASCADE;"` CreatedAt time.Time UpdatedAt time.Time @@ -106,34 +138,41 @@ type ( Nodes []*Node ) -type NodeAddresses []netip.Addr - -func (na NodeAddresses) Sort() { - sort.Slice(na, func(index1, index2 int) bool { - if na[index1].Is4() && na[index2].Is6() { - return true - } - if na[index1].Is6() && na[index2].Is4() { - return false - } - - return na[index1].Compare(na[index2]) < 0 - }) -} - -func (na NodeAddresses) StringSlice() []string { - na.Sort() - strSlice := make([]string, 0, len(na)) - for _, addr := range na { - strSlice = append(strSlice, addr.String()) +// IsExpired returns whether the node registration has expired. +func (node Node) IsExpired() bool { + // If Expiry is not set, the client has not indicated that + // it wants an expiry time, it is therefor considered + // to mean "not expired" + if node.Expiry == nil || node.Expiry.IsZero() { + return false } - return strSlice + return time.Since(*node.Expiry) > 0 } -func (na NodeAddresses) Prefixes() []netip.Prefix { +// IsEphemeral returns if the node is registered as an Ephemeral node. 
+// https://tailscale.com/kb/1111/ephemeral-nodes/ +func (node *Node) IsEphemeral() bool { + return node.AuthKey != nil && node.AuthKey.Ephemeral +} + +func (node *Node) IPs() []netip.Addr { + var ret []netip.Addr + + if node.IPv4 != nil { + ret = append(ret, *node.IPv4) + } + + if node.IPv6 != nil { + ret = append(ret, *node.IPv6) + } + + return ret +} + +func (node *Node) Prefixes() []netip.Prefix { addrs := []netip.Prefix{} - for _, nodeAddress := range na { + for _, nodeAddress := range node.IPs() { ip := netip.PrefixFrom(nodeAddress, nodeAddress.BitLen()) addrs = append(addrs, ip) } @@ -141,8 +180,22 @@ func (na NodeAddresses) Prefixes() []netip.Prefix { return addrs } -func (na NodeAddresses) InIPSet(set *netipx.IPSet) bool { - for _, nodeAddr := range na { +func (node *Node) IPsAsString() []string { + var ret []string + + if node.IPv4 != nil { + ret = append(ret, node.IPv4.String()) + } + + if node.IPv6 != nil { + ret = append(ret, node.IPv6.String()) + } + + return ret +} + +func (node *Node) InIPSet(set *netipx.IPSet) bool { + for _, nodeAddr := range node.IPs() { if set.Contains(nodeAddr) { return true } @@ -153,62 +206,15 @@ func (na NodeAddresses) InIPSet(set *netipx.IPSet) bool { // AppendToIPSet adds the individual ips in NodeAddresses to a // given netipx.IPSetBuilder. 
-func (na NodeAddresses) AppendToIPSet(build *netipx.IPSetBuilder) { - for _, ip := range na { +func (node *Node) AppendToIPSet(build *netipx.IPSetBuilder) { + for _, ip := range node.IPs() { build.Add(ip) } } -func (na *NodeAddresses) Scan(destination interface{}) error { - switch value := destination.(type) { - case string: - addresses := strings.Split(value, ",") - *na = (*na)[:0] - for _, addr := range addresses { - if len(addr) < 1 { - continue - } - parsed, err := netip.ParseAddr(addr) - if err != nil { - return err - } - *na = append(*na, parsed) - } - - return nil - - default: - return fmt.Errorf("%w: unexpected data type %T", ErrNodeAddressesInvalid, destination) - } -} - -// Value return json value, implement driver.Valuer interface. -func (na NodeAddresses) Value() (driver.Value, error) { - addresses := strings.Join(na.StringSlice(), ",") - - return addresses, nil -} - -// IsExpired returns whether the node registration has expired. -func (node Node) IsExpired() bool { - // If Expiry is not set, the client has not indicated that - // it wants an expiry time, it is therefor considered - // to mean "not expired" - if node.Expiry == nil || node.Expiry.IsZero() { - return false - } - - return time.Now().UTC().After(*node.Expiry) -} - -// IsEphemeral returns if the node is registered as an Ephemeral node. -// https://tailscale.com/kb/1111/ephemeral-nodes/ -func (node *Node) IsEphemeral() bool { - return node.AuthKey != nil && node.AuthKey.Ephemeral -} - func (node *Node) CanAccess(filter []tailcfg.FilterRule, node2 *Node) bool { - allowedIPs := append([]netip.Addr{}, node2.IPAddresses...) 
+ src := node.IPs() + allowedIPs := node2.IPs() for _, route := range node2.Routes { if route.Enabled { @@ -220,7 +226,7 @@ func (node *Node) CanAccess(filter []tailcfg.FilterRule, node2 *Node) bool { // TODO(kradalby): Cache or pregen this matcher := matcher.MatchFromFilterRule(rule) - if !matcher.SrcsContainsIPs([]netip.Addr(node.IPAddresses)) { + if !matcher.SrcsContainsIPs(src) { continue } @@ -233,13 +239,16 @@ func (node *Node) CanAccess(filter []tailcfg.FilterRule, node2 *Node) bool { } func (nodes Nodes) FilterByIP(ip netip.Addr) Nodes { - found := make(Nodes, 0) + var found Nodes for _, node := range nodes { - for _, mIP := range node.IPAddresses { - if ip == mIP { - found = append(found, node) - } + if node.IPv4 != nil && ip == *node.IPv4 { + found = append(found, node) + continue + } + + if node.IPv6 != nil && ip == *node.IPv6 { + found = append(found, node) } } @@ -264,10 +273,22 @@ func (node *Node) BeforeSave(tx *gorm.DB) error { hi, err := json.Marshal(node.Hostinfo) if err != nil { - return fmt.Errorf("failed to marshal Hostinfo to store in db: %w", err) + return fmt.Errorf("marshalling Hostinfo to store in db: %w", err) } node.HostinfoDatabaseField = string(hi) + if node.IPv4 != nil { + node.IPv4DatabaseField.String, node.IPv4DatabaseField.Valid = node.IPv4.String(), true + } else { + node.IPv4DatabaseField.String, node.IPv4DatabaseField.Valid = "", false + } + + if node.IPv6 != nil { + node.IPv6DatabaseField.String, node.IPv6DatabaseField.Valid = node.IPv6.String(), true + } else { + node.IPv6DatabaseField.String, node.IPv6DatabaseField.Valid = "", false + } + return nil } @@ -279,27 +300,31 @@ func (node *Node) BeforeSave(tx *gorm.DB) error { func (node *Node) AfterFind(tx *gorm.DB) error { var machineKey key.MachinePublic if err := machineKey.UnmarshalText([]byte(node.MachineKeyDatabaseField)); err != nil { - return fmt.Errorf("failed to unmarshal machine key from db: %w", err) + return fmt.Errorf("unmarshalling machine key from db: %w", err) } 
node.MachineKey = machineKey var nodeKey key.NodePublic if err := nodeKey.UnmarshalText([]byte(node.NodeKeyDatabaseField)); err != nil { - return fmt.Errorf("failed to unmarshal node key from db: %w", err) + return fmt.Errorf("unmarshalling node key from db: %w", err) } node.NodeKey = nodeKey - var discoKey key.DiscoPublic - if err := discoKey.UnmarshalText([]byte(node.DiscoKeyDatabaseField)); err != nil { - return fmt.Errorf("failed to unmarshal disco key from db: %w", err) + // DiscoKey might be empty if a node has not sent it to headscale. + // This means that this might fail if the disco key is empty. + if node.DiscoKeyDatabaseField != "" { + var discoKey key.DiscoPublic + if err := discoKey.UnmarshalText([]byte(node.DiscoKeyDatabaseField)); err != nil { + return fmt.Errorf("unmarshalling disco key from db: %w", err) + } + node.DiscoKey = discoKey } - node.DiscoKey = discoKey endpoints := make([]netip.AddrPort, len(node.EndpointsDatabaseField)) for idx, ep := range node.EndpointsDatabaseField { addrPort, err := netip.ParseAddrPort(ep) if err != nil { - return fmt.Errorf("failed to parse endpoint from db: %w", err) + return fmt.Errorf("parsing endpoint from db: %w", err) } endpoints[idx] = addrPort @@ -308,30 +333,47 @@ func (node *Node) AfterFind(tx *gorm.DB) error { var hi tailcfg.Hostinfo if err := json.Unmarshal([]byte(node.HostinfoDatabaseField), &hi); err != nil { - log.Trace().Err(err).Msgf("Hostinfo content: %s", node.HostinfoDatabaseField) - - return fmt.Errorf("failed to unmarshal Hostinfo from db: %w", err) + return fmt.Errorf("unmarshalling hostinfo from database: %w", err) } node.Hostinfo = &hi + if node.IPv4DatabaseField.Valid { + ip, err := netip.ParseAddr(node.IPv4DatabaseField.String) + if err != nil { + return fmt.Errorf("parsing IPv4 from database: %w", err) + } + + node.IPv4 = &ip + } + + if node.IPv6DatabaseField.Valid { + ip, err := netip.ParseAddr(node.IPv6DatabaseField.String) + if err != nil { + return fmt.Errorf("parsing IPv6 from 
database: %w", err) + } + + node.IPv6 = &ip + } + return nil } func (node *Node) Proto() *v1.Node { nodeProto := &v1.Node{ - Id: node.ID, + Id: uint64(node.ID), MachineKey: node.MachineKey.String(), - NodeKey: node.NodeKey.String(), - DiscoKey: node.DiscoKey.String(), - IpAddresses: node.IPAddresses.StringSlice(), + NodeKey: node.NodeKey.String(), + DiscoKey: node.DiscoKey.String(), + + // TODO(kradalby): replace list with v4, v6 field? + IpAddresses: node.IPsAsString(), Name: node.Hostname, GivenName: node.GivenName, User: node.User.Proto(), ForcedTags: node.ForcedTags, - // TODO(kradalby): Implement register method enum converter - // RegisterMethod: , + RegisterMethod: node.RegisterMethodToV1Enum(), CreatedAt: timestamppb.New(node.CreatedAt), } @@ -351,13 +393,22 @@ func (node *Node) Proto() *v1.Node { return nodeProto } -func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (string, error) { - var hostname string - if dnsConfig != nil && dnsConfig.Proxied { // MagicDNS - if node.GivenName == "" { - return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeHasNoGivenName) - } +func (node *Node) GetFQDN(cfg *Config, baseDomain string) (string, error) { + if node.GivenName == "" { + return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeHasNoGivenName) + } + hostname := node.GivenName + + if baseDomain != "" { + hostname = fmt.Sprintf( + "%s.%s", + node.GivenName, + baseDomain, + ) + } + + if cfg.DNSUserNameInMagicDNS { if node.User.Name == "" { return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeUserHasNoName) } @@ -368,15 +419,14 @@ func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (stri node.User.Name, baseDomain, ) - if len(hostname) > MaxHostnameLength { - return "", fmt.Errorf( - "failed to create valid FQDN (%s): %w", - hostname, - ErrHostnameTooLong, - ) - } - } else { - hostname = node.GivenName + } + + if len(hostname) > MaxHostnameLength { + return "", fmt.Errorf( + "failed to 
create valid FQDN (%s): %w", + hostname, + ErrHostnameTooLong, + ) } return hostname, nil @@ -437,6 +487,19 @@ func (node *Node) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerC return ret } +func (node *Node) RegisterMethodToV1Enum() v1.RegisterMethod { + switch node.RegisterMethod { + case "authkey": + return v1.RegisterMethod_REGISTER_METHOD_AUTH_KEY + case "oidc": + return v1.RegisterMethod_REGISTER_METHOD_OIDC + case "cli": + return v1.RegisterMethod_REGISTER_METHOD_CLI + default: + return v1.RegisterMethod_REGISTER_METHOD_UNSPECIFIED + } +} + // ApplyPeerChange takes a PeerChange struct and updates the node. func (node *Node) ApplyPeerChange(change *tailcfg.PeerChange) { if change.Key != nil { @@ -486,8 +549,8 @@ func (nodes Nodes) String() string { return fmt.Sprintf("[ %s ](%d)", strings.Join(temp, ", "), len(temp)) } -func (nodes Nodes) IDMap() map[uint64]*Node { - ret := map[uint64]*Node{} +func (nodes Nodes) IDMap() map[NodeID]*Node { + ret := map[NodeID]*Node{} for _, node := range nodes { ret[node.ID] = node diff --git a/hscontrol/types/node_test.go b/hscontrol/types/node_test.go index 712a839e..885edf5d 100644 --- a/hscontrol/types/node_test.go +++ b/hscontrol/types/node_test.go @@ -6,12 +6,17 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/util" "tailscale.com/tailcfg" "tailscale.com/types/key" ) func Test_NodeCanAccess(t *testing.T) { + iap := func(ipStr string) *netip.Addr { + ip := netip.MustParseAddr(ipStr) + return &ip + } tests := []struct { name string node1 Node @@ -22,10 +27,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "no-rules", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + IPv4: iap("10.0.0.1"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + IPv4: iap("10.0.0.2"), }, rules: []tailcfg.FilterRule{}, want: false, @@ -33,10 
+38,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "wildcard", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + IPv4: iap("10.0.0.1"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + IPv4: iap("10.0.0.2"), }, rules: []tailcfg.FilterRule{ { @@ -54,10 +59,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "other-cant-access-src", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, + IPv4: iap("100.64.0.1"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, + IPv4: iap("100.64.0.3"), }, rules: []tailcfg.FilterRule{ { @@ -72,10 +77,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "dest-cant-access-src", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, + IPv4: iap("100.64.0.3"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, + IPv4: iap("100.64.0.2"), }, rules: []tailcfg.FilterRule{ { @@ -90,10 +95,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "src-can-access-dest", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, + IPv4: iap("100.64.0.2"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, + IPv4: iap("100.64.0.3"), }, rules: []tailcfg.FilterRule{ { @@ -118,41 +123,91 @@ func Test_NodeCanAccess(t *testing.T) { } } -func TestNodeAddressesOrder(t *testing.T) { - machineAddresses := NodeAddresses{ - netip.MustParseAddr("2001:db8::2"), - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("2001:db8::1"), - netip.MustParseAddr("100.64.0.1"), - } - - strSlice := machineAddresses.StringSlice() - expected := []string{ - "100.64.0.1", - "100.64.0.2", - "2001:db8::1", - "2001:db8::2", - } - - if len(strSlice) != len(expected) { - t.Fatalf("unexpected slice length: got %v, want %v", len(strSlice), len(expected)) - } - for i, addr := range strSlice { - if addr != expected[i] { - t.Errorf("unexpected address at index %v: got 
%v, want %v", i, addr, expected[i]) - } - } -} - func TestNodeFQDN(t *testing.T) { tests := []struct { name string node Node - dns tailcfg.DNSConfig + cfg Config domain string want string wantErr string }{ + { + name: "all-set-with-username", + node: Node{ + GivenName: "test", + User: User{ + Name: "user", + }, + }, + cfg: Config{ + DNSConfig: &tailcfg.DNSConfig{ + Proxied: true, + }, + DNSUserNameInMagicDNS: true, + }, + domain: "example.com", + want: "test.user.example.com", + }, + { + name: "no-given-name-with-username", + node: Node{ + User: User{ + Name: "user", + }, + }, + cfg: Config{ + DNSConfig: &tailcfg.DNSConfig{ + Proxied: true, + }, + DNSUserNameInMagicDNS: true, + }, + domain: "example.com", + wantErr: "failed to create valid FQDN: node has no given name", + }, + { + name: "no-user-name-with-username", + node: Node{ + GivenName: "test", + User: User{}, + }, + cfg: Config{ + DNSConfig: &tailcfg.DNSConfig{ + Proxied: true, + }, + DNSUserNameInMagicDNS: true, + }, + domain: "example.com", + wantErr: "failed to create valid FQDN: node user has no name", + }, + { + name: "no-magic-dns-with-username", + node: Node{ + GivenName: "test", + User: User{ + Name: "user", + }, + }, + cfg: Config{ + DNSConfig: &tailcfg.DNSConfig{ + Proxied: false, + }, + DNSUserNameInMagicDNS: true, + }, + domain: "example.com", + want: "test.user.example.com", + }, + { + name: "no-dnsconfig-with-username", + node: Node{ + GivenName: "test", + User: User{ + Name: "user", + }, + }, + domain: "example.com", + want: "test.example.com", + }, { name: "all-set", node: Node{ @@ -161,11 +216,14 @@ func TestNodeFQDN(t *testing.T) { Name: "user", }, }, - dns: tailcfg.DNSConfig{ - Proxied: true, + cfg: Config{ + DNSConfig: &tailcfg.DNSConfig{ + Proxied: true, + }, + DNSUserNameInMagicDNS: false, }, domain: "example.com", - want: "test.user.example.com", + want: "test.example.com", }, { name: "no-given-name", @@ -174,8 +232,11 @@ func TestNodeFQDN(t *testing.T) { Name: "user", }, }, - dns: 
tailcfg.DNSConfig{ - Proxied: true, + cfg: Config{ + DNSConfig: &tailcfg.DNSConfig{ + Proxied: true, + }, + DNSUserNameInMagicDNS: false, }, domain: "example.com", wantErr: "failed to create valid FQDN: node has no given name", @@ -186,11 +247,14 @@ func TestNodeFQDN(t *testing.T) { GivenName: "test", User: User{}, }, - dns: tailcfg.DNSConfig{ - Proxied: true, + cfg: Config{ + DNSConfig: &tailcfg.DNSConfig{ + Proxied: true, + }, + DNSUserNameInMagicDNS: false, }, - domain: "example.com", - wantErr: "failed to create valid FQDN: node user has no name", + domain: "example.com", + want: "test.example.com", }, { name: "no-magic-dns", @@ -200,11 +264,14 @@ func TestNodeFQDN(t *testing.T) { Name: "user", }, }, - dns: tailcfg.DNSConfig{ - Proxied: false, + cfg: Config{ + DNSConfig: &tailcfg.DNSConfig{ + Proxied: false, + }, + DNSUserNameInMagicDNS: false, }, domain: "example.com", - want: "test", + want: "test.example.com", }, { name: "no-dnsconfig", @@ -215,13 +282,13 @@ func TestNodeFQDN(t *testing.T) { }, }, domain: "example.com", - want: "test", + want: "test.example.com", }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - got, err := tc.node.GetFQDN(&tc.dns, tc.domain) + got, err := tc.node.GetFQDN(&tc.cfg, tc.domain) if (err != nil) && (err.Error() != tc.wantErr) { t.Errorf("GetFQDN() error = %s, wantErr %s", err, tc.wantErr) @@ -474,3 +541,53 @@ func TestApplyPeerChange(t *testing.T) { }) } } + +func TestNodeRegisterMethodToV1Enum(t *testing.T) { + tests := []struct { + name string + node Node + want v1.RegisterMethod + }{ + { + name: "authkey", + node: Node{ + ID: 1, + RegisterMethod: util.RegisterMethodAuthKey, + }, + want: v1.RegisterMethod_REGISTER_METHOD_AUTH_KEY, + }, + { + name: "oidc", + node: Node{ + ID: 1, + RegisterMethod: util.RegisterMethodOIDC, + }, + want: v1.RegisterMethod_REGISTER_METHOD_OIDC, + }, + { + name: "cli", + node: Node{ + ID: 1, + RegisterMethod: util.RegisterMethodCLI, + }, + want: 
v1.RegisterMethod_REGISTER_METHOD_CLI, + }, + { + name: "unknown", + node: Node{ + ID: 0, + }, + want: v1.RegisterMethod_REGISTER_METHOD_UNSPECIFIED, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.node.RegisterMethodToV1Enum() + + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("RegisterMethodToV1Enum() unexpected result (-want +got):\n%s", diff) + } + }) + } +} diff --git a/hscontrol/types/policy.go b/hscontrol/types/policy.go new file mode 100644 index 00000000..a30bf640 --- /dev/null +++ b/hscontrol/types/policy.go @@ -0,0 +1,20 @@ +package types + +import ( + "errors" + + "gorm.io/gorm" +) + +var ( + ErrPolicyNotFound = errors.New("acl policy not found") + ErrPolicyUpdateIsDisabled = errors.New("update is disabled for modes other than 'database'") +) + +// Policy represents a policy in the database. +type Policy struct { + gorm.Model + + // Data contains the policy in HuJSON format. + Data string +} diff --git a/hscontrol/types/preauth_key.go b/hscontrol/types/preauth_key.go index 0d8c9cff..8b02569a 100644 --- a/hscontrol/types/preauth_key.go +++ b/hscontrol/types/preauth_key.go @@ -14,11 +14,11 @@ type PreAuthKey struct { ID uint64 `gorm:"primary_key"` Key string UserID uint - User User + User User `gorm:"constraint:OnDelete:CASCADE;"` Reusable bool - Ephemeral bool `gorm:"default:false"` - Used bool `gorm:"default:false"` - ACLTags []PreAuthKeyACLTag + Ephemeral bool `gorm:"default:false"` + Used bool `gorm:"default:false"` + ACLTags []PreAuthKeyACLTag `gorm:"constraint:OnDelete:CASCADE;"` CreatedAt *time.Time Expiration *time.Time diff --git a/hscontrol/types/testdata/base-domain-in-server-url.yaml b/hscontrol/types/testdata/base-domain-in-server-url.yaml new file mode 100644 index 00000000..683e0218 --- /dev/null +++ b/hscontrol/types/testdata/base-domain-in-server-url.yaml @@ -0,0 +1,16 @@ +noise: + private_key_path: "private_key.pem" + +prefixes: + v6: fd7a:115c:a1e0::/48 + v4: 100.64.0.0/10 + 
+database: + type: sqlite3 + +server_url: "https://derp.no" + +dns: + magic_dns: true + base_domain: derp.no + use_username_in_magic_dns: false diff --git a/hscontrol/types/testdata/base-domain-not-in-server-url.yaml b/hscontrol/types/testdata/base-domain-not-in-server-url.yaml new file mode 100644 index 00000000..3af345e1 --- /dev/null +++ b/hscontrol/types/testdata/base-domain-not-in-server-url.yaml @@ -0,0 +1,16 @@ +noise: + private_key_path: "private_key.pem" + +prefixes: + v6: fd7a:115c:a1e0::/48 + v4: 100.64.0.0/10 + +database: + type: sqlite3 + +server_url: "https://derp.no" + +dns: + magic_dns: true + base_domain: clients.derp.no + use_username_in_magic_dns: false diff --git a/hscontrol/types/testdata/dns_full.yaml b/hscontrol/types/testdata/dns_full.yaml new file mode 100644 index 00000000..c47e7b0f --- /dev/null +++ b/hscontrol/types/testdata/dns_full.yaml @@ -0,0 +1,37 @@ +# minimum to not fatal +noise: + private_key_path: "private_key.pem" +server_url: "https://derp.no" + +dns: + magic_dns: true + base_domain: example.com + + nameservers: + global: + - 1.1.1.1 + - 1.0.0.1 + - 2606:4700:4700::1111 + - 2606:4700:4700::1001 + - https://dns.nextdns.io/abc123 + + split: + foo.bar.com: + - 1.1.1.1 + darp.headscale.net: + - 1.1.1.1 + - 8.8.8.8 + + search_domains: + - test.com + - bar.com + + extra_records: + - name: "grafana.myvpn.example.com" + type: "A" + value: "100.64.0.3" + + # you can also put it in one line + - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.4" } + + use_username_in_magic_dns: true diff --git a/hscontrol/types/testdata/dns_full_no_magic.yaml b/hscontrol/types/testdata/dns_full_no_magic.yaml new file mode 100644 index 00000000..ac3cc470 --- /dev/null +++ b/hscontrol/types/testdata/dns_full_no_magic.yaml @@ -0,0 +1,37 @@ +# minimum to not fatal +noise: + private_key_path: "private_key.pem" +server_url: "https://derp.no" + +dns: + magic_dns: false + base_domain: example.com + + nameservers: + global: + - 1.1.1.1 + - 
1.0.0.1 + - 2606:4700:4700::1111 + - 2606:4700:4700::1001 + - https://dns.nextdns.io/abc123 + + split: + foo.bar.com: + - 1.1.1.1 + darp.headscale.net: + - 1.1.1.1 + - 8.8.8.8 + + search_domains: + - test.com + - bar.com + + extra_records: + - name: "grafana.myvpn.example.com" + type: "A" + value: "100.64.0.3" + + # you can also put it in one line + - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.4" } + + use_username_in_magic_dns: true diff --git a/hscontrol/types/testdata/minimal.yaml b/hscontrol/types/testdata/minimal.yaml new file mode 100644 index 00000000..1d9b1e00 --- /dev/null +++ b/hscontrol/types/testdata/minimal.yaml @@ -0,0 +1,3 @@ +noise: + private_key_path: "private_key.pem" +server_url: "https://derp.no" diff --git a/hscontrol/types/testdata/policy-path-is-loaded.yaml b/hscontrol/types/testdata/policy-path-is-loaded.yaml new file mode 100644 index 00000000..da0d29cd --- /dev/null +++ b/hscontrol/types/testdata/policy-path-is-loaded.yaml @@ -0,0 +1,18 @@ +noise: + private_key_path: "private_key.pem" + +prefixes: + v6: fd7a:115c:a1e0::/48 + v4: 100.64.0.0/10 + +database: + type: sqlite3 + +server_url: "https://derp.no" + +acl_policy_path: "/etc/acl_policy.yaml" +policy: + type: file + path: "/etc/policy.hujson" + +dns.magic_dns: false diff --git a/hscontrol/types/users.go b/hscontrol/types/users.go index 0b8324f2..63e73a56 100644 --- a/hscontrol/types/users.go +++ b/hscontrol/types/users.go @@ -19,32 +19,46 @@ type User struct { Name string `gorm:"unique"` } -func (n *User) TailscaleUser() *tailcfg.User { +// TODO(kradalby): See if we can fill in Gravatar here +func (u *User) profilePicURL() string { + return "" +} + +func (u *User) TailscaleUser() *tailcfg.User { user := tailcfg.User{ - ID: tailcfg.UserID(n.ID), - LoginName: n.Name, - DisplayName: n.Name, - // TODO(kradalby): See if we can fill in Gravatar here - ProfilePicURL: "", + ID: tailcfg.UserID(u.ID), + LoginName: u.Name, + DisplayName: u.Name, + ProfilePicURL: 
u.profilePicURL(), Logins: []tailcfg.LoginID{}, - Created: n.CreatedAt, + Created: u.CreatedAt, } return &user } -func (n *User) TailscaleLogin() *tailcfg.Login { +func (u *User) TailscaleLogin() *tailcfg.Login { login := tailcfg.Login{ - ID: tailcfg.LoginID(n.ID), - LoginName: n.Name, - DisplayName: n.Name, - // TODO(kradalby): See if we can fill in Gravatar here - ProfilePicURL: "", + ID: tailcfg.LoginID(u.ID), + // TODO(kradalby): this should reflect registration method. + Provider: "", + LoginName: u.Name, + DisplayName: u.Name, + ProfilePicURL: u.profilePicURL(), } return &login } +func (u *User) TailscaleUserProfile() tailcfg.UserProfile { + return tailcfg.UserProfile{ + ID: tailcfg.UserID(u.ID), + LoginName: u.Name, + DisplayName: u.Name, + ProfilePicURL: u.profilePicURL(), + } +} + func (n *User) Proto() *v1.User { return &v1.User{ Id: strconv.FormatUint(uint64(n.ID), util.Base10), diff --git a/hscontrol/util/dns.go b/hscontrol/util/dns.go index c6bd2b69..ab3c90b7 100644 --- a/hscontrol/util/dns.go +++ b/hscontrol/util/dns.go @@ -103,33 +103,7 @@ func CheckForFQDNRules(name string) error { // From the netmask we can find out the wildcard bits (the bits that are not set in the netmask). // This allows us to then calculate the subnets included in the subsequent class block and generate the entries. -func GenerateMagicDNSRootDomains(ipPrefixes []netip.Prefix) []dnsname.FQDN { - fqdns := make([]dnsname.FQDN, 0, len(ipPrefixes)) - for _, ipPrefix := range ipPrefixes { - var generateDNSRoot func(netip.Prefix) []dnsname.FQDN - switch ipPrefix.Addr().BitLen() { - case ipv4AddressLength: - generateDNSRoot = generateIPv4DNSRootDomain - - case ipv6AddressLength: - generateDNSRoot = generateIPv6DNSRootDomain - - default: - panic( - fmt.Sprintf( - "unsupported IP version with address length %d", - ipPrefix.Addr().BitLen(), - ), - ) - } - - fqdns = append(fqdns, generateDNSRoot(ipPrefix)...) 
- } - - return fqdns -} - -func generateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { +func GenerateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { // Conversion to the std lib net.IPnet, a bit easier to operate netRange := netipx.PrefixIPNet(ipPrefix) maskBits, _ := netRange.Mask.Size() @@ -165,7 +139,27 @@ func generateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { return fqdns } -func generateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { +// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`. +// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS +// server (listening in 100.100.100.100 udp/53) should be used for. +// +// Tailscale.com includes in the list: +// - the `BaseDomain` of the user +// - the reverse DNS entry for IPv6 (0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa., see below more on IPv6) +// - the reverse DNS entries for the IPv4 subnets covered by the user's `IPPrefix`. +// In the public SaaS this is [64-127].100.in-addr.arpa. +// +// The main purpose of this function is then generating the list of IPv4 entries. For the 100.64.0.0/10, this +// is clear, and could be hardcoded. But we are allowing any range as `IPPrefix`, so we need to find out the +// subnets when we have 172.16.0.0/16 (i.e., [0-255].16.172.in-addr.arpa.), or any other subnet. +// +// How IN-ADDR.ARPA domains work is defined in RFC1035 (section 3.5). Tailscale.com seems to adhere to this, +// and do not make use of RFC2317 ("Classless IN-ADDR.ARPA delegation") - hence generating the entries for the next +// class block only. + +// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask). +// This allows us to then calculate the subnets included in the subsequent class block and generate the entries. 
+func GenerateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { const nibbleLen = 4 maskBits, _ := netipx.PrefixIPNet(ipPrefix).Mask.Size() diff --git a/hscontrol/util/dns_test.go b/hscontrol/util/dns_test.go index 9d9b08b3..2559cae6 100644 --- a/hscontrol/util/dns_test.go +++ b/hscontrol/util/dns_test.go @@ -148,10 +148,7 @@ func TestCheckForFQDNRules(t *testing.T) { } func TestMagicDNSRootDomains100(t *testing.T) { - prefixes := []netip.Prefix{ - netip.MustParsePrefix("100.64.0.0/10"), - } - domains := GenerateMagicDNSRootDomains(prefixes) + domains := GenerateIPv4DNSRootDomain(netip.MustParsePrefix("100.64.0.0/10")) found := false for _, domain := range domains { @@ -185,10 +182,7 @@ func TestMagicDNSRootDomains100(t *testing.T) { } func TestMagicDNSRootDomains172(t *testing.T) { - prefixes := []netip.Prefix{ - netip.MustParsePrefix("172.16.0.0/16"), - } - domains := GenerateMagicDNSRootDomains(prefixes) + domains := GenerateIPv4DNSRootDomain(netip.MustParsePrefix("172.16.0.0/16")) found := false for _, domain := range domains { @@ -213,20 +207,14 @@ func TestMagicDNSRootDomains172(t *testing.T) { // Happens when netmask is a multiple of 4 bits (sounds likely). 
func TestMagicDNSRootDomainsIPv6Single(t *testing.T) { - prefixes := []netip.Prefix{ - netip.MustParsePrefix("fd7a:115c:a1e0::/48"), - } - domains := GenerateMagicDNSRootDomains(prefixes) + domains := GenerateIPv6DNSRootDomain(netip.MustParsePrefix("fd7a:115c:a1e0::/48")) assert.Len(t, domains, 1) assert.Equal(t, "0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.", domains[0].WithTrailingDot()) } func TestMagicDNSRootDomainsIPv6SingleMultiple(t *testing.T) { - prefixes := []netip.Prefix{ - netip.MustParsePrefix("fd7a:115c:a1e0::/50"), - } - domains := GenerateMagicDNSRootDomains(prefixes) + domains := GenerateIPv6DNSRootDomain(netip.MustParsePrefix("fd7a:115c:a1e0::/50")) yieldsRoot := func(dom string) bool { for _, candidate := range domains { diff --git a/hscontrol/util/log.go b/hscontrol/util/log.go index 41d667d1..12f646b1 100644 --- a/hscontrol/util/log.go +++ b/hscontrol/util/log.go @@ -1,7 +1,14 @@ package util import ( + "context" + "errors" + "time" + + "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "gorm.io/gorm" + gormLogger "gorm.io/gorm/logger" "tailscale.com/types/logger" ) @@ -14,3 +21,71 @@ func TSLogfWrapper() logger.Logf { log.Debug().Caller().Msgf(format, args...) 
} } + +type DBLogWrapper struct { + Logger *zerolog.Logger + Level zerolog.Level + Event *zerolog.Event + SlowThreshold time.Duration + SkipErrRecordNotFound bool + ParameterizedQueries bool +} + +func NewDBLogWrapper(origin *zerolog.Logger, slowThreshold time.Duration, skipErrRecordNotFound bool, parameterizedQueries bool) *DBLogWrapper { + l := &DBLogWrapper{ + Logger: origin, + Level: origin.GetLevel(), + SlowThreshold: slowThreshold, + SkipErrRecordNotFound: skipErrRecordNotFound, + ParameterizedQueries: parameterizedQueries, + } + + return l +} + +type DBLogWrapperOption func(*DBLogWrapper) + +func (l *DBLogWrapper) LogMode(gormLogger.LogLevel) gormLogger.Interface { + return l +} + +func (l *DBLogWrapper) Info(ctx context.Context, msg string, data ...interface{}) { + l.Logger.Info().Msgf(msg, data...) +} + +func (l *DBLogWrapper) Warn(ctx context.Context, msg string, data ...interface{}) { + l.Logger.Warn().Msgf(msg, data...) +} + +func (l *DBLogWrapper) Error(ctx context.Context, msg string, data ...interface{}) { + l.Logger.Error().Msgf(msg, data...) 
+} + +func (l *DBLogWrapper) Trace(ctx context.Context, begin time.Time, fc func() (sql string, rowsAffected int64), err error) { + elapsed := time.Since(begin) + sql, rowsAffected := fc() + fields := map[string]interface{}{ + "duration": elapsed, + "sql": sql, + "rowsAffected": rowsAffected, + } + + if err != nil && !(errors.Is(err, gorm.ErrRecordNotFound) && l.SkipErrRecordNotFound) { + l.Logger.Error().Err(err).Fields(fields).Msgf("") + return + } + + if l.SlowThreshold != 0 && elapsed > l.SlowThreshold { + l.Logger.Warn().Fields(fields).Msgf("") + return + } + + l.Logger.Debug().Fields(fields).Msgf("") +} + +func (l *DBLogWrapper) ParamsFilter(ctx context.Context, sql string, params ...interface{}) (string, []interface{}) { + if l.ParameterizedQueries { + return sql, nil + } + return sql, params +} diff --git a/hscontrol/util/net.go b/hscontrol/util/net.go index b704c936..c44b7287 100644 --- a/hscontrol/util/net.go +++ b/hscontrol/util/net.go @@ -1,8 +1,10 @@ package util import ( + "cmp" "context" "net" + "net/netip" ) func GrpcSocketDialer(ctx context.Context, addr string) (net.Conn, error) { @@ -10,3 +12,20 @@ func GrpcSocketDialer(ctx context.Context, addr string) (net.Conn, error) { return d.DialContext(ctx, "unix", addr) } + + +// TODO(kradalby): Remove after go 1.24, will be in stdlib. +// Compare returns an integer comparing two prefixes. +// The result will be 0 if p == p2, -1 if p < p2, and +1 if p > p2. +// Prefixes sort first by validity (invalid before valid), then +// address family (IPv4 before IPv6), then prefix length, then +// address. 
+func ComparePrefix(p, p2 netip.Prefix) int { + if c := cmp.Compare(p.Addr().BitLen(), p2.Addr().BitLen()); c != 0 { + return c + } + if c := cmp.Compare(p.Bits(), p2.Bits()); c != 0 { + return c + } + return p.Addr().Compare(p2.Addr()) +} diff --git a/hscontrol/util/string.go b/hscontrol/util/string.go index 6f018aff..ce38b82e 100644 --- a/hscontrol/util/string.go +++ b/hscontrol/util/string.go @@ -56,16 +56,6 @@ func GenerateRandomStringDNSSafe(size int) (string, error) { return str[:size], nil } -func IsStringInSlice(slice []string, str string) bool { - for _, s := range slice { - if s == str { - return true - } - } - - return false -} - func TailNodesToString(nodes []*tailcfg.Node) string { temp := make([]string, len(nodes)) diff --git a/hscontrol/util/test.go b/hscontrol/util/test.go index 0a23acb4..d93ae1f2 100644 --- a/hscontrol/util/test.go +++ b/hscontrol/util/test.go @@ -4,7 +4,9 @@ import ( "net/netip" "github.com/google/go-cmp/cmp" + "tailscale.com/types/ipproto" "tailscale.com/types/key" + "tailscale.com/types/views" ) var PrefixComparer = cmp.Comparer(func(x, y netip.Prefix) bool { @@ -31,6 +33,8 @@ var DkeyComparer = cmp.Comparer(func(x, y key.DiscoPublic) bool { return x.String() == y.String() }) +var ViewSliceIPProtoComparer = cmp.Comparer(func(a, b views.Slice[ipproto.Proto]) bool { return views.SliceEqual(a, b) }) + var Comparers []cmp.Option = []cmp.Option{ - IPComparer, PrefixComparer, AddrPortComparer, MkeyComparer, NkeyComparer, DkeyComparer, + IPComparer, PrefixComparer, AddrPortComparer, MkeyComparer, NkeyComparer, DkeyComparer, ViewSliceIPProtoComparer, } diff --git a/integration/acl_test.go b/integration/acl_test.go index 517e2dfb..f7b59eb7 100644 --- a/integration/acl_test.go +++ b/integration/acl_test.go @@ -1,11 +1,13 @@ package integration import ( + "encoding/json" "fmt" "net/netip" "strings" "testing" + "github.com/google/go-cmp/cmp" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/integration/hsic" 
"github.com/juanfont/headscale/integration/tsic" @@ -51,7 +53,7 @@ func aclScenario( clientsPerUser int, ) *Scenario { t.Helper() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) spec := map[string]int{ @@ -264,7 +266,7 @@ func TestACLHostsInNetMapTable(t *testing.T) { for name, testCase := range tests { t.Run(name, func(t *testing.T) { - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) spec := testCase.users @@ -1012,3 +1014,156 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) { }) } } + +func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + spec := map[string]int{ + "user1": 1, + "user2": 1, + } + + err = scenario.CreateHeadscaleEnv(spec, + []tsic.Option{ + // Alpine containers dont have ip6tables set up, which causes + // tailscaled to stop configuring the wgengine, causing it + // to not configure DNS. + tsic.WithNetfilter("off"), + tsic.WithDockerEntrypoint([]string{ + "/bin/sh", + "-c", + "/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev", + }), + tsic.WithDockerWorkdir("/"), + }, + hsic.WithTestName("policyreload"), + hsic.WithConfigEnv(map[string]string{ + "HEADSCALE_POLICY_MODE": "database", + }), + ) + assertNoErr(t, err) + + _, err = scenario.ListTailscaleClientsFQDNs() + assertNoErrListFQDN(t, err) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + user1Clients, err := scenario.ListTailscaleClients("user1") + assertNoErr(t, err) + + user2Clients, err := scenario.ListTailscaleClients("user2") + assertNoErr(t, err) + + all := append(user1Clients, user2Clients...) 
+ + // Initially all nodes can reach each other + for _, client := range all { + for _, peer := range all { + if client.ID() == peer.ID() { + continue + } + + fqdn, err := peer.FQDN() + assertNoErr(t, err) + + url := fmt.Sprintf("http://%s/etc/hostname", fqdn) + t.Logf("url from %s to %s", client.Hostname(), url) + + result, err := client.Curl(url) + assert.Len(t, result, 13) + assertNoErr(t, err) + } + } + + headscale, err := scenario.Headscale() + assertNoErr(t, err) + + p := policy.ACLPolicy{ + ACLs: []policy.ACL{ + { + Action: "accept", + Sources: []string{"user1"}, + Destinations: []string{"user2:*"}, + }, + }, + Hosts: policy.Hosts{}, + } + + pBytes, _ := json.Marshal(p) + + policyFilePath := "/etc/headscale/policy.json" + + err = headscale.WriteFile(policyFilePath, pBytes) + assertNoErr(t, err) + + // No policy is present at this time. + // Add a new policy from a file. + _, err = headscale.Execute( + []string{ + "headscale", + "policy", + "set", + "-f", + policyFilePath, + }, + ) + assertNoErr(t, err) + + // Get the current policy and check + // if it is the same as the one we set. 
+ var output *policy.ACLPolicy + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "policy", + "get", + "--output", + "json", + }, + &output, + ) + assertNoErr(t, err) + + assert.Len(t, output.ACLs, 1) + + if diff := cmp.Diff(p, *output); diff != "" { + t.Errorf("unexpected policy(-want +got):\n%s", diff) + } + + // Test that user1 can visit all user2 + for _, client := range user1Clients { + for _, peer := range user2Clients { + fqdn, err := peer.FQDN() + assertNoErr(t, err) + + url := fmt.Sprintf("http://%s/etc/hostname", fqdn) + t.Logf("url from %s to %s", client.Hostname(), url) + + result, err := client.Curl(url) + assert.Len(t, result, 13) + assertNoErr(t, err) + } + } + + // Test that user2 _cannot_ visit user1 + for _, client := range user2Clients { + for _, peer := range user1Clients { + fqdn, err := peer.FQDN() + assertNoErr(t, err) + + url := fmt.Sprintf("http://%s/etc/hostname", fqdn) + t.Logf("url from %s to %s", client.Hostname(), url) + + result, err := client.Curl(url) + assert.Empty(t, result) + assert.Error(t, err) + } + } +} diff --git a/integration/auth_oidc_test.go b/integration/auth_oidc_test.go index 36e74a8d..d24bf452 100644 --- a/integration/auth_oidc_test.go +++ b/integration/auth_oidc_test.go @@ -42,7 +42,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) { IntegrationSkip(t) t.Parallel() - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) scenario := AuthOIDCScenario{ @@ -83,7 +83,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -100,7 +100,7 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) { shortAccessTTL := 5 * time.Minute - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) 
assertNoErr(t, err) baseScenario.pool.MaxWait = 5 * time.Minute @@ -142,7 +142,7 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() diff --git a/integration/auth_web_flow_test.go b/integration/auth_web_flow_test.go index aa589fac..8e121ca0 100644 --- a/integration/auth_web_flow_test.go +++ b/integration/auth_web_flow_test.go @@ -26,7 +26,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) { IntegrationSkip(t) t.Parallel() - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) if err != nil { t.Fatalf("failed to create scenario: %s", err) } @@ -53,7 +53,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -67,7 +67,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { IntegrationSkip(t) t.Parallel() - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) scenario := AuthWebFlowScenario{ @@ -92,7 +92,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() diff --git a/integration/cli_test.go b/integration/cli_test.go index af7b073b..fd7a8c1b 100644 --- a/integration/cli_test.go +++ b/integration/cli_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "sort" + "strings" "testing" "time" @@ -32,7 +33,7 @@ func TestUserCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, 
err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -112,7 +113,7 @@ func TestPreAuthKeyCommand(t *testing.T) { user := "preauthkeyspace" count := 3 - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -254,7 +255,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { user := "pre-auth-key-without-exp-user" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -317,7 +318,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { user := "pre-auth-key-reus-ephm-user" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -388,13 +389,108 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { assert.Len(t, listedPreAuthKeys, 3) } +func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + user1 := "user1" + user2 := "user2" + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + spec := map[string]int{ + user1: 1, + user2: 0, + } + + err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak")) + assertNoErr(t, err) + + headscale, err := scenario.Headscale() + assertNoErr(t, err) + + var user2Key v1.PreAuthKey + + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "preauthkeys", + "--user", + user2, + "create", + "--reusable", + "--expiration", + "24h", + "--output", + "json", + "--tags", + "tag:test1,tag:test2", + }, + &user2Key, + ) + assertNoErr(t, err) + + allClients, err := scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + assert.Len(t, allClients, 1) + + client := allClients[0] + + // Log out from user1 + err = client.Logout() + assertNoErr(t, err) + + err = 
scenario.WaitForTailscaleLogout() + assertNoErr(t, err) + + status, err := client.Status() + assertNoErr(t, err) + if status.BackendState == "Starting" || status.BackendState == "Running" { + t.Fatalf("expected node to be logged out, backend state: %s", status.BackendState) + } + + err = client.Login(headscale.GetEndpoint(), user2Key.GetKey()) + assertNoErr(t, err) + + status, err = client.Status() + assertNoErr(t, err) + if status.BackendState != "Running" { + t.Fatalf("expected node to be logged in, backend state: %s", status.BackendState) + } + + if status.Self.UserID.String() != "userid:2" { + t.Fatalf("expected node to be logged in as userid:2, got: %s", status.Self.UserID.String()) + } + + var listNodes []v1.Node + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "nodes", + "list", + "--output", + "json", + }, + &listNodes, + ) + assert.Nil(t, err) + assert.Len(t, listNodes, 1) + + assert.Equal(t, "user2", listNodes[0].GetUser().GetName()) +} + func TestApiKeyCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() count := 5 - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -562,7 +658,7 @@ func TestNodeTagCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -640,13 +736,7 @@ func TestNodeTagCommand(t *testing.T) { assert.Equal(t, []string{"tag:test"}, node.GetForcedTags()) - // try to set a wrong tag and retrieve the error - type errOutput struct { - Error string `json:"error"` - } - var errorOutput errOutput - err = executeAndUnmarshal( - headscale, + _, err = headscale.Execute( []string{ "headscale", "nodes", @@ -655,10 +745,8 @@ func TestNodeTagCommand(t *testing.T) { "-t", "wrong-tag", "--output", "json", }, - &errorOutput, ) - assert.Nil(t, err) - assert.Contains(t, errorOutput.Error, "tag must start with the string 
'tag:'") + assert.ErrorContains(t, err, "tag must start with the string 'tag:'") // Test list all nodes after added seconds resultMachines := make([]*v1.Node, len(machineKeys)) @@ -695,7 +783,7 @@ func TestNodeAdvertiseTagNoACLCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -745,7 +833,7 @@ func TestNodeAdvertiseTagWithACLCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -808,7 +896,7 @@ func TestNodeCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -1049,7 +1137,7 @@ func TestNodeExpireCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -1176,7 +1264,7 @@ func TestNodeRenameCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -1303,18 +1391,17 @@ func TestNodeRenameCommand(t *testing.T) { assert.Contains(t, listAllAfterRename[4].GetGivenName(), "node-5") // Test failure for too long names - result, err := headscale.Execute( + _, err = headscale.Execute( []string{ "headscale", "nodes", "rename", "--identifier", fmt.Sprintf("%d", listAll[4].GetId()), - "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine12345678901234567890", + strings.Repeat("t", 64), }, ) - assert.Nil(t, err) - assert.Contains(t, result, "not be over 63 chars") + assert.ErrorContains(t, err, "not be over 63 chars") var listAllAfterRenameAttempt []v1.Node err = executeAndUnmarshal( @@ -1343,7 +1430,7 @@ func 
TestNodeMoveCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -1441,7 +1528,7 @@ func TestNodeMoveCommand(t *testing.T) { assert.Equal(t, allNodes[0].GetUser(), node.GetUser()) assert.Equal(t, allNodes[0].GetUser().GetName(), "new-user") - moveToNonExistingNSResult, err := headscale.Execute( + _, err = headscale.Execute( []string{ "headscale", "nodes", @@ -1454,11 +1541,9 @@ func TestNodeMoveCommand(t *testing.T) { "json", }, ) - assert.Nil(t, err) - - assert.Contains( + assert.ErrorContains( t, - moveToNonExistingNSResult, + err, "user not found", ) assert.Equal(t, node.GetUser().GetName(), "new-user") @@ -1501,3 +1586,157 @@ func TestNodeMoveCommand(t *testing.T) { assert.Equal(t, node.GetUser().GetName(), "old-user") } + +func TestPolicyCommand(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + spec := map[string]int{ + "policy-user": 0, + } + + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("clins"), + hsic.WithConfigEnv(map[string]string{ + "HEADSCALE_POLICY_MODE": "database", + }), + ) + assertNoErr(t, err) + + headscale, err := scenario.Headscale() + assertNoErr(t, err) + + p := policy.ACLPolicy{ + ACLs: []policy.ACL{ + { + Action: "accept", + Sources: []string{"*"}, + Destinations: []string{"*:*"}, + }, + }, + TagOwners: map[string][]string{ + "tag:exists": {"policy-user"}, + }, + } + + pBytes, _ := json.Marshal(p) + + policyFilePath := "/etc/headscale/policy.json" + + err = headscale.WriteFile(policyFilePath, pBytes) + assertNoErr(t, err) + + // No policy is present at this time. + // Add a new policy from a file. 
+ _, err = headscale.Execute( + []string{ + "headscale", + "policy", + "set", + "-f", + policyFilePath, + }, + ) + + assertNoErr(t, err) + + // Get the current policy and check + // if it is the same as the one we set. + var output *policy.ACLPolicy + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "policy", + "get", + "--output", + "json", + }, + &output, + ) + assertNoErr(t, err) + + assert.Len(t, output.TagOwners, 1) + assert.Len(t, output.ACLs, 1) + assert.Equal(t, output.TagOwners["tag:exists"], []string{"policy-user"}) +} + +func TestPolicyBrokenConfigCommand(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + spec := map[string]int{ + "policy-user": 1, + } + + err = scenario.CreateHeadscaleEnv( + spec, + []tsic.Option{}, + hsic.WithTestName("clins"), + hsic.WithConfigEnv(map[string]string{ + "HEADSCALE_POLICY_MODE": "database", + }), + ) + assertNoErr(t, err) + + headscale, err := scenario.Headscale() + assertNoErr(t, err) + + p := policy.ACLPolicy{ + ACLs: []policy.ACL{ + { + // This is an unknown action, so it will return an error + // and the config will not be applied. + Action: "acccept", + Sources: []string{"*"}, + Destinations: []string{"*:*"}, + }, + }, + TagOwners: map[string][]string{ + "tag:exists": {"policy-user"}, + }, + } + + pBytes, _ := json.Marshal(p) + + policyFilePath := "/etc/headscale/policy.json" + + err = headscale.WriteFile(policyFilePath, pBytes) + assertNoErr(t, err) + + // No policy is present at this time. + // Add a new policy from a file. + _, err = headscale.Execute( + []string{ + "headscale", + "policy", + "set", + "-f", + policyFilePath, + }, + ) + assert.ErrorContains(t, err, "verifying policy rules: invalid action") + + // The new policy was invalid, the old one should still be in place, which + // is none. 
+ _, err = headscale.Execute( + []string{ + "headscale", + "policy", + "get", + "--output", + "json", + }, + ) + assert.ErrorContains(t, err, "acl policy not found") +} diff --git a/integration/control.go b/integration/control.go index f5557495..b5699577 100644 --- a/integration/control.go +++ b/integration/control.go @@ -6,10 +6,11 @@ import ( ) type ControlServer interface { - Shutdown() error - SaveLog(string) error + Shutdown() (string, string, error) + SaveLog(string) (string, string, error) SaveProfile(string) error Execute(command []string) (string, error) + WriteFile(path string, content []byte) error ConnectToNetwork(network *dockertest.Network) error GetHealthEndpoint() string GetEndpoint() string diff --git a/integration/dns_test.go b/integration/dns_test.go new file mode 100644 index 00000000..f7973300 --- /dev/null +++ b/integration/dns_test.go @@ -0,0 +1,246 @@ +package integration + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/juanfont/headscale/integration/hsic" + "github.com/juanfont/headscale/integration/tsic" + "github.com/stretchr/testify/assert" +) + +func TestResolveMagicDNS(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + spec := map[string]int{ + "magicdns1": len(MustTestVersions), + "magicdns2": len(MustTestVersions), + } + + err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns")) + assertNoErrHeadscaleEnv(t, err) + + allClients, err := scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + // assertClientsState(t, allClients) + + // Poor mans cache + _, err = scenario.ListTailscaleClientsFQDNs() + assertNoErrListFQDN(t, err) + + _, err = scenario.ListTailscaleClientsIPs() + assertNoErrListClientIPs(t, err) + + for _, client := range allClients { + for _, peer := range allClients { + // It is safe 
to ignore this error as we handled it when caching it + peerFQDN, _ := peer.FQDN() + + assert.Equal(t, fmt.Sprintf("%s.headscale.net", peer.Hostname()), peerFQDN) + + command := []string{ + "tailscale", + "ip", peerFQDN, + } + result, _, err := client.Execute(command) + if err != nil { + t.Fatalf( + "failed to execute resolve/ip command %s from %s: %s", + peerFQDN, + client.Hostname(), + err, + ) + } + + ips, err := peer.IPs() + if err != nil { + t.Fatalf( + "failed to get ips for %s: %s", + peer.Hostname(), + err, + ) + } + + for _, ip := range ips { + if !strings.Contains(result, ip.String()) { + t.Fatalf("ip %s is not found in \n%s\n", ip.String(), result) + } + } + } + } +} + +// TestValidateResolvConf validates that the resolv.conf file +// ends up as expected in our Tailscale containers. +// All the containers are based on Alpine, meaning Tailscale +// will overwrite the resolv.conf file. +// On other platform, Tailscale will integrate with a dns manager +// if available (like systemd-resolved). 
+func TestValidateResolvConf(t *testing.T) { + IntegrationSkip(t) + + resolvconf := func(conf string) string { + return strings.ReplaceAll(`# resolv.conf(5) file generated by tailscale +# For more info, see https://tailscale.com/s/resolvconf-overwrite +# DO NOT EDIT THIS FILE BY HAND -- CHANGES WILL BE OVERWRITTEN +`+conf, "\t", "") + } + + tests := []struct { + name string + conf map[string]string + wantConfCompareFunc func(*testing.T, string) + }{ + // New config + { + name: "no-config", + conf: map[string]string{ + "HEADSCALE_DNS_BASE_DOMAIN": "", + "HEADSCALE_DNS_MAGIC_DNS": "false", + "HEADSCALE_DNS_NAMESERVERS_GLOBAL": "", + }, + wantConfCompareFunc: func(t *testing.T, got string) { + assert.NotContains(t, got, "100.100.100.100") + }, + }, + { + name: "global-only", + conf: map[string]string{ + "HEADSCALE_DNS_BASE_DOMAIN": "", + "HEADSCALE_DNS_MAGIC_DNS": "false", + "HEADSCALE_DNS_NAMESERVERS_GLOBAL": "8.8.8.8 1.1.1.1", + }, + wantConfCompareFunc: func(t *testing.T, got string) { + want := resolvconf(` + nameserver 100.100.100.100 + `) + assert.Equal(t, want, got) + }, + }, + { + name: "base-integration-config", + conf: map[string]string{ + "HEADSCALE_DNS_BASE_DOMAIN": "very-unique-domain.net", + }, + wantConfCompareFunc: func(t *testing.T, got string) { + want := resolvconf(` + nameserver 100.100.100.100 + search very-unique-domain.net + `) + assert.Equal(t, want, got) + }, + }, + { + name: "base-magic-dns-off", + conf: map[string]string{ + "HEADSCALE_DNS_MAGIC_DNS": "false", + "HEADSCALE_DNS_BASE_DOMAIN": "very-unique-domain.net", + }, + wantConfCompareFunc: func(t *testing.T, got string) { + want := resolvconf(` + nameserver 100.100.100.100 + search very-unique-domain.net + `) + assert.Equal(t, want, got) + }, + }, + { + name: "base-extra-search-domains", + conf: map[string]string{ + "HEADSCALE_DNS_SEARCH_DOMAINS": "test1.no test2.no", + "HEADSCALE_DNS_BASE_DOMAIN": "with-local-dns.net", + }, + wantConfCompareFunc: func(t *testing.T, got string) { + want 
:= resolvconf(` + nameserver 100.100.100.100 + search with-local-dns.net test1.no test2.no + `) + assert.Equal(t, want, got) + }, + }, + { + name: "base-nameservers-split", + conf: map[string]string{ + "HEADSCALE_DNS_NAMESERVERS_SPLIT": `{foo.bar.com: ["1.1.1.1"]}`, + "HEADSCALE_DNS_BASE_DOMAIN": "with-local-dns.net", + }, + wantConfCompareFunc: func(t *testing.T, got string) { + want := resolvconf(` + nameserver 100.100.100.100 + search with-local-dns.net + `) + assert.Equal(t, want, got) + }, + }, + { + name: "base-full-no-magic", + conf: map[string]string{ + "HEADSCALE_DNS_MAGIC_DNS": "false", + "HEADSCALE_DNS_BASE_DOMAIN": "all-of.it", + "HEADSCALE_DNS_NAMESERVERS_GLOBAL": `8.8.8.8`, + "HEADSCALE_DNS_SEARCH_DOMAINS": "test1.no test2.no", + // TODO(kradalby): this currently isnt working, need to fix it + // "HEADSCALE_DNS_NAMESERVERS_SPLIT": `{foo.bar.com: ["1.1.1.1"]}`, + // "HEADSCALE_DNS_EXTRA_RECORDS": `[{ name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.4" }]`, + }, + wantConfCompareFunc: func(t *testing.T, got string) { + want := resolvconf(` + nameserver 100.100.100.100 + search all-of.it test1.no test2.no + `) + assert.Equal(t, want, got) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + spec := map[string]int{ + "resolvconf1": 3, + "resolvconf2": 3, + } + + err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("resolvconf"), hsic.WithConfigEnv(tt.conf)) + assertNoErrHeadscaleEnv(t, err) + + allClients, err := scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + // Poor mans cache + _, err = scenario.ListTailscaleClientsFQDNs() + assertNoErrListFQDN(t, err) + + _, err = scenario.ListTailscaleClientsIPs() + assertNoErrListClientIPs(t, err) + + time.Sleep(30 * time.Second) + + for _, client := range 
allClients { + b, err := client.ReadFile("/etc/resolv.conf") + assertNoErr(t, err) + + t.Logf("comparing resolv conf of %s", client.Hostname()) + tt.wantConfCompareFunc(t, string(b)) + } + }) + } + +} diff --git a/integration/dockertestutil/execute.go b/integration/dockertestutil/execute.go index 5a8e92b3..1b41e324 100644 --- a/integration/dockertestutil/execute.go +++ b/integration/dockertestutil/execute.go @@ -62,7 +62,7 @@ func ExecuteCommand( exitCode, err := resource.Exec( cmd, dockertest.ExecOptions{ - Env: append(env, "HEADSCALE_LOG_LEVEL=disabled"), + Env: append(env, "HEADSCALE_LOG_LEVEL=info"), StdOut: &stdout, StdErr: &stderr, }, diff --git a/integration/dockertestutil/logs.go b/integration/dockertestutil/logs.go index 98ba970a..64c3c9ac 100644 --- a/integration/dockertestutil/logs.go +++ b/integration/dockertestutil/logs.go @@ -17,10 +17,10 @@ func SaveLog( pool *dockertest.Pool, resource *dockertest.Resource, basePath string, -) error { +) (string, string, error) { err := os.MkdirAll(basePath, os.ModePerm) if err != nil { - return err + return "", "", err } var stdout bytes.Buffer @@ -41,28 +41,30 @@ func SaveLog( }, ) if err != nil { - return err + return "", "", err } log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath) + stdoutPath := path.Join(basePath, resource.Container.Name+".stdout.log") err = os.WriteFile( - path.Join(basePath, resource.Container.Name+".stdout.log"), + stdoutPath, stdout.Bytes(), filePerm, ) if err != nil { - return err + return "", "", err } + stderrPath := path.Join(basePath, resource.Container.Name+".stderr.log") err = os.WriteFile( - path.Join(basePath, resource.Container.Name+".stderr.log"), + stderrPath, stderr.Bytes(), filePerm, ) if err != nil { - return err + return "", "", err } - return nil + return stdoutPath, stderrPath, nil } diff --git a/integration/embedded_derp_test.go b/integration/embedded_derp_test.go index e4f76ec4..745f2c89 100644 --- a/integration/embedded_derp_test.go +++ 
b/integration/embedded_derp_test.go @@ -4,7 +4,9 @@ import ( "fmt" "log" "net/url" + "strings" "testing" + "time" "github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/integration/dockertestutil" @@ -23,7 +25,7 @@ func TestDERPServerScenario(t *testing.T) { IntegrationSkip(t) // t.Parallel() - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) scenario := EmbeddedDERPServerScenario{ @@ -33,49 +35,85 @@ func TestDERPServerScenario(t *testing.T) { defer scenario.Shutdown() spec := map[string]int{ - "user1": 10, - // "user1": len(MustTestVersions), - } - - headscaleConfig := map[string]string{ - "HEADSCALE_DERP_URLS": "", - "HEADSCALE_DERP_SERVER_ENABLED": "true", - "HEADSCALE_DERP_SERVER_REGION_ID": "999", - "HEADSCALE_DERP_SERVER_REGION_CODE": "headscale", - "HEADSCALE_DERP_SERVER_REGION_NAME": "Headscale Embedded DERP", - "HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR": "0.0.0.0:3478", - "HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH": "/tmp/derp.key", - - // Envknob for enabling DERP debug logs - "DERP_DEBUG_LOGS": "true", - "DERP_PROBER_DEBUG_LOGS": "true", + "user1": len(MustTestVersions), } err = scenario.CreateHeadscaleEnv( spec, - hsic.WithConfigEnv(headscaleConfig), hsic.WithTestName("derpserver"), hsic.WithExtraPorts([]string{"3478/udp"}), + hsic.WithEmbeddedDERPServerOnly(), hsic.WithTLS(), hsic.WithHostnameAsServerURL(), + hsic.WithConfigEnv(map[string]string{ + "HEADSCALE_DERP_AUTO_UPDATE_ENABLED": "true", + "HEADSCALE_DERP_UPDATE_FREQUENCY": "10s", + }), ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() assertNoErrListClients(t, err) - allIps, err := scenario.ListTailscaleClientsIPs() - assertNoErrListClientIPs(t, err) - err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) allHostnames, err := scenario.ListTailscaleClientsFQDNs() assertNoErrListFQDN(t, err) + for _, client := range allClients { + status, err := client.Status() + 
assertNoErr(t, err) + + for _, health := range status.Health { + if strings.Contains(health, "could not connect to any relay server") { + t.Errorf("expected to be connected to derp, found: %s", health) + } + if strings.Contains(health, "could not connect to the 'Headscale Embedded DERP' relay server.") { + t.Errorf("expected to be connected to derp, found: %s", health) + } + } + } + success := pingDerpAllHelper(t, allClients, allHostnames) - t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) + for _, client := range allClients { + status, err := client.Status() + assertNoErr(t, err) + + for _, health := range status.Health { + if strings.Contains(health, "could not connect to any relay server") { + t.Errorf("expected to be connected to derp, found: %s", health) + } + if strings.Contains(health, "could not connect to the 'Headscale Embedded DERP' relay server.") { + t.Errorf("expected to be connected to derp, found: %s", health) + } + } + } + + t.Logf("Run 1: %d successful pings out of %d", success, len(allClients)*len(allHostnames)) + + // Let the DERP updater run a couple of times to ensure it does not + // break the DERPMap. 
+ time.Sleep(30 * time.Second) + + success = pingDerpAllHelper(t, allClients, allHostnames) + + for _, client := range allClients { + status, err := client.Status() + assertNoErr(t, err) + + for _, health := range status.Health { + if strings.Contains(health, "could not connect to any relay server") { + t.Errorf("expected to be connected to derp, found: %s", health) + } + if strings.Contains(health, "could not connect to the 'Headscale Embedded DERP' relay server.") { + t.Errorf("expected to be connected to derp, found: %s", health) + } + } + } + + t.Logf("Run2: %d successful pings out of %d", success, len(allClients)*len(allHostnames)) } func (s *EmbeddedDERPServerScenario) CreateHeadscaleEnv( diff --git a/integration/general_test.go b/integration/general_test.go index 9aae26fc..a8421f47 100644 --- a/integration/general_test.go +++ b/integration/general_test.go @@ -1,6 +1,7 @@ package integration import ( + "context" "encoding/json" "fmt" "net/netip" @@ -9,11 +10,13 @@ import ( "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" + "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/integration/hsic" "github.com/juanfont/headscale/integration/tsic" "github.com/rs/zerolog/log" "github.com/samber/lo" "github.com/stretchr/testify/assert" + "golang.org/x/sync/errgroup" "tailscale.com/client/tailscale/apitype" "tailscale.com/types/key" ) @@ -22,7 +25,7 @@ func TestPingAllByIP(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -33,26 +36,13 @@ func TestPingAllByIP(t *testing.T) { "user2": len(MustTestVersions), } - headscaleConfig := map[string]string{ - "HEADSCALE_DERP_URLS": "", - "HEADSCALE_DERP_SERVER_ENABLED": "true", - "HEADSCALE_DERP_SERVER_REGION_ID": "999", - "HEADSCALE_DERP_SERVER_REGION_CODE": "headscale", - "HEADSCALE_DERP_SERVER_REGION_NAME": "Headscale Embedded DERP", - 
"HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR": "0.0.0.0:3478", - "HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH": "/tmp/derp.key", - - // Envknob for enabling DERP debug logs - "DERP_DEBUG_LOGS": "true", - "DERP_PROBER_DEBUG_LOGS": "true", - } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"), - hsic.WithConfigEnv(headscaleConfig), + hsic.WithEmbeddedDERPServerOnly(), hsic.WithTLS(), hsic.WithHostnameAsServerURL(), + hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom), ) assertNoErrHeadscaleEnv(t, err) @@ -65,7 +55,7 @@ func TestPingAllByIP(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -79,7 +69,7 @@ func TestPingAllByIPPublicDERP(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -103,7 +93,7 @@ func TestPingAllByIPPublicDERP(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -117,7 +107,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -135,7 +125,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) clientIPs := make(map[TailscaleClient][]netip.Addr) for _, client := range allClients { @@ -176,7 +166,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - 
assertClientsState(t, allClients) + // assertClientsState(t, allClients) allClients, err = scenario.ListTailscaleClients() assertNoErrListClients(t, err) @@ -225,10 +215,18 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { } func TestEphemeral(t *testing.T) { + testEphemeralWithOptions(t, hsic.WithTestName("ephemeral")) +} + +func TestEphemeralInAlternateTimezone(t *testing.T) { + testEphemeralWithOptions(t, hsic.WithTestName("ephemeral-tz"), hsic.WithTimezone("America/Los_Angeles")) +} + +func testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -237,7 +235,7 @@ func TestEphemeral(t *testing.T) { "user2": len(MustTestVersions), } - headscale, err := scenario.Headscale(hsic.WithTestName("ephemeral")) + headscale, err := scenario.Headscale(opts...) assertNoErrHeadscaleEnv(t, err) for userName, clientCount := range spec { @@ -307,11 +305,127 @@ func TestEphemeral(t *testing.T) { } } +// TestEphemeral2006DeletedTooQuickly verifies that ephemeral nodes are not +// deleted by accident if they are still online and active. +func TestEphemeral2006DeletedTooQuickly(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + spec := map[string]int{ + "user1": len(MustTestVersions), + "user2": len(MustTestVersions), + } + + headscale, err := scenario.Headscale( + hsic.WithTestName("ephemeral2006"), + hsic.WithConfigEnv(map[string]string{ + "HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "1m6s", + }), + ) + assertNoErrHeadscaleEnv(t, err) + + for userName, clientCount := range spec { + err = scenario.CreateUser(userName) + if err != nil { + t.Fatalf("failed to create user %s: %s", userName, err) + } + + err = scenario.CreateTailscaleNodesInUser(userName, "all", clientCount, []tsic.Option{}...) 
+ if err != nil { + t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err) + } + + key, err := scenario.CreatePreAuthKey(userName, true, true) + if err != nil { + t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err) + } + + err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey()) + if err != nil { + t.Fatalf("failed to run tailscale up for user %s: %s", userName, err) + } + } + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + allClients, err := scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + allIps, err := scenario.ListTailscaleClientsIPs() + assertNoErrListClientIPs(t, err) + + allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { + return x.String() + }) + + // All ephemeral nodes should be online and reachable. + success := pingAllHelper(t, allClients, allAddrs) + t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) + + // Take down all clients, this should start an expiry timer for each. + for _, client := range allClients { + err := client.Down() + if err != nil { + t.Fatalf("failed to take down client %s: %s", client.Hostname(), err) + } + } + + // Wait a bit and bring up the clients again before the expiry + // time of the ephemeral nodes. + // Nodes should be able to reconnect and work fine. + time.Sleep(30 * time.Second) + + for _, client := range allClients { + err := client.Up() + if err != nil { + t.Fatalf("failed to take down client %s: %s", client.Hostname(), err) + } + } + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + success = pingAllHelper(t, allClients, allAddrs) + t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) + + // Take down all clients, this should start an expiry timer for each. 
+ for _, client := range allClients { + err := client.Down() + if err != nil { + t.Fatalf("failed to take down client %s: %s", client.Hostname(), err) + } + } + + // This time wait for all of the nodes to expire and check that they are no longer + // registered. + time.Sleep(3 * time.Minute) + + for userName := range spec { + nodes, err := headscale.ListNodesInUser(userName) + if err != nil { + log.Error(). + Err(err). + Str("user", userName). + Msg("Error listing nodes in user") + + return + } + + if len(nodes) != 0 { + t.Fatalf("expected no nodes, got %d in user %s", len(nodes), userName) + } + } +} + func TestPingAllByHostname(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -329,7 +443,7 @@ func TestPingAllByHostname(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allHostnames, err := scenario.ListTailscaleClientsFQDNs() assertNoErrListFQDN(t, err) @@ -347,20 +461,20 @@ func TestTaildrop(t *testing.T) { IntegrationSkip(t) t.Parallel() - retry := func(times int, sleepInverval time.Duration, doWork func() error) error { + retry := func(times int, sleepInterval time.Duration, doWork func() error) error { var err error for attempts := 0; attempts < times; attempts++ { err = doWork() if err == nil { return nil } - time.Sleep(sleepInverval) + time.Sleep(sleepInterval) } return err } - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -517,79 +631,11 @@ func TestTaildrop(t *testing.T) { } } -func TestResolveMagicDNS(t *testing.T) { - IntegrationSkip(t) - t.Parallel() - - scenario, err := NewScenario() - assertNoErr(t, err) - defer scenario.Shutdown() - - spec := map[string]int{ - "magicdns1": len(MustTestVersions), - "magicdns2": len(MustTestVersions), - 
} - - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns")) - assertNoErrHeadscaleEnv(t, err) - - allClients, err := scenario.ListTailscaleClients() - assertNoErrListClients(t, err) - - err = scenario.WaitForTailscaleSync() - assertNoErrSync(t, err) - - assertClientsState(t, allClients) - - // Poor mans cache - _, err = scenario.ListTailscaleClientsFQDNs() - assertNoErrListFQDN(t, err) - - _, err = scenario.ListTailscaleClientsIPs() - assertNoErrListClientIPs(t, err) - - for _, client := range allClients { - for _, peer := range allClients { - // It is safe to ignore this error as we handled it when caching it - peerFQDN, _ := peer.FQDN() - - command := []string{ - "tailscale", - "ip", peerFQDN, - } - result, _, err := client.Execute(command) - if err != nil { - t.Fatalf( - "failed to execute resolve/ip command %s from %s: %s", - peerFQDN, - client.Hostname(), - err, - ) - } - - ips, err := peer.IPs() - if err != nil { - t.Fatalf( - "failed to get ips for %s: %s", - peer.Hostname(), - err, - ) - } - - for _, ip := range ips { - if !strings.Contains(result, ip.String()) { - t.Fatalf("ip %s is not found in \n%s\n", ip.String(), result) - } - } - } - } -} - func TestExpireNode(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -609,7 +655,7 @@ func TestExpireNode(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -711,11 +757,11 @@ func TestExpireNode(t *testing.T) { } } -func TestNodeOnlineLastSeenStatus(t *testing.T) { +func TestNodeOnlineStatus(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer 
scenario.Shutdown() @@ -723,7 +769,7 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { "user1": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("onlinelastseen")) + err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("online")) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -735,7 +781,7 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -755,8 +801,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { headscale, err := scenario.Headscale() assertNoErr(t, err) - keepAliveInterval := 60 * time.Second - // Duration is chosen arbitrarily, 10m is reported in #1561 testDuration := 12 * time.Minute start := time.Now() @@ -780,11 +824,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { err = json.Unmarshal([]byte(result), &nodes) assertNoErr(t, err) - now := time.Now() - - // Threshold with some leeway - lastSeenThreshold := now.Add(-keepAliveInterval - (10 * time.Second)) - // Verify that headscale reports the nodes as online for _, node := range nodes { // All nodes should be online @@ -795,18 +834,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { node.GetName(), time.Since(start), ) - - lastSeen := node.GetLastSeen().AsTime() - // All nodes should have been last seen between now and the keepAliveInterval - assert.Truef( - t, - lastSeen.After(lastSeenThreshold), - "node (%s) lastSeen (%v) was not %s after the threshold (%v)", - node.GetName(), - lastSeen, - keepAliveInterval, - lastSeenThreshold, - ) } // Verify that all nodes report all nodes to be online @@ -824,7 +851,7 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { continue } - // All peers of this nodess are reporting to be + // All peers of this nodes are 
reporting to be // connected to the control server assert.Truef( t, @@ -834,15 +861,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { client.Hostname(), time.Since(start), ) - - // from docs: last seen to tailcontrol; only present if offline - // assert.Nilf( - // t, - // peerStatus.LastSeen, - // "expected node %s to not have LastSeen set, got %s", - // peerStatus.HostName, - // peerStatus.LastSeen, - // ) } } @@ -850,3 +868,188 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { time.Sleep(time.Second) } } + +// TestPingAllByIPManyUpDown is a variant of the PingAll +// test which will take the tailscale node up and down +// five times ensuring they are able to restablish connectivity. +func TestPingAllByIPManyUpDown(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + // TODO(kradalby): it does not look like the user thing works, only second + // get created? maybe only when many? 
+ spec := map[string]int{ + "user1": len(MustTestVersions), + "user2": len(MustTestVersions), + } + + err = scenario.CreateHeadscaleEnv(spec, + []tsic.Option{}, + hsic.WithTestName("pingallbyipmany"), + hsic.WithEmbeddedDERPServerOnly(), + hsic.WithTLS(), + hsic.WithHostnameAsServerURL(), + ) + assertNoErrHeadscaleEnv(t, err) + + allClients, err := scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + allIps, err := scenario.ListTailscaleClientsIPs() + assertNoErrListClientIPs(t, err) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + // assertClientsState(t, allClients) + + allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { + return x.String() + }) + + success := pingAllHelper(t, allClients, allAddrs) + t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) + + wg, _ := errgroup.WithContext(context.Background()) + + for run := range 3 { + t.Logf("Starting DownUpPing run %d", run+1) + + for _, client := range allClients { + c := client + wg.Go(func() error { + t.Logf("taking down %q", c.Hostname()) + return c.Down() + }) + } + + if err := wg.Wait(); err != nil { + t.Fatalf("failed to take down all nodes: %s", err) + } + + time.Sleep(5 * time.Second) + + for _, client := range allClients { + c := client + wg.Go(func() error { + t.Logf("bringing up %q", c.Hostname()) + return c.Up() + }) + } + + if err := wg.Wait(); err != nil { + t.Fatalf("failed to take down all nodes: %s", err) + } + + time.Sleep(5 * time.Second) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + success := pingAllHelper(t, allClients, allAddrs) + t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) + } +} + +func Test2118DeletingOnlineNodePanics(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.ShutdownAssertNoPanics(t) + + // TODO(kradalby): it does not look like the user thing works, 
only second + // get created? maybe only when many? + spec := map[string]int{ + "user1": 1, + "user2": 1, + } + + err = scenario.CreateHeadscaleEnv(spec, + []tsic.Option{}, + hsic.WithTestName("deletenocrash"), + hsic.WithEmbeddedDERPServerOnly(), + hsic.WithTLS(), + hsic.WithHostnameAsServerURL(), + ) + assertNoErrHeadscaleEnv(t, err) + + allClients, err := scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + allIps, err := scenario.ListTailscaleClientsIPs() + assertNoErrListClientIPs(t, err) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { + return x.String() + }) + + success := pingAllHelper(t, allClients, allAddrs) + t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) + + headscale, err := scenario.Headscale() + assertNoErr(t, err) + + // Test list all nodes after added otherUser + var nodeList []v1.Node + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "nodes", + "list", + "--output", + "json", + }, + &nodeList, + ) + assert.Nil(t, err) + assert.Len(t, nodeList, 2) + assert.True(t, nodeList[0].Online) + assert.True(t, nodeList[1].Online) + + // Delete the first node, which is online + _, err = headscale.Execute( + []string{ + "headscale", + "nodes", + "delete", + "--identifier", + // Delete the last added machine + fmt.Sprintf("%d", nodeList[0].Id), + "--output", + "json", + "--force", + }, + ) + assert.Nil(t, err) + + time.Sleep(2 * time.Second) + + // Ensure that the node has been deleted, this did not occur due to a panic. 
+ var nodeListAfter []v1.Node + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "nodes", + "list", + "--output", + "json", + }, + &nodeListAfter, + ) + assert.Nil(t, err) + assert.Len(t, nodeListAfter, 1) + assert.True(t, nodeListAfter[0].Online) + assert.Equal(t, nodeList[1].Id, nodeListAfter[0].Id) + +} diff --git a/integration/hsic/config.go b/integration/hsic/config.go index 606718c7..244470f2 100644 --- a/integration/hsic/config.go +++ b/integration/hsic/config.go @@ -1,103 +1,6 @@ package hsic -// const ( -// defaultEphemeralNodeInactivityTimeout = time.Second * 30 -// defaultNodeUpdateCheckInterval = time.Second * 10 -// ) - -// TODO(kradalby): This approach doesnt work because we cannot -// serialise our config object to YAML or JSON. -// func DefaultConfig() headscale.Config { -// derpMap, _ := url.Parse("https://controlplane.tailscale.com/derpmap/default") -// -// config := headscale.Config{ -// Log: headscale.LogConfig{ -// Level: zerolog.TraceLevel, -// }, -// ACL: headscale.GetACLConfig(), -// DBtype: "sqlite3", -// EphemeralNodeInactivityTimeout: defaultEphemeralNodeInactivityTimeout, -// NodeUpdateCheckInterval: defaultNodeUpdateCheckInterval, -// IPPrefixes: []netip.Prefix{ -// netip.MustParsePrefix("fd7a:115c:a1e0::/48"), -// netip.MustParsePrefix("100.64.0.0/10"), -// }, -// DNSConfig: &tailcfg.DNSConfig{ -// Proxied: true, -// Nameservers: []netip.Addr{ -// netip.MustParseAddr("127.0.0.11"), -// netip.MustParseAddr("1.1.1.1"), -// }, -// Resolvers: []*dnstype.Resolver{ -// { -// Addr: "127.0.0.11", -// }, -// { -// Addr: "1.1.1.1", -// }, -// }, -// }, -// BaseDomain: "headscale.net", -// -// DBpath: "/tmp/integration_test_db.sqlite3", -// -// PrivateKeyPath: "/tmp/integration_private.key", -// NoisePrivateKeyPath: "/tmp/noise_integration_private.key", -// Addr: "0.0.0.0:8080", -// MetricsAddr: "127.0.0.1:9090", -// ServerURL: "http://headscale:8080", -// -// DERP: headscale.DERPConfig{ -// URLs: []url.URL{ -// *derpMap, -// 
}, -// AutoUpdate: false, -// UpdateFrequency: 1 * time.Minute, -// }, -// } -// -// return config -// } - -// TODO: Reuse the actual configuration object above. -// Deprecated: use env function instead as it is easier to -// override. -func DefaultConfigYAML() string { - yaml := ` -log: - level: trace -acl_policy_path: "" -database: - type: sqlite3 - sqlite.path: /tmp/integration_test_db.sqlite3 -ephemeral_node_inactivity_timeout: 30m -node_update_check_interval: 10s -prefixes: - v6: fd7a:115c:a1e0::/48 - v4: 100.64.0.0/10 -dns_config: - base_domain: headscale.net - magic_dns: true - domains: [] - nameservers: - - 127.0.0.11 - - 1.1.1.1 -private_key_path: /tmp/private.key -noise: - private_key_path: /tmp/noise_private.key -listen_addr: 0.0.0.0:8080 -metrics_listen_addr: 127.0.0.1:9090 -server_url: http://headscale:8080 - -derp: - urls: - - https://controlplane.tailscale.com/derpmap/default - auto_update_enabled: false - update_frequency: 1m -` - - return yaml -} +import "github.com/juanfont/headscale/hscontrol/types" func MinimumConfigYAML() string { return ` @@ -110,24 +13,26 @@ noise: func DefaultConfigEnv() map[string]string { return map[string]string{ "HEADSCALE_LOG_LEVEL": "trace", - "HEADSCALE_ACL_POLICY_PATH": "", + "HEADSCALE_POLICY_PATH": "", "HEADSCALE_DATABASE_TYPE": "sqlite", "HEADSCALE_DATABASE_SQLITE_PATH": "/tmp/integration_test_db.sqlite3", "HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "30m", - "HEADSCALE_NODE_UPDATE_CHECK_INTERVAL": "10s", "HEADSCALE_PREFIXES_V4": "100.64.0.0/10", "HEADSCALE_PREFIXES_V6": "fd7a:115c:a1e0::/48", - "HEADSCALE_DNS_CONFIG_BASE_DOMAIN": "headscale.net", - "HEADSCALE_DNS_CONFIG_MAGIC_DNS": "true", - "HEADSCALE_DNS_CONFIG_DOMAINS": "", - "HEADSCALE_DNS_CONFIG_NAMESERVERS": "127.0.0.11 1.1.1.1", + "HEADSCALE_DNS_BASE_DOMAIN": "headscale.net", + "HEADSCALE_DNS_MAGIC_DNS": "true", + "HEADSCALE_DNS_NAMESERVERS_GLOBAL": "127.0.0.11 1.1.1.1", "HEADSCALE_PRIVATE_KEY_PATH": "/tmp/private.key", 
"HEADSCALE_NOISE_PRIVATE_KEY_PATH": "/tmp/noise_private.key", "HEADSCALE_LISTEN_ADDR": "0.0.0.0:8080", - "HEADSCALE_METRICS_LISTEN_ADDR": "127.0.0.1:9090", + "HEADSCALE_METRICS_LISTEN_ADDR": "0.0.0.0:9090", "HEADSCALE_SERVER_URL": "http://headscale:8080", "HEADSCALE_DERP_URLS": "https://controlplane.tailscale.com/derpmap/default", "HEADSCALE_DERP_AUTO_UPDATE_ENABLED": "false", "HEADSCALE_DERP_UPDATE_FREQUENCY": "1m", + + // a bunch of tests (ACL/Policy) rely on predicable IP alloc, + // so ensure the sequential alloc is used by default. + "HEADSCALE_PREFIXES_ALLOCATION": string(types.IPAllocationStrategySequential), } } diff --git a/integration/hsic/hsic.go b/integration/hsic/hsic.go index b61827ac..20a778b8 100644 --- a/integration/hsic/hsic.go +++ b/integration/hsic/hsic.go @@ -11,6 +11,7 @@ import ( "encoding/pem" "errors" "fmt" + "io" "log" "math/big" "net" @@ -18,12 +19,14 @@ import ( "net/url" "os" "path" + "strconv" "strings" "time" "github.com/davecgh/go-spew/spew" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/policy" + "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/integration/dockertestutil" "github.com/juanfont/headscale/integration/integrationutil" @@ -79,7 +82,7 @@ type Option = func(c *HeadscaleInContainer) func WithACLPolicy(acl *policy.ACLPolicy) Option { return func(hsic *HeadscaleInContainer) { // TODO(kradalby): Move somewhere appropriate - hsic.env["HEADSCALE_ACL_POLICY_PATH"] = aclPolicyPath + hsic.env["HEADSCALE_POLICY_PATH"] = aclPolicyPath hsic.aclPolicy = acl } @@ -173,6 +176,47 @@ func WithPostgres() Option { } } +// WithIPAllocationStrategy sets the tests IP Allocation strategy. 
+func WithIPAllocationStrategy(strategy types.IPAllocationStrategy) Option { + return func(hsic *HeadscaleInContainer) { + hsic.env["HEADSCALE_PREFIXES_ALLOCATION"] = string(strategy) + } +} + +// WithEmbeddedDERPServerOnly configures Headscale to start +// and only use the embedded DERP server. +// It requires WithTLS and WithHostnameAsServerURL to be +// set. +func WithEmbeddedDERPServerOnly() Option { + return func(hsic *HeadscaleInContainer) { + hsic.env["HEADSCALE_DERP_URLS"] = "" + hsic.env["HEADSCALE_DERP_SERVER_ENABLED"] = "true" + hsic.env["HEADSCALE_DERP_SERVER_REGION_ID"] = "999" + hsic.env["HEADSCALE_DERP_SERVER_REGION_CODE"] = "headscale" + hsic.env["HEADSCALE_DERP_SERVER_REGION_NAME"] = "Headscale Embedded DERP" + hsic.env["HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR"] = "0.0.0.0:3478" + hsic.env["HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH"] = "/tmp/derp.key" + + // Envknob for enabling DERP debug logs + hsic.env["DERP_DEBUG_LOGS"] = "true" + hsic.env["DERP_PROBER_DEBUG_LOGS"] = "true" + } +} + +// WithTuning allows changing the tuning settings easily. +func WithTuning(batchTimeout time.Duration, mapSessionChanSize int) Option { + return func(hsic *HeadscaleInContainer) { + hsic.env["HEADSCALE_TUNING_BATCH_CHANGE_DELAY"] = batchTimeout.String() + hsic.env["HEADSCALE_TUNING_NODE_MAPSESSION_BUFFERED_CHAN_SIZE"] = strconv.Itoa(mapSessionChanSize) + } +} + +func WithTimezone(timezone string) Option { + return func(hsic *HeadscaleInContainer) { + hsic.env["TZ"] = timezone + } +} + // New returns a new HeadscaleInContainer instance. 
func New( pool *dockertest.Pool, @@ -248,9 +292,13 @@ func New( } env := []string{ - "HEADSCALE_PROFILING_ENABLED=1", - "HEADSCALE_PROFILING_PATH=/tmp/profile", + "HEADSCALE_DEBUG_PROFILING_ENABLED=1", + "HEADSCALE_DEBUG_PROFILING_PATH=/tmp/profile", "HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH=/tmp/mapresponses", + "HEADSCALE_DEBUG_DEADLOCK=1", + "HEADSCALE_DEBUG_DEADLOCK_TIMEOUT=5s", + "HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS=1", + "HEADSCALE_DEBUG_DUMP_CONFIG=1", } for key, value := range hsic.env { env = append(env, fmt.Sprintf("%s=%s", key, value)) @@ -260,7 +308,7 @@ func New( runOptions := &dockertest.RunOptions{ Name: hsic.hostname, - ExposedPorts: append([]string{portProto}, hsic.extraPorts...), + ExposedPorts: append([]string{portProto, "9090/tcp"}, hsic.extraPorts...), Networks: []*dockertest.Network{network}, // Cmd: []string{"headscale", "serve"}, // TODO(kradalby): Get rid of this hack, we currently need to give us some @@ -350,8 +398,8 @@ func (t *HeadscaleInContainer) hasTLS() bool { } // Shutdown stops and cleans up the Headscale container. -func (t *HeadscaleInContainer) Shutdown() error { - err := t.SaveLog("/tmp/control") +func (t *HeadscaleInContainer) Shutdown() (string, string, error) { + stdoutPath, stderrPath, err := t.SaveLog("/tmp/control") if err != nil { log.Printf( "Failed to save log from control: %s", @@ -359,6 +407,14 @@ func (t *HeadscaleInContainer) Shutdown() error { ) } + err = t.SaveMetrics(fmt.Sprintf("/tmp/control/%s_metrics.txt", t.hostname)) + if err != nil { + log.Printf( + "Failed to metrics from control: %s", + err, + ) + } + // Send a interrupt signal to the "headscale" process inside the container // allowing it to shut down gracefully and flush the profile to disk. // The container will live for a bit longer due to the sleep at the end. 
@@ -402,15 +458,34 @@ func (t *HeadscaleInContainer) Shutdown() error { t.pool.Purge(t.pgContainer) } - return t.pool.Purge(t.container) + return stdoutPath, stderrPath, t.pool.Purge(t.container) } // SaveLog saves the current stdout log of the container to a path // on the host system. -func (t *HeadscaleInContainer) SaveLog(path string) error { +func (t *HeadscaleInContainer) SaveLog(path string) (string, string, error) { return dockertestutil.SaveLog(t.pool, t.container, path) } +func (t *HeadscaleInContainer) SaveMetrics(savePath string) error { + resp, err := http.Get(fmt.Sprintf("http://%s:9090/metrics", t.hostname)) + if err != nil { + return fmt.Errorf("getting metrics: %w", err) + } + defer resp.Body.Close() + out, err := os.Create(savePath) + if err != nil { + return fmt.Errorf("creating file for metrics: %w", err) + } + defer out.Close() + _, err = io.Copy(out, resp.Body) + if err != nil { + return fmt.Errorf("copy response to file: %w", err) + } + + return nil +} + func (t *HeadscaleInContainer) SaveProfile(savePath string) error { tarFile, err := t.FetchPath("/tmp/profile") if err != nil { @@ -482,7 +557,7 @@ func (t *HeadscaleInContainer) Execute( log.Printf("command stdout: %s\n", stdout) } - return "", err + return stdout, fmt.Errorf("executing command in docker: %w, stderr: %s", err, stderr) } return stdout, nil @@ -682,7 +757,7 @@ func createCertificate(hostname string) ([]byte, []byte, error) { Locality: []string{"Leiden"}, }, NotBefore: time.Now(), - NotAfter: time.Now().Add(60 * time.Minute), + NotAfter: time.Now().Add(60 * time.Hour), IsCA: true, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, diff --git a/integration/route_test.go b/integration/route_test.go index 75296fd5..0252e702 100644 --- a/integration/route_test.go +++ b/integration/route_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" 
"github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/util" @@ -17,6 +18,7 @@ import ( "github.com/juanfont/headscale/integration/tsic" "github.com/stretchr/testify/assert" "tailscale.com/types/ipproto" + "tailscale.com/types/views" "tailscale.com/wgengine/filter" ) @@ -28,7 +30,7 @@ func TestEnablingRoutes(t *testing.T) { user := "enable-routing" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErrf(t, "failed to create scenario: %s", err) defer scenario.Shutdown() @@ -212,7 +214,11 @@ func TestEnablingRoutes(t *testing.T) { if route.GetId() == routeToBeDisabled.GetId() { assert.Equal(t, false, route.GetEnabled()) - assert.Equal(t, false, route.GetIsPrimary()) + + // since this is the only route of this cidr, + // it will not failover, and remain Primary + // until something can replace it. + assert.Equal(t, true, route.GetIsPrimary()) } else { assert.Equal(t, true, route.GetEnabled()) assert.Equal(t, true, route.GetIsPrimary()) @@ -246,7 +252,7 @@ func TestHASubnetRouterFailover(t *testing.T) { user := "enable-routing" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErrf(t, "failed to create scenario: %s", err) defer scenario.Shutdown() @@ -291,6 +297,7 @@ func TestHASubnetRouterFailover(t *testing.T) { client := allClients[2] + t.Logf("Advertise route from r1 (%s) and r2 (%s), making it HA, n1 is primary", subRouter1.Hostname(), subRouter2.Hostname()) // advertise HA route on node 1 and 2 // ID 1 will be primary // ID 2 will be secondary @@ -384,12 +391,12 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is primary assert.Equal(t, true, enablingRoutes[0].GetAdvertised()) assert.Equal(t, true, enablingRoutes[0].GetEnabled()) - assert.Equal(t, true, enablingRoutes[0].GetIsPrimary()) + assert.Equal(t, true, enablingRoutes[0].GetIsPrimary(), "both subnet routers are up, expected r1 to be primary") // Node 2 is not primary 
assert.Equal(t, true, enablingRoutes[1].GetAdvertised()) assert.Equal(t, true, enablingRoutes[1].GetEnabled()) - assert.Equal(t, false, enablingRoutes[1].GetIsPrimary()) + assert.Equal(t, false, enablingRoutes[1].GetIsPrimary(), "both subnet routers are up, expected r2 to be non-primary") // Verify that the client has routes from the primary machine srs1, err := subRouter1.Status() @@ -401,6 +408,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus := clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus := clientStatus.Peer[srs2.Self.PublicKey] + assert.True(t, srs1PeerStatus.Online, "r1 up, r2 up") + assert.True(t, srs2PeerStatus.Online, "r1 up, r2 up") + assertNotNil(t, srs1PeerStatus.PrimaryRoutes) assert.Nil(t, srs2PeerStatus.PrimaryRoutes) @@ -411,7 +421,8 @@ func TestHASubnetRouterFailover(t *testing.T) { ) // Take down the current primary - t.Logf("taking down subnet router 1 (%s)", subRouter1.Hostname()) + t.Logf("taking down subnet router r1 (%s)", subRouter1.Hostname()) + t.Logf("expecting r2 (%s) to take over as primary", subRouter2.Hostname()) err = subRouter1.Down() assertNoErr(t, err) @@ -435,15 +446,12 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is not primary assert.Equal(t, true, routesAfterMove[0].GetAdvertised()) assert.Equal(t, true, routesAfterMove[0].GetEnabled()) - assert.Equal(t, false, routesAfterMove[0].GetIsPrimary()) + assert.Equal(t, false, routesAfterMove[0].GetIsPrimary(), "r1 is down, expected r2 to be primary") // Node 2 is primary assert.Equal(t, true, routesAfterMove[1].GetAdvertised()) assert.Equal(t, true, routesAfterMove[1].GetEnabled()) - assert.Equal(t, true, routesAfterMove[1].GetIsPrimary()) - - // TODO(kradalby): Check client status - // Route is expected to be on SR2 + assert.Equal(t, true, routesAfterMove[1].GetIsPrimary(), "r1 is down, expected r2 to be primary") srs2, err = subRouter2.Status() @@ -453,6 +461,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus = 
clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey] + assert.False(t, srs1PeerStatus.Online, "r1 down, r2 down") + assert.True(t, srs2PeerStatus.Online, "r1 down, r2 up") + assert.Nil(t, srs1PeerStatus.PrimaryRoutes) assertNotNil(t, srs2PeerStatus.PrimaryRoutes) @@ -465,7 +476,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // Take down subnet router 2, leaving none available - t.Logf("taking down subnet router 2 (%s)", subRouter2.Hostname()) + t.Logf("taking down subnet router r2 (%s)", subRouter2.Hostname()) + t.Logf("expecting r2 (%s) to remain primary, no other available", subRouter2.Hostname()) err = subRouter2.Down() assertNoErr(t, err) @@ -489,14 +501,14 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is not primary assert.Equal(t, true, routesAfterBothDown[0].GetAdvertised()) assert.Equal(t, true, routesAfterBothDown[0].GetEnabled()) - assert.Equal(t, false, routesAfterBothDown[0].GetIsPrimary()) + assert.Equal(t, false, routesAfterBothDown[0].GetIsPrimary(), "r1 and r2 is down, expected r2 to _still_ be primary") // Node 2 is primary // if the node goes down, but no other suitable route is // available, keep the last known good route. 
assert.Equal(t, true, routesAfterBothDown[1].GetAdvertised()) assert.Equal(t, true, routesAfterBothDown[1].GetEnabled()) - assert.Equal(t, true, routesAfterBothDown[1].GetIsPrimary()) + assert.Equal(t, true, routesAfterBothDown[1].GetIsPrimary(), "r1 and r2 is down, expected r2 to _still_ be primary") // TODO(kradalby): Check client status // Both are expected to be down @@ -508,6 +520,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey] + assert.False(t, srs1PeerStatus.Online, "r1 down, r2 down") + assert.False(t, srs2PeerStatus.Online, "r1 down, r2 down") + assert.Nil(t, srs1PeerStatus.PrimaryRoutes) assertNotNil(t, srs2PeerStatus.PrimaryRoutes) @@ -520,7 +535,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // Bring up subnet router 1, making the route available from there. - t.Logf("bringing up subnet router 1 (%s)", subRouter1.Hostname()) + t.Logf("bringing up subnet router r1 (%s)", subRouter1.Hostname()) + t.Logf("expecting r1 (%s) to take over as primary (only one online)", subRouter1.Hostname()) err = subRouter1.Up() assertNoErr(t, err) @@ -544,12 +560,12 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is primary assert.Equal(t, true, routesAfter1Up[0].GetAdvertised()) assert.Equal(t, true, routesAfter1Up[0].GetEnabled()) - assert.Equal(t, true, routesAfter1Up[0].GetIsPrimary()) + assert.Equal(t, true, routesAfter1Up[0].GetIsPrimary(), "r1 is back up, expected r1 to become be primary") // Node 2 is not primary assert.Equal(t, true, routesAfter1Up[1].GetAdvertised()) assert.Equal(t, true, routesAfter1Up[1].GetEnabled()) - assert.Equal(t, false, routesAfter1Up[1].GetIsPrimary()) + assert.Equal(t, false, routesAfter1Up[1].GetIsPrimary(), "r1 is back up, expected r1 to become be primary") // Verify that the route is announced from subnet router 1 clientStatus, err = client.Status() @@ -558,6 +574,9 @@ func 
TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey] + assert.True(t, srs1PeerStatus.Online, "r1 is back up, r2 down") + assert.False(t, srs2PeerStatus.Online, "r1 is back up, r2 down") + assert.NotNil(t, srs1PeerStatus.PrimaryRoutes) assert.Nil(t, srs2PeerStatus.PrimaryRoutes) @@ -570,7 +589,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // Bring up subnet router 2, should result in no change. - t.Logf("bringing up subnet router 2 (%s)", subRouter2.Hostname()) + t.Logf("bringing up subnet router r2 (%s)", subRouter2.Hostname()) + t.Logf("both online, expecting r1 (%s) to still be primary (no flapping)", subRouter1.Hostname()) err = subRouter2.Up() assertNoErr(t, err) @@ -594,12 +614,12 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is not primary assert.Equal(t, true, routesAfter2Up[0].GetAdvertised()) assert.Equal(t, true, routesAfter2Up[0].GetEnabled()) - assert.Equal(t, true, routesAfter2Up[0].GetIsPrimary()) + assert.Equal(t, true, routesAfter2Up[0].GetIsPrimary(), "r1 and r2 is back up, expected r1 to _still_ be primary") // Node 2 is primary assert.Equal(t, true, routesAfter2Up[1].GetAdvertised()) assert.Equal(t, true, routesAfter2Up[1].GetEnabled()) - assert.Equal(t, false, routesAfter2Up[1].GetIsPrimary()) + assert.Equal(t, false, routesAfter2Up[1].GetIsPrimary(), "r1 and r2 is back up, expected r1 to _still_ be primary") // Verify that the route is announced from subnet router 1 clientStatus, err = client.Status() @@ -608,6 +628,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey] + assert.True(t, srs1PeerStatus.Online, "r1 up, r2 up") + assert.True(t, srs2PeerStatus.Online, "r1 up, r2 up") + assert.NotNil(t, srs1PeerStatus.PrimaryRoutes) assert.Nil(t, srs2PeerStatus.PrimaryRoutes) @@ -620,7 +643,8 @@ func 
TestHASubnetRouterFailover(t *testing.T) { } // Disable the route of subnet router 1, making it failover to 2 - t.Logf("disabling route in subnet router 1 (%s)", subRouter1.Hostname()) + t.Logf("disabling route in subnet router r1 (%s)", subRouter1.Hostname()) + t.Logf("expecting route to failover to r2 (%s), which is still available", subRouter2.Hostname()) _, err = headscale.Execute( []string{ "headscale", @@ -648,7 +672,7 @@ func TestHASubnetRouterFailover(t *testing.T) { assertNoErr(t, err) assert.Len(t, routesAfterDisabling1, 2) - t.Logf("routes after disabling1 %#v", routesAfterDisabling1) + t.Logf("routes after disabling r1 %#v", routesAfterDisabling1) // Node 1 is not primary assert.Equal(t, true, routesAfterDisabling1[0].GetAdvertised()) @@ -680,6 +704,7 @@ func TestHASubnetRouterFailover(t *testing.T) { // enable the route of subnet router 1, no change expected t.Logf("enabling route in subnet router 1 (%s)", subRouter1.Hostname()) + t.Logf("both online, expecting r2 (%s) to still be primary (no flapping)", subRouter2.Hostname()) _, err = headscale.Execute( []string{ "headscale", @@ -736,7 +761,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // delete the route of subnet router 2, failover to one expected - t.Logf("deleting route in subnet router 2 (%s)", subRouter2.Hostname()) + t.Logf("deleting route in subnet router r2 (%s)", subRouter2.Hostname()) + t.Logf("expecting route to failover to r1 (%s)", subRouter1.Hostname()) _, err = headscale.Execute( []string{ "headscale", @@ -764,7 +790,7 @@ func TestHASubnetRouterFailover(t *testing.T) { assertNoErr(t, err) assert.Len(t, routesAfterDeleting2, 1) - t.Logf("routes after deleting2 %#v", routesAfterDeleting2) + t.Logf("routes after deleting r2 %#v", routesAfterDeleting2) // Node 1 is primary assert.Equal(t, true, routesAfterDeleting2[0].GetAdvertised()) @@ -798,7 +824,7 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) { user := "enable-disable-routing" - scenario, err := NewScenario() + 
scenario, err := NewScenario(dockertestMaxWait()) assertNoErrf(t, "failed to create scenario: %s", err) defer scenario.Shutdown() @@ -932,6 +958,95 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) { assert.Equal(t, true, reAdvertisedRoutes[0].GetIsPrimary()) } +func TestAutoApprovedSubRoute2068(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + expectedRoutes := "10.42.7.0/24" + + user := "subroute" + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErrf(t, "failed to create scenario: %s", err) + defer scenario.Shutdown() + + spec := map[string]int{ + user: 1, + } + + err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:approve"})}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy( + &policy.ACLPolicy{ + ACLs: []policy.ACL{ + { + Action: "accept", + Sources: []string{"*"}, + Destinations: []string{"*:*"}, + }, + }, + TagOwners: map[string][]string{ + "tag:approve": {user}, + }, + AutoApprovers: policy.AutoApprovers{ + Routes: map[string][]string{ + "10.42.0.0/16": {"tag:approve"}, + }, + }, + }, + )) + assertNoErrHeadscaleEnv(t, err) + + allClients, err := scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + headscale, err := scenario.Headscale() + assertNoErrGetHeadscale(t, err) + + subRouter1 := allClients[0] + + // Initially advertise route + command := []string{ + "tailscale", + "set", + "--advertise-routes=" + expectedRoutes, + } + _, _, err = subRouter1.Execute(command) + assertNoErrf(t, "failed to advertise route: %s", err) + + time.Sleep(10 * time.Second) + + var routes []*v1.Route + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "routes", + "list", + "--output", + "json", + }, + &routes, + ) + assertNoErr(t, err) + assert.Len(t, routes, 1) + + want := []*v1.Route{ + { + Id: 1, + Prefix: expectedRoutes, + Advertised: true, + Enabled: true, + IsPrimary: true, + }, + } + + if diff := 
cmp.Diff(want, routes, cmpopts.IgnoreUnexported(v1.Route{}), cmpopts.IgnoreFields(v1.Route{}, "Node", "CreatedAt", "UpdatedAt", "DeletedAt")); diff != "" { + t.Errorf("unexpected routes (-want +got):\n%s", diff) + } +} + // TestSubnetRouteACL verifies that Subnet routes are distributed // as expected when ACLs are activated. // It implements the issue from @@ -942,7 +1057,7 @@ func TestSubnetRouteACL(t *testing.T) { user := "subnet-route-acl" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErrf(t, "failed to create scenario: %s", err) defer scenario.Shutdown() @@ -1122,9 +1237,9 @@ func TestSubnetRouteACL(t *testing.T) { wantClientFilter := []filter.Match{ { - IPProto: []ipproto.Proto{ + IPProto: views.SliceOf([]ipproto.Proto{ ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6, - }, + }), Srcs: []netip.Prefix{ netip.MustParsePrefix("100.64.0.1/32"), netip.MustParsePrefix("100.64.0.2/32"), @@ -1145,7 +1260,7 @@ func TestSubnetRouteACL(t *testing.T) { }, } - if diff := cmp.Diff(wantClientFilter, clientNm.PacketFilter, util.PrefixComparer); diff != "" { + if diff := cmp.Diff(wantClientFilter, clientNm.PacketFilter, util.ViewSliceIPProtoComparer, util.PrefixComparer); diff != "" { t.Errorf("Client (%s) filter, unexpected result (-want +got):\n%s", client.Hostname(), diff) } @@ -1154,9 +1269,9 @@ func TestSubnetRouteACL(t *testing.T) { wantSubnetFilter := []filter.Match{ { - IPProto: []ipproto.Proto{ + IPProto: views.SliceOf([]ipproto.Proto{ ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6, - }, + }), Srcs: []netip.Prefix{ netip.MustParsePrefix("100.64.0.1/32"), netip.MustParsePrefix("100.64.0.2/32"), @@ -1176,9 +1291,9 @@ func TestSubnetRouteACL(t *testing.T) { Caps: []filter.CapMatch{}, }, { - IPProto: []ipproto.Proto{ + IPProto: views.SliceOf([]ipproto.Proto{ ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6, - }, + }), Srcs: []netip.Prefix{ netip.MustParsePrefix("100.64.0.1/32"), 
netip.MustParsePrefix("100.64.0.2/32"), @@ -1195,7 +1310,7 @@ func TestSubnetRouteACL(t *testing.T) { }, } - if diff := cmp.Diff(wantSubnetFilter, subnetNm.PacketFilter, util.PrefixComparer); diff != "" { + if diff := cmp.Diff(wantSubnetFilter, subnetNm.PacketFilter, util.ViewSliceIPProtoComparer, util.PrefixComparer); diff != "" { t.Errorf("Subnet (%s) filter, unexpected result (-want +got):\n%s", subRouter1.Hostname(), diff) } } diff --git a/integration/run.sh b/integration/run.sh index 8cad3f02..137bcfb7 100755 --- a/integration/run.sh +++ b/integration/run.sh @@ -26,6 +26,7 @@ run_tests() { --volume "$PWD:$PWD" -w "$PWD"/integration \ --volume /var/run/docker.sock:/var/run/docker.sock \ --volume "$PWD"/control_logs:/tmp/control \ + -e "HEADSCALE_INTEGRATION_POSTGRES" \ golang:1 \ go test ./... \ -failfast \ diff --git a/integration/scenario.go b/integration/scenario.go index a2c63e6f..df978f2a 100644 --- a/integration/scenario.go +++ b/integration/scenario.go @@ -8,6 +8,8 @@ import ( "os" "sort" "sync" + "testing" + "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/util" @@ -17,6 +19,7 @@ import ( "github.com/ory/dockertest/v3" "github.com/puzpuzpuz/xsync/v3" "github.com/samber/lo" + "github.com/stretchr/testify/assert" "golang.org/x/sync/errgroup" "tailscale.com/envknob" ) @@ -50,16 +53,23 @@ var ( tailscaleVersions2021 = map[string]bool{ "head": true, "unstable": true, + "1.70": true, // CapVer: not checked + "1.68": true, // CapVer: not checked + "1.66": true, // CapVer: not checked + "1.64": true, // CapVer: not checked + "1.62": true, // CapVer: not checked + "1.60": true, // CapVer: not checked + "1.58": true, // CapVer: not checked "1.56": true, // CapVer: 82 "1.54": true, // CapVer: 79 "1.52": true, // CapVer: 79 "1.50": true, // CapVer: 74 "1.48": true, // CapVer: 68 "1.46": true, // CapVer: 65 - "1.44": true, // CapVer: 63 - "1.42": true, // CapVer: 61 - "1.40": true, // CapVer: 61 - "1.38": true, 
// Oldest supported version, CapVer: 58 + "1.44": false, // CapVer: 63 + "1.42": false, // Oldest supported version, CapVer: 61 + "1.40": false, // CapVer: 61 + "1.38": false, // CapVer: 58 "1.36": false, // CapVer: 56 "1.34": false, // CapVer: 51 "1.32": false, // CapVer: 46 @@ -139,7 +149,7 @@ type Scenario struct { // NewScenario creates a test Scenario which can be used to bootstraps a ControlServer with // a set of Users and TailscaleClients. -func NewScenario() (*Scenario, error) { +func NewScenario(maxWait time.Duration) (*Scenario, error) { hash, err := util.GenerateRandomStringDNSSafe(scenarioHashLength) if err != nil { return nil, err @@ -150,7 +160,7 @@ func NewScenario() (*Scenario, error) { return nil, fmt.Errorf("could not connect to docker: %w", err) } - pool.MaxWait = dockertestMaxWait() + pool.MaxWait = maxWait networkName := fmt.Sprintf("hs-%s", hash) if overrideNetworkName := os.Getenv("HEADSCALE_TEST_NETWORK_NAME"); overrideNetworkName != "" { @@ -179,13 +189,9 @@ func NewScenario() (*Scenario, error) { }, nil } -// Shutdown shuts down and cleans up all the containers (ControlServer, TailscaleClient) -// and networks associated with it. -// In addition, it will save the logs of the ControlServer to `/tmp/control` in the -// environment running the tests. 
-func (s *Scenario) Shutdown() { +func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) { s.controlServers.Range(func(_ string, control ControlServer) bool { - err := control.Shutdown() + stdoutPath, stderrPath, err := control.Shutdown() if err != nil { log.Printf( "Failed to shut down control: %s", @@ -193,6 +199,16 @@ func (s *Scenario) Shutdown() { ) } + if t != nil { + stdout, err := os.ReadFile(stdoutPath) + assert.NoError(t, err) + assert.NotContains(t, string(stdout), "panic") + + stderr, err := os.ReadFile(stderrPath) + assert.NoError(t, err) + assert.NotContains(t, string(stderr), "panic") + } + return true }) @@ -216,6 +232,14 @@ func (s *Scenario) Shutdown() { // } } +// Shutdown shuts down and cleans up all the containers (ControlServer, TailscaleClient) +// and networks associated with it. +// In addition, it will save the logs of the ControlServer to `/tmp/control` in the +// environment running the tests. +func (s *Scenario) Shutdown() { + s.ShutdownAssertNoPanics(nil) +} + // Users returns the name of all users associated with the Scenario. func (s *Scenario) Users() []string { users := make([]string, 0) @@ -241,6 +265,10 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) { return headscale, nil } + if usePostgresForTest { + opts = append(opts, hsic.WithPostgres()) + } + headscale, err := hsic.New(s.pool, s.network, opts...) 
if err != nil { return nil, fmt.Errorf("failed to create headscale container: %w", err) @@ -420,8 +448,10 @@ func (s *Scenario) WaitForTailscaleSync() error { if err != nil { for _, user := range s.users { for _, client := range user.Clients { - peers, _ := client.PrettyPeers() - log.Println(peers) + peers, allOnline, _ := client.FailingPeersAsString() + if !allOnline { + log.Println(peers) + } } } } @@ -447,7 +477,7 @@ func (s *Scenario) WaitForTailscaleSyncWithPeerCount(peerCount int) error { return nil } -// CreateHeadscaleEnv is a conventient method returning a complete Headcale +// CreateHeadscaleEnv is a convenient method returning a complete Headcale // test environment with nodes of all versions, joined to the server with X // users. func (s *Scenario) CreateHeadscaleEnv( @@ -455,10 +485,6 @@ func (s *Scenario) CreateHeadscaleEnv( tsOpts []tsic.Option, opts ...hsic.Option, ) error { - if usePostgresForTest { - opts = append(opts, hsic.WithPostgres()) - } - headscale, err := s.Headscale(opts...) if err != nil { return err @@ -508,7 +534,7 @@ func (s *Scenario) GetIPs(user string) ([]netip.Addr, error) { return ips, fmt.Errorf("failed to get ips: %w", errNoUserAvailable) } -// GetIPs returns all TailscaleClients associated with a User in a Scenario. +// GetClients returns all TailscaleClients associated with a User in a Scenario. func (s *Scenario) GetClients(user string) ([]TailscaleClient, error) { var clients []TailscaleClient if ns, ok := s.users[user]; ok { @@ -584,7 +610,7 @@ func (s *Scenario) ListTailscaleClientsIPs(users ...string) ([]netip.Addr, error return allIps, nil } -// ListTailscaleClientsIPs returns a list of FQDN based on Users +// ListTailscaleClientsFQDNs returns a list of FQDN based on Users // passed as parameters. 
func (s *Scenario) ListTailscaleClientsFQDNs(users ...string) ([]string, error) { allFQDNs := make([]string, 0) diff --git a/integration/scenario_test.go b/integration/scenario_test.go index cc9810a4..9db4c3a0 100644 --- a/integration/scenario_test.go +++ b/integration/scenario_test.go @@ -7,7 +7,7 @@ import ( ) // This file is intended to "test the test framework", by proxy it will also test -// some Headcsale/Tailscale stuff, but mostly in very simple ways. +// some Headscale/Tailscale stuff, but mostly in very simple ways. func IntegrationSkip(t *testing.T) { t.Helper() @@ -33,7 +33,7 @@ func TestHeadscale(t *testing.T) { user := "test-space" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -78,7 +78,7 @@ func TestCreateTailscale(t *testing.T) { user := "only-create-containers" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -114,7 +114,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) { count := 1 - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() diff --git a/integration/ssh_test.go b/integration/ssh_test.go index 587190e4..6d053b0d 100644 --- a/integration/ssh_test.go +++ b/integration/ssh_test.go @@ -44,7 +44,7 @@ var retry = func(times int, sleepInterval time.Duration, func sshScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Scenario { t.Helper() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) spec := map[string]int{ diff --git a/integration/tailscale.go b/integration/tailscale.go index 9d6796bd..5b1baf1b 100644 --- a/integration/tailscale.go +++ b/integration/tailscale.go @@ -27,7 +27,7 @@ type TailscaleClient interface { Down() error IPs() ([]netip.Addr, error) FQDN() (string, error) - Status() (*ipnstate.Status, error) + 
Status(...bool) (*ipnstate.Status, error) Netmap() (*netmap.NetworkMap, error) Netcheck() (*netcheck.Report, error) WaitForNeedsLogin() error @@ -36,5 +36,9 @@ type TailscaleClient interface { Ping(hostnameOrIP string, opts ...tsic.PingOption) error Curl(url string, opts ...tsic.CurlOption) (string, error) ID() string - PrettyPeers() (string, error) + ReadFile(path string) ([]byte, error) + + // FailingPeersAsString returns a formatted-ish multi-line-string of peers in the client + // and a bool indicating if the clients online count and peer count is equal. + FailingPeersAsString() (string, bool, error) } diff --git a/integration/tsic/tsic.go b/integration/tsic/tsic.go index 320ae0d5..a3fac17c 100644 --- a/integration/tsic/tsic.go +++ b/integration/tsic/tsic.go @@ -1,6 +1,8 @@ package tsic import ( + "archive/tar" + "bytes" "context" "encoding/json" "errors" @@ -9,6 +11,7 @@ import ( "log" "net/netip" "net/url" + "os" "strconv" "strings" "time" @@ -503,7 +506,7 @@ func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) { } // Status returns the ipnstate.Status of the Tailscale instance. -func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) { +func (t *TailscaleInContainer) Status(save ...bool) (*ipnstate.Status, error) { command := []string{ "tailscale", "status", @@ -521,60 +524,70 @@ func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) { return nil, fmt.Errorf("failed to unmarshal tailscale status: %w", err) } + err = os.WriteFile(fmt.Sprintf("/tmp/control/%s_status.json", t.hostname), []byte(result), 0o755) + if err != nil { + return nil, fmt.Errorf("status netmap to /tmp/control: %w", err) + } + return &status, err } // Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance. // Only works with Tailscale 1.56 and newer. // Panics if version is lower then minimum. 
-// func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { -// if !util.TailscaleVersionNewerOrEqual("1.56", t.version) { -// panic(fmt.Sprintf("tsic.Netmap() called with unsupported version: %s", t.version)) -// } +func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { + if !util.TailscaleVersionNewerOrEqual("1.56", t.version) { + panic(fmt.Sprintf("tsic.Netmap() called with unsupported version: %s", t.version)) + } -// command := []string{ -// "tailscale", -// "debug", -// "netmap", -// } + command := []string{ + "tailscale", + "debug", + "netmap", + } -// result, stderr, err := t.Execute(command) -// if err != nil { -// fmt.Printf("stderr: %s\n", stderr) -// return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err) -// } + result, stderr, err := t.Execute(command) + if err != nil { + fmt.Printf("stderr: %s\n", stderr) + return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err) + } -// var nm netmap.NetworkMap -// err = json.Unmarshal([]byte(result), &nm) -// if err != nil { -// return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err) -// } + var nm netmap.NetworkMap + err = json.Unmarshal([]byte(result), &nm) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err) + } -// return &nm, err -// } + err = os.WriteFile(fmt.Sprintf("/tmp/control/%s_netmap.json", t.hostname), []byte(result), 0o755) + if err != nil { + return nil, fmt.Errorf("saving netmap to /tmp/control: %w", err) + } + + return &nm, err +} // Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance. // This implementation is based on getting the netmap from `tailscale debug watch-ipn` // as there seem to be some weirdness omitting endpoint and DERP info if we use // Patch updates. // This implementation works on all supported versions. 
-func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { - // watch-ipn will only give an update if something is happening, - // since we send keep alives, the worst case for this should be - // 1 minute, but set a slightly more conservative time. - ctx, _ := context.WithTimeout(context.Background(), 3*time.Minute) +// func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { +// // watch-ipn will only give an update if something is happening, +// // since we send keep alives, the worst case for this should be +// // 1 minute, but set a slightly more conservative time. +// ctx, _ := context.WithTimeout(context.Background(), 3*time.Minute) - notify, err := t.watchIPN(ctx) - if err != nil { - return nil, err - } +// notify, err := t.watchIPN(ctx) +// if err != nil { +// return nil, err +// } - if notify.NetMap == nil { - return nil, fmt.Errorf("no netmap present in ipn.Notify") - } +// if notify.NetMap == nil { +// return nil, fmt.Errorf("no netmap present in ipn.Notify") +// } - return notify.NetMap, nil -} +// return notify.NetMap, nil +// } // watchIPN watches `tailscale debug watch-ipn` for a ipn.Notify object until // it gets one that has a netmap.NetworkMap. @@ -680,15 +693,18 @@ func (t *TailscaleInContainer) FQDN() (string, error) { return status.Self.DNSName, nil } -// PrettyPeers returns a formatted-ish table of peers in the client. -func (t *TailscaleInContainer) PrettyPeers() (string, error) { +// FailingPeersAsString returns a formatted-ish multi-line-string of peers in the client +// and a bool indicating if the clients online count and peer count is equal. 
+func (t *TailscaleInContainer) FailingPeersAsString() (string, bool, error) { status, err := t.Status() if err != nil { - return "", fmt.Errorf("failed to get FQDN: %w", err) + return "", false, fmt.Errorf("failed to get FQDN: %w", err) } - str := fmt.Sprintf("Peers of %s\n", t.hostname) - str += "Hostname\tOnline\tLastSeen\n" + var b strings.Builder + + fmt.Fprintf(&b, "Peers of %s\n", t.hostname) + fmt.Fprint(&b, "Hostname\tOnline\tLastSeen\n") peerCount := len(status.Peers()) onlineCount := 0 @@ -700,12 +716,12 @@ func (t *TailscaleInContainer) PrettyPeers() (string, error) { onlineCount++ } - str += fmt.Sprintf("%s\t%t\t%s\n", peer.HostName, peer.Online, peer.LastSeen) + fmt.Fprintf(&b, "%s\t%t\t%s\n", peer.HostName, peer.Online, peer.LastSeen) } - str += fmt.Sprintf("Peer Count: %d, Online Count: %d\n\n", peerCount, onlineCount) + fmt.Fprintf(&b, "Peer Count: %d, Online Count: %d\n\n", peerCount, onlineCount) - return str, nil + return b.String(), peerCount == onlineCount, nil } // WaitForNeedsLogin blocks until the Tailscale (tailscaled) instance has @@ -982,5 +998,45 @@ func (t *TailscaleInContainer) WriteFile(path string, data []byte) error { // SaveLog saves the current stdout log of the container to a path // on the host system. func (t *TailscaleInContainer) SaveLog(path string) error { - return dockertestutil.SaveLog(t.pool, t.container, path) + // TODO(kradalby): Assert if tailscale logs contains panics. + _, _, err := dockertestutil.SaveLog(t.pool, t.container, path) + return err +} + +// ReadFile reads a file from the Tailscale container. +// It returns the content of the file as a byte slice. 
+func (t *TailscaleInContainer) ReadFile(path string) ([]byte, error) { + tarBytes, err := integrationutil.FetchPathFromContainer(t.pool, t.container, path) + if err != nil { + return nil, fmt.Errorf("reading file from container: %w", err) + } + + var out bytes.Buffer + tr := tar.NewReader(bytes.NewReader(tarBytes)) + for { + hdr, err := tr.Next() + if err == io.EOF { + break // End of archive + } + if err != nil { + return nil, fmt.Errorf("reading tar header: %w", err) + } + + if !strings.Contains(path, hdr.Name) { + return nil, fmt.Errorf("file not found in tar archive, looking for: %s, header was: %s", path, hdr.Name) + } + + if _, err := io.Copy(&out, tr); err != nil { + return nil, fmt.Errorf("copying file to buffer: %w", err) + } + + // Only support reading the first tile + break + } + + if out.Len() == 0 { + return nil, fmt.Errorf("file is empty") + } + + return out.Bytes(), nil } diff --git a/integration/utils.go b/integration/utils.go index b9e25be6..840dbc4c 100644 --- a/integration/utils.go +++ b/integration/utils.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/integration/tsic" "github.com/stretchr/testify/assert" ) @@ -154,11 +155,11 @@ func assertClientsState(t *testing.T, clients []TailscaleClient) { func assertValidNetmap(t *testing.T, client TailscaleClient) { t.Helper() - // if !util.TailscaleVersionNewerOrEqual("1.56", client.Version()) { - // t.Logf("%q has version %q, skipping netmap check...", client.Hostname(), client.Version()) + if !util.TailscaleVersionNewerOrEqual("1.56", client.Version()) { + t.Logf("%q has version %q, skipping netmap check...", client.Hostname(), client.Version()) - // return - // } + return + } t.Logf("Checking netmap of %q", client.Hostname()) @@ -175,7 +176,11 @@ func assertValidNetmap(t *testing.T, client TailscaleClient) { assert.NotEmptyf(t, netmap.SelfNode.AllowedIPs(), "%q does not have any allowed IPs", client.Hostname()) 
assert.NotEmptyf(t, netmap.SelfNode.Addresses(), "%q does not have any addresses", client.Hostname()) - assert.Truef(t, *netmap.SelfNode.Online(), "%q is not online", client.Hostname()) + if netmap.SelfNode.Online() != nil { + assert.Truef(t, *netmap.SelfNode.Online(), "%q is not online", client.Hostname()) + } else { + t.Errorf("Online should not be nil for %s", client.Hostname()) + } assert.Falsef(t, netmap.SelfNode.Key().IsZero(), "%q does not have a valid NodeKey", client.Hostname()) assert.Falsef(t, netmap.SelfNode.Machine().IsZero(), "%q does not have a valid MachineKey", client.Hostname()) @@ -213,7 +218,7 @@ func assertValidNetmap(t *testing.T, client TailscaleClient) { // This test is not suitable for ACL/partial connection tests. func assertValidStatus(t *testing.T, client TailscaleClient) { t.Helper() - status, err := client.Status() + status, err := client.Status(true) if err != nil { t.Fatalf("getting status for %q: %s", client.Hostname(), err) } @@ -326,7 +331,7 @@ func dockertestMaxWait() time.Duration { // return timeout // } -// pingAllNegativeHelper is intended to have 1 or more nodes timeing out from the ping, +// pingAllNegativeHelper is intended to have 1 or more nodes timing out from the ping, // it counts failures instead of successes. 
// func pingAllNegativeHelper(t *testing.T, clients []TailscaleClient, addrs []string) int { // t.Helper() diff --git a/mkdocs.yml b/mkdocs.yml index 86a15469..fe5c0d64 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -10,7 +10,7 @@ repo_name: juanfont/headscale repo_url: https://github.com/juanfont/headscale # Copyright -copyright: Copyright © 2023 Headscale authors +copyright: Copyright © 2024 Headscale authors # Configuration theme: @@ -139,9 +139,5 @@ nav: - Remote CLI: remote-cli.md - Usage: - Android: android-client.md + - Apple: apple-client.md - Windows: windows-client.md - - iOS: iOS-client.md - - Proposals: - - ACLs: proposals/001-acls.md - - Better routing: proposals/002-better-routing.md - - Glossary: glossary.md diff --git a/proto/headscale/v1/headscale.proto b/proto/headscale/v1/headscale.proto index f8cc596f..7324b65a 100644 --- a/proto/headscale/v1/headscale.proto +++ b/proto/headscale/v1/headscale.proto @@ -9,6 +9,7 @@ import "headscale/v1/preauthkey.proto"; import "headscale/v1/node.proto"; import "headscale/v1/routes.proto"; import "headscale/v1/apikey.proto"; +import "headscale/v1/policy.proto"; // import "headscale/v1/device.proto"; service HeadscaleService { @@ -123,6 +124,13 @@ service HeadscaleService { post: "/api/v1/node/{node_id}/user" }; } + + rpc BackfillNodeIPs(BackfillNodeIPsRequest) returns (BackfillNodeIPsResponse) { + option (google.api.http) = { + post: "/api/v1/node/backfillips" + }; + } + // --- Node end --- // --- Route start --- @@ -186,6 +194,21 @@ service HeadscaleService { } // --- ApiKeys end --- + // --- Policy start --- + rpc GetPolicy(GetPolicyRequest) returns (GetPolicyResponse) { + option (google.api.http) = { + get: "/api/v1/policy" + }; + } + + rpc SetPolicy(SetPolicyRequest) returns (SetPolicyResponse) { + option (google.api.http) = { + put: "/api/v1/policy" + body: "*" + }; + } + // --- Policy end --- + // Implement Tailscale API // rpc GetDevice(GetDeviceRequest) returns(GetDeviceResponse) { // option(google.api.http) 
= { diff --git a/proto/headscale/v1/node.proto b/proto/headscale/v1/node.proto index a9551530..26fe73c7 100644 --- a/proto/headscale/v1/node.proto +++ b/proto/headscale/v1/node.proto @@ -126,3 +126,11 @@ message DebugCreateNodeRequest { message DebugCreateNodeResponse { Node node = 1; } + +message BackfillNodeIPsRequest { + bool confirmed = 1; +} + +message BackfillNodeIPsResponse { + repeated string changes = 1; +} diff --git a/proto/headscale/v1/policy.proto b/proto/headscale/v1/policy.proto new file mode 100644 index 00000000..995f3af8 --- /dev/null +++ b/proto/headscale/v1/policy.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package headscale.v1; +option go_package = "github.com/juanfont/headscale/gen/go/v1"; + +import "google/protobuf/timestamp.proto"; + +message SetPolicyRequest { + string policy = 1; +} + +message SetPolicyResponse { + string policy = 1; + google.protobuf.Timestamp updated_at = 2; +} + +message GetPolicyRequest {} + +message GetPolicyResponse { + string policy = 1; + google.protobuf.Timestamp updated_at = 2; +} \ No newline at end of file