diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 8563e7af..00000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -name: "Bug report" -about: "Create a bug report to help us improve" -title: "" -labels: ["bug"] -assignees: "" ---- - - - -## Bug description - - - -## Environment - - - -- OS: -- Headscale version: -- Tailscale version: - - - -- [ ] Headscale is behind a (reverse) proxy -- [ ] Headscale runs in a container - -## To Reproduce - - - -## Logs and attachments - - diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 00000000..a7afb6d3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,83 @@ +name: 🐞 Bug +description: File a bug/issue +title: "[Bug] " +labels: ["bug", "needs triage"] +body: + - type: checkboxes + attributes: + label: Is this a support request? + description: This issue tracker is for bugs and feature requests only. If you need help, please use ask in our Discord community + options: + - label: This is not a support request + required: true + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the bug you encountered. + options: + - label: I have searched the existing issues + required: true + - type: textarea + attributes: + label: Current Behavior + description: A concise description of what you're experiencing. + validations: + required: true + - type: textarea + attributes: + label: Expected Behavior + description: A concise description of what you expected to happen. + validations: + required: true + - type: textarea + attributes: + label: Steps To Reproduce + description: Steps to reproduce the behavior. + placeholder: | + 1. In this environment... + 1. With this config... + 1. Run '...' + 1. See error... 
+ validations: + required: true + - type: textarea + attributes: + label: Environment + description: | + examples: + - **OS**: Ubuntu 20.04 + - **Headscale version**: 0.22.3 + - **Tailscale version**: 1.64.0 + value: | + - OS: + - Headscale version: + - Tailscale version: + render: markdown + validations: + required: true + - type: checkboxes + attributes: + label: Runtime environment + options: + - label: Headscale is behind a (reverse) proxy + required: false + - label: Headscale runs in a container + required: false + - type: textarea + attributes: + label: Anything else? + description: | + Links? References? Anything that will give us more context about the issue you are encountering! + + - Client netmap dump (see below) + - ACL configuration + - Headscale configuration + + Dump the netmap of tailscale clients: + `tailscale debug netmap > DESCRIPTIVE_NAME.json` + + Please provide information describing the netmap, which client, which headscale version etc. + + Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 92c51b8f..00000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -name: "Feature request" -about: "Suggest an idea for headscale" -title: "" -labels: ["enhancement"] -assignees: "" ---- - -<!-- -We typically have a clear roadmap for what we want to improve and reserve the right -to close feature requests that does not fit in the roadmap, or fit with the scope -of the project, or we actually want to implement ourselves. - -Headscale is a multinational community across the globe. Our language is English. -All bug reports needs to be in English. ---> - -## Why - -<!-- Include the reason, why you would need the feature. E.g. what problem - does it solve? 
Or which workflow is currently frustrating and will be improved by - this? --> - -## Description - -<!-- A clear and precise description of what new or changed feature you want. --> diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 00000000..b95cd5e6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,36 @@ +name: 🚀 Feature Request +description: Suggest an idea for Headscale +title: "[Feature] <title>" +labels: [enhancement] +body: + - type: textarea + attributes: + label: Use case + description: Please describe the use case for this feature. + placeholder: | + <!-- Include the reason, why you would need the feature. E.g. what problem + does it solve? Or which workflow is currently frustrating and will be improved by + this? --> + validations: + required: true + - type: textarea + attributes: + label: Description + description: A clear and precise description of what new or changed feature you want. + validations: + required: true + - type: checkboxes + attributes: + label: Contribution + description: Are you willing to contribute to the implementation of this feature? + options: + - label: I can write the design doc for this feature + required: true + - label: I can contribute this feature + required: true + - type: textarea + attributes: + label: How can it be implemented? + description: Free text for your ideas on how this feature could be implemented. + validations: + required: false diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index d4e4f4f9..9d8e731d 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -12,7 +12,7 @@ If you find mistakes in the documentation, please submit a fix to the documentat <!-- Please tick if the following things apply. 
You… --> -- [ ] read the [CONTRIBUTING guidelines](README.md#contributing) +- [ ] have read the [CONTRIBUTING.md](./CONTRIBUTING.md) file - [ ] raised a GitHub issue or discussed it on the projects chat beforehand - [ ] added unit tests - [ ] added integration tests diff --git a/.github/workflows/contributors.yml b/.github/workflows/contributors.yml deleted file mode 100644 index 2c55c002..00000000 --- a/.github/workflows/contributors.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Contributors - -on: - push: - branches: - - main - workflow_dispatch: - -jobs: - add-contributors: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Delete upstream contributor branch - # Allow continue on failure to account for when the - # upstream branch is deleted or does not exist. - continue-on-error: true - run: git push origin --delete update-contributors - - name: Create up-to-date contributors branch - run: git checkout -B update-contributors - - name: Push empty contributors branch - run: git push origin update-contributors - - name: Switch back to main - run: git checkout main - - uses: BobAnkh/add-contributors@v0.2.2 - with: - CONTRIBUTOR: "## Contributors" - COLUMN_PER_ROW: "6" - ACCESS_TOKEN: ${{secrets.GITHUB_TOKEN}} - IMG_WIDTH: "100" - FONT_SIZE: "14" - PATH: "/README.md" - COMMIT_MESSAGE: "docs(README): update contributors" - AVATAR_SHAPE: "round" - BRANCH: "update-contributors" - PULL_REQUEST: "main" diff --git a/.github/workflows/docs-test.yml b/.github/workflows/docs-test.yml new file mode 100644 index 00000000..b0e60131 --- /dev/null +++ b/.github/workflows/docs-test.yml @@ -0,0 +1,27 @@ +name: Test documentation build + +on: [pull_request] + +concurrency: + group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install python + uses: actions/setup-python@v4 + with: + python-version: 3.x 
+ - name: Setup cache + uses: actions/cache@v2 + with: + key: ${{ github.ref }} + path: .cache + - name: Setup dependencies + run: pip install -r docs/requirements.txt + - name: Build docs + run: mkdocs build --strict diff --git a/.github/workflows/test-integration.yaml b/.github/workflows/test-integration.yaml index 06a99db4..9581bada 100644 --- a/.github/workflows/test-integration.yaml +++ b/.github/workflows/test-integration.yaml @@ -26,6 +26,7 @@ jobs: - TestPreAuthKeyCommand - TestPreAuthKeyCommandWithoutExpiry - TestPreAuthKeyCommandReusableEphemeral + - TestPreAuthKeyCorrectUserLoggedInCommand - TestApiKeyCommand - TestNodeTagCommand - TestNodeAdvertiseTagNoACLCommand @@ -43,7 +44,8 @@ jobs: - TestTaildrop - TestResolveMagicDNS - TestExpireNode - - TestNodeOnlineLastSeenStatus + - TestNodeOnlineStatus + - TestPingAllByIPManyUpDown - TestEnablingRoutes - TestHASubnetRouterFailover - TestEnableDisableAutoApprovedRoute diff --git a/.goreleaser.yml b/.goreleaser.yml index b1df31c7..4e91c74d 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -135,7 +135,7 @@ kos: - id: ghcr-debug repository: ghcr.io/juanfont/headscale bare: true - base_image: "debian:12" + base_image: gcr.io/distroless/base-debian12:debug build: headscale main: ./cmd/headscale env: @@ -160,7 +160,7 @@ kos: - id: dockerhub-debug build: headscale - base_image: "debian:12" + base_image: gcr.io/distroless/base-debian12:debug repository: headscale/headscale bare: true platforms: diff --git a/CHANGELOG.md b/CHANGELOG.md index c0186961..03516fd6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,7 +26,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473) - Change the structure of database configuration, see [config-example.yaml](./config-example.yaml) for the new structure. 
[#1700](https://github.com/juanfont/headscale/pull/1700) - Old structure has been remove and the configuration _must_ be converted. - - Adds additional configuration for PostgreSQL for setting max open, idle conection and idle connection lifetime. + - Adds additional configuration for PostgreSQL for setting max open, idle connection and idle connection lifetime. - API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553) - Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611) - The latest supported client is 1.38 @@ -39,6 +39,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - `/var/lib/headscale` and `/var/run/headscale` is no longer created automatically, see [container docs](./docs/running-headscale-container.md) - Prefixes are now defined per v4 and v6 range. [#1756](https://github.com/juanfont/headscale/pull/1756) - `ip_prefixes` option is now `prefixes.v4` and `prefixes.v6` + - `prefixes.allocation` can be set to assign IPs at `sequential` or `random`. [#1869](https://github.com/juanfont/headscale/pull/1869) ### Changes @@ -53,6 +54,10 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259) - Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565) - Add support for deleting api keys [#1702](https://github.com/juanfont/headscale/pull/1702) +- Add command to backfill IP addresses for nodes missing IPs from configured prefixes. 
[#1869](https://github.com/juanfont/headscale/pull/1869) +- Log available update as warning [#1877](https://github.com/juanfont/headscale/pull/1877) +- Add `autogroup:internet` to Policy [#1917](https://github.com/juanfont/headscale/pull/1917) +- Restore foreign keys and add constraints [#1562](https://github.com/juanfont/headscale/pull/1562) ## 0.22.3 (2023-05-12) @@ -65,7 +70,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ ### Changes - Add environment flags to enable pprof (profiling) [#1382](https://github.com/juanfont/headscale/pull/1382) - - Profiles are continously generated in our integration tests. + - Profiles are continuously generated in our integration tests. - Fix systemd service file location in `.deb` packages [#1391](https://github.com/juanfont/headscale/pull/1391) - Improvements on Noise implementation [#1379](https://github.com/juanfont/headscale/pull/1379) - Replace node filter logic, ensuring nodes with access can see eachother [#1381](https://github.com/juanfont/headscale/pull/1381) @@ -156,7 +161,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - SSH ACLs status: - Support `accept` and `check` (SSH can be enabled and used for connecting and authentication) - Rejecting connections **are not supported**, meaning that if you enable SSH, then assume that _all_ `ssh` connections **will be allowed**. - - If you decied to try this feature, please carefully managed permissions by blocking port `22` with regular ACLs or do _not_ set `--ssh` on your clients. + - If you decided to try this feature, please carefully managed permissions by blocking port `22` with regular ACLs or do _not_ set `--ssh` on your clients. - We are currently improving our testing of the SSH ACLs, help us get an overview by testing and giving feedback. - This feature should be considered dangerous and it is disabled by default. Enable by setting `HEADSCALE_EXPERIMENTAL_FEATURE_SSH=1`. 
@@ -206,7 +211,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ ### Changes - Updated dependencies (including the library that lacked armhf support) [#722](https://github.com/juanfont/headscale/pull/722) -- Fix missing group expansion in function `excludeCorretlyTaggedNodes` [#563](https://github.com/juanfont/headscale/issues/563) +- Fix missing group expansion in function `excludeCorrectlyTaggedNodes` [#563](https://github.com/juanfont/headscale/issues/563) - Improve registration protocol implementation and switch to NodeKey as main identifier [#725](https://github.com/juanfont/headscale/pull/725) - Add ability to connect to PostgreSQL via unix socket [#734](https://github.com/juanfont/headscale/pull/734) @@ -226,7 +231,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Fix send on closed channel crash in polling [#542](https://github.com/juanfont/headscale/pull/542) - Fixed spurious calls to setLastStateChangeToNow from ephemeral nodes [#566](https://github.com/juanfont/headscale/pull/566) - Add command for moving nodes between namespaces [#362](https://github.com/juanfont/headscale/issues/362) -- Added more configuration parameters for OpenID Connect (scopes, free-form paramters, domain and user allowlist) +- Added more configuration parameters for OpenID Connect (scopes, free-form parameters, domain and user allowlist) - Add command to set tags on a node [#525](https://github.com/juanfont/headscale/issues/525) - Add command to view tags of nodes [#356](https://github.com/juanfont/headscale/issues/356) - Add --all (-a) flag to enable routes command [#360](https://github.com/juanfont/headscale/issues/360) @@ -274,10 +279,10 @@ after improving the test harness as part of adopting [#1460](https://github.com/ - Fix a bug were the same IP could be assigned to multiple hosts if joined in quick succession [#346](https://github.com/juanfont/headscale/pull/346) - Simplify the code behind 
registration of machines [#366](https://github.com/juanfont/headscale/pull/366) - - Nodes are now only written to database if they are registrated successfully + - Nodes are now only written to database if they are registered successfully - Fix a limitation in the ACLs that prevented users to write rules with `*` as source [#374](https://github.com/juanfont/headscale/issues/374) - Reduce the overhead of marshal/unmarshal for Hostinfo, routes and endpoints by using specific types in Machine [#371](https://github.com/juanfont/headscale/pull/371) -- Apply normalization function to FQDN on hostnames when hosts registers and retrieve informations [#363](https://github.com/juanfont/headscale/issues/363) +- Apply normalization function to FQDN on hostnames when hosts registers and retrieve information [#363](https://github.com/juanfont/headscale/issues/363) - Fix a bug that prevented the use of `tailscale logout` with OIDC [#508](https://github.com/juanfont/headscale/issues/508) - Added Tailscale repo HEAD and unstable releases channel to the integration tests targets [#513](https://github.com/juanfont/headscale/pull/513) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..18d7dfb8 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# Contributing + +Headscale is "Open Source, acknowledged contribution", this means that any contribution will have to be discussed with the maintainers before being added to the project. +This model has been chosen to reduce the risk of burnout by limiting the maintenance overhead of reviewing and validating third-party code. + +## Why do we have this model? + +Headscale has a small maintainer team that tries to balance working on the project, fixing bugs and reviewing contributions. + +When we work on issues ourselves, we develop first hand knowledge of the code and it makes it possible for us to maintain and own the code as the project develops. + +Code contributions are seen as a positive thing. 
People enjoy and engage with our project, but it also comes with some challenges; we have to understand the code, we have to understand the feature, we might have to become familiar with external libraries or services and we think about security implications. All those steps are required during the reviewing process. After the code has been merged, the feature has to be maintained. Any changes reliant on external services must be updated and expanded accordingly. + +The review and day-1 maintenance add a significant burden on the maintainers. Often we hope that the contributor will help out, but we found that most of the time, they disappear after their new feature was added. + +This means that when someone contributes, we are mostly happy about it, but we do have to run it through a series of checks to establish if we actually can maintain this feature. + +## What do we require? + +A general description is provided here and an explicit list is provided in our pull request template. + +All new features have to start out with a design document, which should be discussed on the issue tracker (not Discord). It should include a use case for the feature, how it can be implemented, who will implement it and a plan for maintaining it. + +All features have to be end-to-end tested (integration tests) and have good unit test coverage to ensure that they work as expected. This will also ensure that the feature continues to work as expected over time. If a change cannot be tested, a strong case for why this is not possible needs to be presented. + +The contributor should help to maintain the feature over time. In case the feature is not maintained properly, the maintainers reserve the right to remove features they deem unmaintainable. This should help to improve the quality of the software and keep it in a maintainable state. + +## Bug fixes + +Headscale is open to code contributions for bug fixes without discussion. 
+ +## Documentation + +If you find mistakes in the documentation, please submit a fix to the documentation. diff --git a/Dockerfile.debug b/Dockerfile.debug index 659ae4cc..4e63dca8 100644 --- a/Dockerfile.debug +++ b/Dockerfile.debug @@ -2,31 +2,24 @@ # and are in no way endorsed by Headscale's maintainers as an # official nor supported release or distribution. -FROM docker.io/golang:1.22-bookworm AS build +FROM docker.io/golang:1.22-bookworm ARG VERSION=dev ENV GOPATH /go WORKDIR /go/src/headscale -COPY go.mod go.sum /go/src/headscale/ -RUN go mod download - -COPY . . - -RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale -RUN test -e /go/bin/headscale - -# Debug image -FROM docker.io/golang:1.22-bookworm - -COPY --from=build /go/bin/headscale /bin/headscale -ENV TZ UTC - RUN apt-get update \ && apt-get install --no-install-recommends --yes less jq \ && rm -rf /var/lib/apt/lists/* \ && apt-get clean RUN mkdir -p /var/run/headscale +COPY go.mod go.sum /go/src/headscale/ +RUN go mod download + +COPY . . + +RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale && test -e /go/bin/headscale + # Need to reset the entrypoint or everything will run as a busybox script ENTRYPOINT [] EXPOSE 8080/tcp diff --git a/Dockerfile.tailscale-HEAD b/Dockerfile.tailscale-HEAD index 83ff9fe5..f78d687a 100644 --- a/Dockerfile.tailscale-HEAD +++ b/Dockerfile.tailscale-HEAD @@ -1,21 +1,43 @@ -# This Dockerfile and the images produced are for testing headscale, -# and are in no way endorsed by Headscale's maintainers as an -# official nor supported release or distribution. +# Copyright (c) Tailscale Inc & AUTHORS +# SPDX-License-Identifier: BSD-3-Clause -FROM golang:latest +# This Dockerfile is more or less lifted from tailscale/tailscale +# to ensure a similar build process when testing the HEAD of tailscale. 
-RUN apt-get update \ - && apt-get install -y dnsutils git iptables ssh ca-certificates \ - && rm -rf /var/lib/apt/lists/* +FROM golang:1.22-alpine AS build-env -RUN useradd --shell=/bin/bash --create-home ssh-it-user +WORKDIR /go/src +RUN apk add --no-cache git + +# Replace `RUN git...` with `COPY` and a local checked out version of Tailscale in `./tailscale` +# to test specific commits of the Tailscale client. This is useful when trying to find out why +# something specific broke between two versions of Tailscale with for example `git bisect`. +# COPY ./tailscale . RUN git clone https://github.com/tailscale/tailscale.git -WORKDIR /go/tailscale +WORKDIR /go/src/tailscale -RUN git checkout main \ - && sh build_dist.sh tailscale.com/cmd/tailscale \ - && sh build_dist.sh tailscale.com/cmd/tailscaled \ - && cp tailscale /usr/local/bin/ \ - && cp tailscaled /usr/local/bin/ + +# see build_docker.sh +ARG VERSION_LONG="" +ENV VERSION_LONG=$VERSION_LONG +ARG VERSION_SHORT="" +ENV VERSION_SHORT=$VERSION_SHORT +ARG VERSION_GIT_HASH="" +ENV VERSION_GIT_HASH=$VERSION_GIT_HASH +ARG TARGETARCH + +RUN GOARCH=$TARGETARCH go install -ldflags="\ + -X tailscale.com/version.longStamp=$VERSION_LONG \ + -X tailscale.com/version.shortStamp=$VERSION_SHORT \ + -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ + -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot + +FROM alpine:3.18 +RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables curl + +COPY --from=build-env /go/bin/* /usr/local/bin/ +# For compat with the previous run.sh, although ideally you should be +# using build_docker.sh which sets an entrypoint for the image. 
+RUN mkdir /tailscale && ln -s /usr/local/bin/containerboot /tailscale/run.sh diff --git a/Makefile b/Makefile index 442690ed..719393f5 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,7 @@ test_integration: --name headscale-test-suite \ -v $$PWD:$$PWD -w $$PWD/integration \ -v /var/run/docker.sock:/var/run/docker.sock \ + -v $$PWD/control_logs:/tmp/control \ golang:1 \ go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8 diff --git a/README.md b/README.md index 4c7ccacb..2ee8f4eb 100644 --- a/README.md +++ b/README.md @@ -87,24 +87,19 @@ Please have a look at the [`documentation`](https://headscale.net/). ## Disclaimer -1. This project is not associated with Tailscale Inc. -2. The purpose of Headscale is maintaining a working, self-hosted Tailscale control panel. +This project is not associated with Tailscale Inc. + +However, one of the active maintainers for Headscale [is employed by Tailscale](https://tailscale.com/blog/opensource) and he is allowed to spend work hours contributing to the project. Contributions from this maintainer are reviewed by other maintainers. + +The maintainers work together on setting the direction for the project. The underlying principle is to serve the community of self-hosters, enthusiasts and hobbyists - while having a sustainable project. ## Contributing -Headscale is "Open Source, acknowledged contribution", this means that any -contribution will have to be discussed with the Maintainers before being submitted. - -This model has been chosen to reduce the risk of burnout by limiting the -maintenance overhead of reviewing and validating third-party code. - -Headscale is open to code contributions for bug fixes without discussion. - -If you find mistakes in the documentation, please submit a fix to the documentation. +Please read the [CONTRIBUTING.md](./CONTRIBUTING.md) file. 
### Requirements -To contribute to headscale you would need the lastest version of [Go](https://golang.org) +To contribute to headscale you would need the latest version of [Go](https://golang.org) and [Buf](https://buf.build)(Protobuf generator). We recommend using [Nix](https://nixos.org/) to setup a development environment. This can @@ -172,1033 +167,8 @@ make build ## Contributors -<table> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/kradalby> - <img src=https://avatars.githubusercontent.com/u/98431?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Kristoffer Dalby/> - <br /> - <sub style="font-size:14px"><b>Kristoffer Dalby</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/juanfont> - <img src=https://avatars.githubusercontent.com/u/181059?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Juan Font/> - <br /> - <sub style="font-size:14px"><b>Juan Font</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/restanrm> - <img src=https://avatars.githubusercontent.com/u/4344371?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Adrien Raffin-Caboisse/> - <br /> - <sub style="font-size:14px"><b>Adrien Raffin-Caboisse</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/cure> - <img src=https://avatars.githubusercontent.com/u/149135?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ward Vandewege/> - <br /> - <sub style="font-size:14px"><b>Ward Vandewege</b></sub> - </a> - 
</td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/huskyii> - <img src=https://avatars.githubusercontent.com/u/5499746?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jiang Zhu/> - <br /> - <sub style="font-size:14px"><b>Jiang Zhu</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/tsujamin> - <img src=https://avatars.githubusercontent.com/u/2435619?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Benjamin Roberts/> - <br /> - <sub style="font-size:14px"><b>Benjamin Roberts</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/reynico> - <img src=https://avatars.githubusercontent.com/u/715768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Nico/> - <br /> - <sub style="font-size:14px"><b>Nico</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/e-zk> - <img src=https://avatars.githubusercontent.com/u/58356365?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=e-zk/> - <br /> - <sub style="font-size:14px"><b>e-zk</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/evenh> - <img src=https://avatars.githubusercontent.com/u/2701536?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Even Holthe/> - <br /> - <sub style="font-size:14px"><b>Even Holthe</b></sub> - </a> - </td> - <td align="center" style="word-wrap: 
break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ImpostorKeanu> - <img src=https://avatars.githubusercontent.com/u/11574161?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Justin Angel/> - <br /> - <sub style="font-size:14px"><b>Justin Angel</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ItalyPaleAle> - <img src=https://avatars.githubusercontent.com/u/43508?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Alessandro (Ale) Segala/> - <br /> - <sub style="font-size:14px"><b>Alessandro (Ale) Segala</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ohdearaugustin> - <img src=https://avatars.githubusercontent.com/u/14001491?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ohdearaugustin/> - <br /> - <sub style="font-size:14px"><b>ohdearaugustin</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mpldr> - <img src=https://avatars.githubusercontent.com/u/33086936?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Moritz Poldrack/> - <br /> - <sub style="font-size:14px"><b>Moritz Poldrack</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Orhideous> - <img src=https://avatars.githubusercontent.com/u/2265184?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andriy Kushnir/> - <br /> - <sub style="font-size:14px"><b>Andriy Kushnir</b></sub> - </a> - 
</td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/GrigoriyMikhalkin> - <img src=https://avatars.githubusercontent.com/u/3637857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=GrigoriyMikhalkin/> - <br /> - <sub style="font-size:14px"><b>GrigoriyMikhalkin</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mike-lloyd03> - <img src=https://avatars.githubusercontent.com/u/49411532?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mike Lloyd/> - <br /> - <sub style="font-size:14px"><b>Mike Lloyd</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/christian-heusel> - <img src=https://avatars.githubusercontent.com/u/26827864?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Christian Heusel/> - <br /> - <sub style="font-size:14px"><b>Christian Heusel</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/iSchluff> - <img src=https://avatars.githubusercontent.com/u/1429641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anton Schubert/> - <br /> - <sub style="font-size:14px"><b>Anton Schubert</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Niek> - <img src=https://avatars.githubusercontent.com/u/213140?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Niek van der Maas/> - <br /> - <sub 
style="font-size:14px"><b>Niek van der Maas</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/negbie> - <img src=https://avatars.githubusercontent.com/u/20154956?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Eugen Biegler/> - <br /> - <sub style="font-size:14px"><b>Eugen Biegler</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/617a7aa> - <img src=https://avatars.githubusercontent.com/u/67651251?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azz/> - <br /> - <sub style="font-size:14px"><b>Azz</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/qbit> - <img src=https://avatars.githubusercontent.com/u/68368?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aaron Bieber/> - <br /> - <sub style="font-size:14px"><b>Aaron Bieber</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/kazauwa> - <img src=https://avatars.githubusercontent.com/u/12330159?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Igor Perepilitsyn/> - <br /> - <sub style="font-size:14px"><b>Igor Perepilitsyn</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Aluxima> - <img src=https://avatars.githubusercontent.com/u/16262531?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Laurent Marchaud/> - <br /> - <sub 
style="font-size:14px"><b>Laurent Marchaud</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/majst01> - <img src=https://avatars.githubusercontent.com/u/410110?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan Majer/> - <br /> - <sub style="font-size:14px"><b>Stefan Majer</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/bravechamp> - <img src=https://avatars.githubusercontent.com/u/48980452?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=bravechamp/> - <br /> - <sub style="font-size:14px"><b>bravechamp</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/hdhoang> - <img src=https://avatars.githubusercontent.com/u/12537?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=hdhoang/> - <br /> - <sub style="font-size:14px"><b>hdhoang</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/OrvilleQ> - <img src=https://avatars.githubusercontent.com/u/21377465?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Orville Q. Song/> - <br /> - <sub style="font-size:14px"><b>Orville Q. 
Song</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/fdelucchijr> - <img src=https://avatars.githubusercontent.com/u/69133647?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Fernando De Lucchi/> - <br /> - <sub style="font-size:14px"><b>Fernando De Lucchi</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/vsychov> - <img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/> - <br /> - <sub style="font-size:14px"><b>MichaelKo</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/kevin1sMe> - <img src=https://avatars.githubusercontent.com/u/6886076?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kevinlin/> - <br /> - <sub style="font-size:14px"><b>kevinlin</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/unreality> - <img src=https://avatars.githubusercontent.com/u/352522?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=unreality/> - <br /> - <sub style="font-size:14px"><b>unreality</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/loprima-l> - <img src=https://avatars.githubusercontent.com/u/69201633?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=loprima-l/> - <br /> - <sub style="font-size:14px"><b>loprima-l</b></sub> 
- </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/samson4649> - <img src=https://avatars.githubusercontent.com/u/12725953?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Samuel Lock/> - <br /> - <sub style="font-size:14px"><b>Samuel Lock</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ptman> - <img src=https://avatars.githubusercontent.com/u/24669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Paul Tötterman/> - <br /> - <sub style="font-size:14px"><b>Paul Tötterman</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/dragetd> - <img src=https://avatars.githubusercontent.com/u/3639577?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael G./> - <br /> - <sub style="font-size:14px"><b>Michael G.</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mevansam> - <img src=https://avatars.githubusercontent.com/u/403630?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mevan Samaratunga/> - <br /> - <sub style="font-size:14px"><b>Mevan Samaratunga</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/majabojarska> - <img src=https://avatars.githubusercontent.com/u/33836570?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Maja Bojarska/> - <br /> - <sub style="font-size:14px"><b>Maja 
Bojarska</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ChibangLW> - <img src=https://avatars.githubusercontent.com/u/22293464?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ChibangLW/> - <br /> - <sub style="font-size:14px"><b>ChibangLW</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/jonathanspw> - <img src=https://avatars.githubusercontent.com/u/8390543?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan Wright/> - <br /> - <sub style="font-size:14px"><b>Jonathan Wright</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/madjam002> - <img src=https://avatars.githubusercontent.com/u/679137?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jamie Greeff/> - <br /> - <sub style="font-size:14px"><b>Jamie Greeff</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/deonthomasgy> - <img src=https://avatars.githubusercontent.com/u/150036?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Deon Thomas/> - <br /> - <sub style="font-size:14px"><b>Deon Thomas</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/linsomniac> - <img src=https://avatars.githubusercontent.com/u/466380?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sean Reifschneider/> - <br /> - <sub 
style="font-size:14px"><b>Sean Reifschneider</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/derelm> - <img src=https://avatars.githubusercontent.com/u/465155?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=derelm/> - <br /> - <sub style="font-size:14px"><b>derelm</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/puzpuzpuz> - <img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/> - <br /> - <sub style="font-size:14px"><b>Andrei Pechkurov</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/t56k> - <img src=https://avatars.githubusercontent.com/u/12165422?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=thomas/> - <br /> - <sub style="font-size:14px"><b>thomas</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/qzydustin> - <img src=https://avatars.githubusercontent.com/u/44362429?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zhenyu Qi/> - <br /> - <sub style="font-size:14px"><b>Zhenyu Qi</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/vdovhanych> - <img src=https://avatars.githubusercontent.com/u/45185420?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Vitalij Dovhanyc/> - <br /> - <sub 
style="font-size:14px"><b>Vitalij Dovhanyc</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ratsclub> - <img src=https://avatars.githubusercontent.com/u/25647735?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Victor Freire/> - <br /> - <sub style="font-size:14px"><b>Victor Freire</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/snh> - <img src=https://avatars.githubusercontent.com/u/2051768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Steven Honson/> - <br /> - <sub style="font-size:14px"><b>Steven Honson</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/SilverBut> - <img src=https://avatars.githubusercontent.com/u/6560655?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Silver Bullet/> - <br /> - <sub style="font-size:14px"><b>Silver Bullet</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/QZAiXH> - <img src=https://avatars.githubusercontent.com/u/23068780?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Snack/> - <br /> - <sub style="font-size:14px"><b>Snack</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/artemklevtsov> - <img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/> - <br /> - <sub 
style="font-size:14px"><b>Artem Klevtsov</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/cmars> - <img src=https://avatars.githubusercontent.com/u/23741?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Casey Marshall/> - <br /> - <sub style="font-size:14px"><b>Casey Marshall</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/TotoTheDragon> - <img src=https://avatars.githubusercontent.com/u/42499964?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=DeveloperDragon/> - <br /> - <sub style="font-size:14px"><b>DeveloperDragon</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/dbevacqua> - <img src=https://avatars.githubusercontent.com/u/6534306?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=dbevacqua/> - <br /> - <sub style="font-size:14px"><b>dbevacqua</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/SuperSandro2000> - <img src=https://avatars.githubusercontent.com/u/7258858?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sandro/> - <br /> - <sub style="font-size:14px"><b>Sandro</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/pvinis> - <img src=https://avatars.githubusercontent.com/u/100233?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pavlos Vinieratos/> - <br /> - <sub 
style="font-size:14px"><b>Pavlos Vinieratos</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/pallabpain> - <img src=https://avatars.githubusercontent.com/u/5305744?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pallab Pain/> - <br /> - <sub style="font-size:14px"><b>Pallab Pain</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/joshuataylor> - <img src=https://avatars.githubusercontent.com/u/225131?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Josh Taylor/> - <br /> - <sub style="font-size:14px"><b>Josh Taylor</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/motiejus> - <img src=https://avatars.githubusercontent.com/u/107720?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Motiejus Jakštys/> - <br /> - <sub style="font-size:14px"><b>Motiejus Jakštys</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/CNLHC> - <img src=https://avatars.githubusercontent.com/u/21005146?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=LIU HANCHENG/> - <br /> - <sub style="font-size:14px"><b>LIU HANCHENG</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/caelansar> - <img src=https://avatars.githubusercontent.com/u/31852257?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=caelansar/> - <br /> - 
<sub style="font-size:14px"><b>caelansar</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Bpazy> - <img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/> - <br /> - <sub style="font-size:14px"><b>Ziyuan Han</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/zhzy0077> - <img src=https://avatars.githubusercontent.com/u/8717471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zhiyuan Zheng/> - <br /> - <sub style="font-size:14px"><b>Zhiyuan Zheng</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/zekker6> - <img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/> - <br /> - <sub style="font-size:14px"><b>Zakhar Bessarab</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/newellz2> - <img src=https://avatars.githubusercontent.com/u/52436542?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zachary Newell/> - <br /> - <sub style="font-size:14px"><b>Zachary Newell</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/sleepymole> - <img src=https://avatars.githubusercontent.com/u/17199941?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Yujie Xia/> - <br /> - <sub 
style="font-size:14px"><b>Yujie Xia</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/y0ngb1n> - <img src=https://avatars.githubusercontent.com/u/25719408?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=杨斌 Aben/> - <br /> - <sub style="font-size:14px"><b>杨斌 Aben</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/woudsma> - <img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/> - <br /> - <sub style="font-size:14px"><b>Tjerk Woudsma</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/thetillhoff> - <img src=https://avatars.githubusercontent.com/u/25052289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Till Hoffmann/> - <br /> - <sub style="font-size:14px"><b>Till Hoffmann</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/tianon> - <img src=https://avatars.githubusercontent.com/u/161631?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tianon Gravi/> - <br /> - <sub style="font-size:14px"><b>Tianon Gravi</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/gitter-badger> - <img src=https://avatars.githubusercontent.com/u/8518239?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=The Gitter Badger/> - <br /> - <sub 
style="font-size:14px"><b>The Gitter Badger</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Teteros> - <img src=https://avatars.githubusercontent.com/u/5067989?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Teteros/> - <br /> - <sub style="font-size:14px"><b>Teteros</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/m-tanner-dev0> - <img src=https://avatars.githubusercontent.com/u/97977342?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tanner/> - <br /> - <sub style="font-size:14px"><b>Tanner</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/sophware> - <img src=https://avatars.githubusercontent.com/u/41669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sophware/> - <br /> - <sub style="font-size:14px"><b>sophware</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/exsplashit> - <img src=https://avatars.githubusercontent.com/u/121534647?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stepan/> - <br /> - <sub style="font-size:14px"><b>Stepan</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/stefanvanburen> - <img src=https://avatars.githubusercontent.com/u/622527?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan VanBuren/> - <br /> - <sub style="font-size:14px"><b>Stefan 
VanBuren</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/6ixfalls> - <img src=https://avatars.githubusercontent.com/u/23470032?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Six/> - <br /> - <sub style="font-size:14px"><b>Six</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/shaananc> - <img src=https://avatars.githubusercontent.com/u/2287839?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Shaanan Cohney/> - <br /> - <sub style="font-size:14px"><b>Shaanan Cohney</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/muzy> - <img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian/> - <br /> - <sub style="font-size:14px"><b>Sebastian</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/atorregrosa-smd> - <img src=https://avatars.githubusercontent.com/u/78434679?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Àlex Torregrosa/> - <br /> - <sub style="font-size:14px"><b>Àlex Torregrosa</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/xpzouying> - <img src=https://avatars.githubusercontent.com/u/3946563?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=zy/> - <br /> - <sub style="font-size:14px"><b>zy</b></sub> - </a> - </td> - 
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Milokita> - <img src=https://avatars.githubusercontent.com/u/8993036?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=zx/> - <br /> - <sub style="font-size:14px"><b>zx</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Wakeful-Cloud> - <img src=https://avatars.githubusercontent.com/u/38930607?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Wakeful Cloud/> - <br /> - <sub style="font-size:14px"><b>Wakeful Cloud</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/phpmalik> - <img src=https://avatars.githubusercontent.com/u/26834645?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=phpmalik/> - <br /> - <sub style="font-size:14px"><b>phpmalik</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/pernila> - <img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tommi Pernila/> - <br /> - <sub style="font-size:14px"><b>Tommi Pernila</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/nicholas-yap> - <img src=https://avatars.githubusercontent.com/u/38109533?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=nicholas-yap/> - <br /> - <sub style="font-size:14px"><b>nicholas-yap</b></sub> - </a> - </td> - <td align="center" 
style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/manju-rn> - <img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/> - <br /> - <sub style="font-size:14px"><b>manju-rn</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ma6174> - <img src=https://avatars.githubusercontent.com/u/1449133?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ma6174/> - <br /> - <sub style="font-size:14px"><b>ma6174</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/l00ps> - <img src=https://avatars.githubusercontent.com/u/7349576?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lööps/> - <br /> - <sub style="font-size:14px"><b>lööps</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/lion24> - <img src=https://avatars.githubusercontent.com/u/1382102?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lionel.codes/> - <br /> - <sub style="font-size:14px"><b>lionel.codes</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/magichuihui> - <img src=https://avatars.githubusercontent.com/u/10866198?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=suhelen/> - <br /> - <sub style="font-size:14px"><b>suhelen</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 
150.0"> - <a href=https://github.com/jimyag> - <img src=https://avatars.githubusercontent.com/u/69233189?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=jimyag/> - <br /> - <sub style="font-size:14px"><b>jimyag</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ignoramous> - <img src=https://avatars.githubusercontent.com/u/852289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ignoramous/> - <br /> - <sub style="font-size:14px"><b>ignoramous</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/nning> - <img src=https://avatars.githubusercontent.com/u/557430?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=henning mueller/> - <br /> - <sub style="font-size:14px"><b>henning mueller</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/foosinn> - <img src=https://avatars.githubusercontent.com/u/8914163?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=foosinn/> - <br /> - <sub style="font-size:14px"><b>foosinn</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/fortitudepub> - <img src=https://avatars.githubusercontent.com/u/343470?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=dyz/> - <br /> - <sub style="font-size:14px"><b>dyz</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/dnaq> - 
<img src=https://avatars.githubusercontent.com/u/1299717?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=dnaq/> - <br /> - <sub style="font-size:14px"><b>dnaq</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/danielalvsaaker> - <img src=https://avatars.githubusercontent.com/u/30574112?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=danielalvsaaker/> - <br /> - <sub style="font-size:14px"><b>danielalvsaaker</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/JJGadgets> - <img src=https://avatars.githubusercontent.com/u/5709019?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JJGadgets/> - <br /> - <sub style="font-size:14px"><b>JJGadgets</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/gabe565> - <img src=https://avatars.githubusercontent.com/u/7717888?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Gabe Cook/> - <br /> - <sub style="font-size:14px"><b>Gabe Cook</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/felixonmars> - <img src=https://avatars.githubusercontent.com/u/1006477?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Yan/> - <br /> - <sub style="font-size:14px"><b>Felix Yan</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/fkr> - <img 
src=https://avatars.githubusercontent.com/u/51063?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Kronlage-Dammers/> - <br /> - <sub style="font-size:14px"><b>Felix Kronlage-Dammers</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/fatih-acar> - <img src=https://avatars.githubusercontent.com/u/15028881?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=fatih-acar/> - <br /> - <sub style="font-size:14px"><b>fatih-acar</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/kundel> - <img src=https://avatars.githubusercontent.com/u/10158899?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Darrell Kundel/> - <br /> - <sub style="font-size:14px"><b>Darrell Kundel</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/yangchuansheng> - <img src=https://avatars.githubusercontent.com/u/15308462?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt= Carson Yang/> - <br /> - <sub style="font-size:14px"><b> Carson Yang</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/clfs> - <img src=https://avatars.githubusercontent.com/u/12112217?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Calvin Figuereo-Supraner/> - <br /> - <sub style="font-size:14px"><b>Calvin Figuereo-Supraner</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/stensonb> - <img src=https://avatars.githubusercontent.com/u/933389?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Bryan Stenson/> - <br /> - <sub style="font-size:14px"><b>Bryan Stenson</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/winterheart> - <img src=https://avatars.githubusercontent.com/u/81112?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azamat H. Hackimov/> - <br /> - <sub style="font-size:14px"><b>Azamat H. Hackimov</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/avirut> - <img src=https://avatars.githubusercontent.com/u/27095602?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Avirut Mehta/> - <br /> - <sub style="font-size:14px"><b>Avirut Mehta</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/awoimbee> - <img src=https://avatars.githubusercontent.com/u/22431493?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arthur Woimbée/> - <br /> - <sub style="font-size:14px"><b>Arthur Woimbée</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/arnarg> - <img src=https://avatars.githubusercontent.com/u/1291396?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arnar/> - <br /> - <sub style="font-size:14px"><b>Arnar</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/aofei> - <img src=https://avatars.githubusercontent.com/u/5037285?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aofei Sheng/> - <br /> - <sub style="font-size:14px"><b>Aofei Sheng</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/tony1661> - <img src=https://avatars.githubusercontent.com/u/5287266?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antonio Fernandez/> - <br /> - <sub style="font-size:14px"><b>Antonio Fernandez</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/apognu> - <img src=https://avatars.githubusercontent.com/u/3017182?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antoine POPINEAU/> - <br /> - <sub style="font-size:14px"><b>Antoine POPINEAU</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/theryecatcher> - <img src=https://avatars.githubusercontent.com/u/16442416?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anoop Sundaresh/> - <br /> - <sub style="font-size:14px"><b>Anoop Sundaresh</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/alexhalbi> - <img src=https://avatars.githubusercontent.com/u/5500720?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Alexander Halbarth/> - <br /> - <sub style="font-size:14px"><b>Alexander Halbarth</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; 
width: 150.0; height: 150.0"> - <a href=https://github.com/iFargle> - <img src=https://avatars.githubusercontent.com/u/124551390?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Albert Copeland/> - <br /> - <sub style="font-size:14px"><b>Albert Copeland</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/aberoham> - <img src=https://avatars.githubusercontent.com/u/586805?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Abraham Ingersoll/> - <br /> - <sub style="font-size:14px"><b>Abraham Ingersoll</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ryanfowler> - <img src=https://avatars.githubusercontent.com/u/2668821?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ryan Fowler/> - <br /> - <sub style="font-size:14px"><b>Ryan Fowler</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/renovate-bot> - <img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mend Renovate/> - <br /> - <sub style="font-size:14px"><b>Mend Renovate</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/rcursaru> - <img src=https://avatars.githubusercontent.com/u/16259641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=rcursaru/> - <br /> - <sub style="font-size:14px"><b>rcursaru</b></sub> - </a> - </td> - <td align="center" style="word-wrap: 
break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/nnsee> - <img src=https://avatars.githubusercontent.com/u/36747857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Rasmus Moorats/> - <br /> - <sub style="font-size:14px"><b>Rasmus Moorats</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/donran> - <img src=https://avatars.githubusercontent.com/u/4838348?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pontus N/> - <br /> - <sub style="font-size:14px"><b>Pontus N</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/piec> - <img src=https://avatars.githubusercontent.com/u/781471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pierre Carru/> - <br /> - <sub style="font-size:14px"><b>Pierre Carru</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/pkrivanec> - <img src=https://avatars.githubusercontent.com/u/25530641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Philipp Krivanec/> - <br /> - <sub style="font-size:14px"><b>Philipp Krivanec</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mikejsavage> - <img src=https://avatars.githubusercontent.com/u/579299?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael Savage/> - <br /> - <sub style="font-size:14px"><b>Michael Savage</b></sub> - </a> - </td> - <td align="center" style="word-wrap: 
break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/mhameed> - <img src=https://avatars.githubusercontent.com/u/447017?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mesar Hameed/> - <br /> - <sub style="font-size:14px"><b>Mesar Hameed</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/foxtrot> - <img src=https://avatars.githubusercontent.com/u/4153572?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Marc/> - <br /> - <sub style="font-size:14px"><b>Marc</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/Lucalux> - <img src=https://avatars.githubusercontent.com/u/70356955?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Lucalux/> - <br /> - <sub style="font-size:14px"><b>Lucalux</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/lgrn> - <img src=https://avatars.githubusercontent.com/u/735192?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Linus/> - <br /> - <sub style="font-size:14px"><b>Linus</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/win-t> - <img src=https://avatars.githubusercontent.com/u/1589120?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Kurnia D Win/> - <br /> - <sub style="font-size:14px"><b>Kurnia D Win</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a 
href=https://github.com/JulienFloris> - <img src=https://avatars.githubusercontent.com/u/20380255?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Julien Zweverink/> - <br /> - <sub style="font-size:14px"><b>Julien Zweverink</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/ShadowJonathan> - <img src=https://avatars.githubusercontent.com/u/22740616?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan de Jong/> - <br /> - <sub style="font-size:14px"><b>Jonathan de Jong</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/johnae> - <img src=https://avatars.githubusercontent.com/u/28332?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=John Axel Eriksson/> - <br /> - <sub style="font-size:14px"><b>John Axel Eriksson</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/jsiebens> - <img src=https://avatars.githubusercontent.com/u/499769?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Johan Siebens/> - <br /> - <sub style="font-size:14px"><b>Johan Siebens</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/jimt> - <img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/> - <br /> - <sub style="font-size:14px"><b>Jim Tittsler</b></sub> - </a> - </td> -</tr> -<tr> - <td align="center" style="word-wrap: break-word; width: 
150.0; height: 150.0"> - <a href=https://github.com/jessebot> - <img src=https://avatars.githubusercontent.com/u/2389292?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JesseBot/> - <br /> - <sub style="font-size:14px"><b>JesseBot</b></sub> - </a> - </td> - <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> - <a href=https://github.com/hrtkpf> - <img src=https://avatars.githubusercontent.com/u/42646788?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=hrtkpf/> - <br /> - <sub style="font-size:14px"><b>hrtkpf</b></sub> - </a> - </td> -</tr> -</table> +<a href="https://github.com/juanfont/headscale/graphs/contributors"> + <img src="https://contrib.rocks/image?repo=juanfont/headscale" /> +</a> + +Made with [contrib.rocks](https://contrib.rocks). diff --git a/cmd/headscale/cli/nodes.go b/cmd/headscale/cli/nodes.go index ac996245..58890cb0 100644 --- a/cmd/headscale/cli/nodes.go +++ b/cmd/headscale/cli/nodes.go @@ -97,6 +97,8 @@ func init() { tagCmd.Flags(). StringSliceP("tags", "t", []string{}, "List of tags to add to the node") nodeCmd.AddCommand(tagCmd) + + nodeCmd.AddCommand(backfillNodeIPsCmd) } var nodeCmd = &cobra.Command{ @@ -477,6 +479,57 @@ var moveNodeCmd = &cobra.Command{ }, } +var backfillNodeIPsCmd = &cobra.Command{ + Use: "backfillips", + Short: "Backfill IPs missing from nodes", + Long: ` +Backfill IPs can be used to add/remove IPs from nodes +based on the current configuration of Headscale. + +If there are nodes that does not have IPv4 or IPv6 +even if prefixes for both are configured in the config, +this command can be used to assign IPs of the sort to +all nodes that are missing. 
+ +If you remove IPv4 or IPv6 prefixes from the config, +it can be run to remove the IPs that should no longer +be assigned to nodes.`, + Run: func(cmd *cobra.Command, args []string) { + var err error + output, _ := cmd.Flags().GetString("output") + + confirm := false + prompt := &survey.Confirm{ + Message: "Are you sure that you want to assign/remove IPs to/from nodes?", + } + err = survey.AskOne(prompt, &confirm) + if err != nil { + return + } + if confirm { + ctx, client, conn, cancel := getHeadscaleCLIClient() + defer cancel() + defer conn.Close() + + changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm}) + if err != nil { + ErrorOutput( + err, + fmt.Sprintf( + "Error backfilling IPs: %s", + status.Convert(err).Message(), + ), + output, + ) + + return + } + + SuccessOutput(changes, "Node IPs backfilled successfully", output) + } + }, +} + func nodesToPtables( currentUser string, showTags bool, diff --git a/cmd/headscale/cli/root.go b/cmd/headscale/cli/root.go index 40a9b18a..b0d9500e 100644 --- a/cmd/headscale/cli/root.go +++ b/cmd/headscale/cli/root.go @@ -51,13 +51,11 @@ func initConfig() { cfg, err := types.GetHeadscaleConfig() if err != nil { - log.Fatal().Caller().Err(err).Msg("Failed to get headscale configuration") + log.Fatal().Err(err).Msg("Failed to read headscale configuration") } machineOutput := HasMachineOutputFlag() - zerolog.SetGlobalLevel(cfg.Log.Level) - // If the user has requested a "node" readable format, // then disable login so the output remains valid. if machineOutput { @@ -78,7 +76,7 @@ func initConfig() { res, err := latest.Check(githubTag, Version) if err == nil && res.Outdated { //nolint - fmt.Printf( + log.Warn().Msgf( "An updated version of Headscale has been found (%s vs. your current %s). 
Check it out https://github.com/juanfont/headscale/releases\n", res.Current, Version, diff --git a/cmd/headscale/headscale.go b/cmd/headscale/headscale.go index 3f3322e2..fa17bf6d 100644 --- a/cmd/headscale/headscale.go +++ b/cmd/headscale/headscale.go @@ -4,7 +4,7 @@ import ( "os" "time" - "github.com/efekarakus/termcolor" + "github.com/jagottsicher/termcolor" "github.com/juanfont/headscale/cmd/headscale/cli" "github.com/rs/zerolog" "github.com/rs/zerolog/log" diff --git a/config-example.yaml b/config-example.yaml index 548868ce..668fa39a 100644 --- a/config-example.yaml +++ b/config-example.yaml @@ -66,6 +66,11 @@ prefixes: v6: fd7a:115c:a1e0::/48 v4: 100.64.0.0/10 + # Strategy used for allocation of IPs to nodes, available options: + # - sequential (default): assigns the next free IP from the previous given IP. + # - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand). + allocation: sequential + # DERP is a relay system that Tailscale uses when a direct # connection cannot be established. # https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp @@ -105,7 +110,7 @@ derp: automatically_add_embedded_derp_region: true # For better connection stability (especially when using an Exit-Node and DNS is not working), - # it is possible to optionall add the public IPv4 and IPv6 address to the Derp-Map using: + # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using: ipv4: 1.2.3.4 ipv6: 2001:db8::1 @@ -137,12 +142,6 @@ disable_check_updates: false # Time before an inactive ephemeral node is deleted? ephemeral_node_inactivity_timeout: 30m -# Period to check for node updates within the tailnet. A value too low will severely affect -# CPU consumption of Headscale. A value too high (over 60s) will cause problems -# for the nodes, as they won't get updates or keep alive messages frequently enough. -# In case of doubts, do not touch the default 10s. 
-node_update_check_interval: 10s - database: type: sqlite @@ -205,7 +204,7 @@ log: format: text level: info -# Path to a file containg ACL policies. +# Path to a file containing ACL policies. # ACLs can be defined as YAML or HUJSON. # https://tailscale.com/kb/1018/acls/ acl_policy_path: "" diff --git a/docs/exit-node.md b/docs/exit-node.md index 898b7811..831652b3 100644 --- a/docs/exit-node.md +++ b/docs/exit-node.md @@ -14,7 +14,7 @@ If the node is already registered, it can advertise exit capabilities like this: $ sudo tailscale set --advertise-exit-node ``` -To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP fowarding. +To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP forwarding. ## On the control server diff --git a/docs/faq.md b/docs/faq.md index fff96132..ba30911b 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -36,7 +36,7 @@ We don't know. We might be working on it. If you want to help, please send us a Please be aware that there are a number of reasons why we might not accept specific contributions: - It is not possible to implement the feature in a way that makes sense in a self-hosted environment. -- Given that we are reverse-engineering Tailscale to satify our own curiosity, we might be interested in implementing the feature ourselves. +- Given that we are reverse-engineering Tailscale to satisfy our own curiosity, we might be interested in implementing the feature ourselves. - You are not sending unit and integration tests with it. ## Do you support Y method of deploying Headscale? 
diff --git a/docs/images/headscale-sealos-grpc-url.png b/docs/images/headscale-sealos-grpc-url.png new file mode 100644 index 00000000..1b0df4f3 Binary files /dev/null and b/docs/images/headscale-sealos-grpc-url.png differ diff --git a/docs/images/headscale-sealos-url.png b/docs/images/headscale-sealos-url.png new file mode 100644 index 00000000..66233698 Binary files /dev/null and b/docs/images/headscale-sealos-url.png differ diff --git a/docs/proposals/001-acls.md b/docs/proposals/001-acls.md index 8a02e836..74bcd13e 100644 --- a/docs/proposals/001-acls.md +++ b/docs/proposals/001-acls.md @@ -58,12 +58,12 @@ A solution could be to consider a headscale server (in it's entirety) as a tailnet. For personal users the default behavior could either allow all communications -between all namespaces (like tailscale) or dissallow all communications between +between all namespaces (like tailscale) or disallow all communications between namespaces (current behavior). For businesses and organisations, viewing a headscale instance a single tailnet would allow users (namespace) to talk to each other with the ACLs. As described -in tailscale's documentation [[1]], a server should be tagged and personnal +in tailscale's documentation [[1]], a server should be tagged and personal devices should be tied to a user. Translated in headscale's terms each user can have multiple devices and all those devices should be in the same namespace. The servers should be tagged and used as such. @@ -88,7 +88,7 @@ the ability to rules in either format (HuJSON or YAML). Let's build an example use case for a small business (It may be the place where ACL's are the most useful). -We have a small company with a boss, an admin, two developper and an intern. +We have a small company with a boss, an admin, two developer and an intern. The boss should have access to all servers but not to the users hosts. 
Admin should also have access to all hosts except that their permissions should be @@ -173,7 +173,7 @@ need to add the following ACLs "ports": ["prod:*", "dev:*", "internal:*"] }, - // admin have access to adminstration port (lets only consider port 22 here) + // admin have access to administration port (lets only consider port 22 here) { "action": "accept", "users": ["group:admin"], diff --git a/docs/remote-cli.md b/docs/remote-cli.md index 96a6333a..3d44eabc 100644 --- a/docs/remote-cli.md +++ b/docs/remote-cli.md @@ -1,13 +1,13 @@ # Controlling `headscale` with remote CLI -## Prerequisit +## Prerequisite - A workstation to run `headscale` (could be Linux, macOS, other supported platforms) - A `headscale` server (version `0.13.0` or newer) - Access to create API keys (local access to the `headscale` server) - `headscale` _must_ be served over TLS/HTTPS - Remote access does _not_ support unencrypted traffic. -- Port `50443` must be open in the firewall (or port overriden by `grpc_listen_addr` option) +- Port `50443` must be open in the firewall (or port overridden by `grpc_listen_addr` option) ## Goal @@ -97,4 +97,4 @@ Checklist: - Make sure you use version `0.13.0` or newer. - Verify that your TLS certificate is valid and trusted - If you do not have access to a trusted certificate (e.g. 
from Let's Encrypt), add your self signed certificate to the trust store of your OS or - - Set `HEADSCALE_CLI_INSECURE` to 0 in your environement + - Set `HEADSCALE_CLI_INSECURE` to 0 in your environment diff --git a/docs/requirements.txt b/docs/requirements.txt index 32bd08c1..bcbf7c0e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,4 @@ cairosvg~=2.7.1 -mkdocs-material~=9.4.14 +mkdocs-material~=9.5.18 mkdocs-minify-plugin~=0.7.1 pillow~=10.1.0 - diff --git a/docs/reverse-proxy.md b/docs/reverse-proxy.md index 1f417c9b..c6fd4b16 100644 --- a/docs/reverse-proxy.md +++ b/docs/reverse-proxy.md @@ -115,7 +115,7 @@ The following Caddyfile is all that is necessary to use Caddy as a reverse proxy } ``` -Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certficate for your domain/subdomain, force HTTPS, and proxy websockets - no further configuration is necessary. +Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certificate for your domain/subdomain, force HTTPS, and proxy websockets - no further configuration is necessary. For a slightly more complex configuration which utilizes Docker containers to manage Caddy, Headscale, and Headscale-UI, [Guru Computing's guide](https://blog.gurucomputing.com.au/smart-vpns-with-headscale/) is an excellent reference. diff --git a/docs/running-headscale-linux.md b/docs/running-headscale-linux.md index 5f906009..f08789c4 100644 --- a/docs/running-headscale-linux.md +++ b/docs/running-headscale-linux.md @@ -20,17 +20,19 @@ configuration (`/etc/headscale/config.yaml`). ## Installation -1. Download the latest Headscale package for your platform (`.deb` for Ubuntu and Debian) from [Headscale's releases page](https://github.com/juanfont/headscale/releases): +1. Download the [latest Headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian). 
```shell + HEADSCALE_VERSION="" # See above URL for latest version, e.g. "X.Y.Z" (NOTE: do not add the "v" prefix!) + HEADSCALE_ARCH="" # Your system architecture, e.g. "amd64" wget --output-document=headscale.deb \ - https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>.deb + "https://github.com/juanfont/headscale/releases/download/v${HEADSCALE_VERSION}/headscale_${HEADSCALE_VERSION}_linux_${HEADSCALE_ARCH}.deb" ``` 1. Install Headscale: ```shell - sudo apt install headscale.deb + sudo apt install ./headscale.deb ``` 1. Enable Headscale service, this will start Headscale at boot: diff --git a/docs/running-headscale-openbsd.md b/docs/running-headscale-openbsd.md index 29e340fc..e1d8d83f 100644 --- a/docs/running-headscale-openbsd.md +++ b/docs/running-headscale-openbsd.md @@ -9,19 +9,17 @@ ## Goal -This documentation has the goal of showing a user how-to install and run `headscale` on OpenBSD 7.1. +This documentation has the goal of showing a user how-to install and run `headscale` on OpenBSD. In additional to the "get up and running section", there is an optional [rc.d section](#running-headscale-in-the-background-with-rcd) describing how to make `headscale` run properly in a server environment. ## Install `headscale` -1. Install from ports (not recommended) +1. Install from ports - !!! info + You can install headscale from ports by running `pkg_add headscale`. - As of OpenBSD 7.2, there's a headscale in ports collection, however, it's severely outdated(v0.12.4). You can install it via `pkg_add headscale`. - -1. Install from source on OpenBSD 7.2 +1. Install from source ```shell # Install prerequistes @@ -32,7 +30,7 @@ describing how to make `headscale` run properly in a server environment. cd headscale # optionally checkout a release - # option a. you can find offical relase at https://github.com/juanfont/headscale/releases/latest + # option a. 
you can find official release at https://github.com/juanfont/headscale/releases/latest # option b. get latest tag, this may be a beta release latestTag=$(git describe --tags `git rev-list --tags --max-count=1`) @@ -59,7 +57,7 @@ describing how to make `headscale` run properly in a server environment. cd headscale # optionally checkout a release - # option a. you can find offical relase at https://github.com/juanfont/headscale/releases/latest + # option a. you can find official release at https://github.com/juanfont/headscale/releases/latest # option b. get latest tag, this may be a beta release latestTag=$(git describe --tags `git rev-list --tags --max-count=1`) diff --git a/docs/running-headscale-sealos.md b/docs/running-headscale-sealos.md new file mode 100644 index 00000000..01aecb0e --- /dev/null +++ b/docs/running-headscale-sealos.md @@ -0,0 +1,136 @@ +# Running headscale on Sealos + +!!! warning "Community documentation" + + This page is not actively maintained by the headscale authors and is + written by community members. It is _not_ verified by `headscale` developers. + + **It might be outdated and it might miss necessary steps**. + +## Goal + +This documentation has the goal of showing a user how-to run `headscale` on Sealos. + +## Running headscale server + +1. Click the following prebuilt template(version [0.23.0-alpha2](https://github.com/juanfont/headscale/releases/tag/v0.23.0-alpha2)): + + [![](https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dheadscale) + +2. Click "Deploy Application" on the template page to start deployment. Upon completion, two applications appear: Headscale, and its [visual interface](https://github.com/GoodiesHQ/headscale-admin). +3. Once deployment concludes, click 'Details' on the Headscale application page to navigate to the application's details. +4. Wait for the application's status to switch to running. 
For accessing the headscale server, the Public Address associated with port 8080 is the address of the headscale server. To access the Headscale console, simply append `/admin/` to the Headscale public URL. + + ![](./images/headscale-sealos-url.png) + +5. Click on 'Terminal' button on the right side of the details to access the Terminal of the headscale application, then create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)): + + ```bash + headscale users create myfirstuser + ``` + +### Register a machine (normal login) + +On a client machine, execute the `tailscale` login command: + +```bash +# replace <YOUR_HEADSCALE_URL> with the public domain provided by Sealos +tailscale up --login-server YOUR_HEADSCALE_URL +``` + +To register a machine when running headscale in [Sealos](https://sealos.io), click on 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then run the headscale command: + +```bash +headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY> +``` + +### Register machine using a pre authenticated key + +Click on 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then generate a key using the command line: + +```bash +headscale --user myfirstuser preauthkeys create --reusable --expiration 24h +``` + +This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command: + +```bash +tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY> +``` + +## Controlling headscale with remote CLI + +This documentation has the goal of showing a user how to control a headscale instance from a remote machine with the headscale command line binary. + +### Create an API key + +We need to create an API key to authenticate our remote headscale when using it from our workstation. 
+
+To create an API key, click on the 'Terminal' button on the right side of the headscale application's detail page to access the Terminal of the headscale application, then generate a key:
+
+```bash
+headscale apikeys create --expiration 90d
+```
+
+Copy the output of the command and save it for later. Please note that you can not retrieve a key again; if the key is lost, expire the old one, and create a new key.
+
+To list the keys currently associated with the server:
+
+```bash
+headscale apikeys list
+```
+
+and to expire a key:
+
+```bash
+headscale apikeys expire --prefix "<PREFIX>"
+```
+
+### Download and configure `headscale` client
+
+1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
+
+2. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
+
+3. Make `headscale` executable:
+
+```shell
+chmod +x /usr/local/bin/headscale
+```
+
+4. Configure the CLI through Environment Variables
+
+```shell
+export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:443"
+export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>"
+```
+
+In the headscale application's detail page, the Public Address corresponding to port 50443 corresponds to the value of <HEADSCALE ADDRESS>.
+
+![](./images/headscale-sealos-grpc-url.png)
+
+For example:
+
+```shell
+export HEADSCALE_CLI_ADDRESS="pwnjnnly.cloud.sealos.io:443"
+export HEADSCALE_CLI_API_KEY="abcde12345"
+```
+
+This will tell the `headscale` binary to connect to a remote instance, instead of looking
+for a local instance.
+
+The API key is needed to make sure that you are allowed to access the server. The key is _not_
+needed when running directly on the server, as the connection is local.
+
+5. 
Test the connection + +Let us run the headscale command to verify that we can connect by listing our nodes: + +```shell +headscale nodes list +``` + +You should now be able to see a list of your nodes from your workstation, and you can +now control the `headscale` server from your workstation. + +> Reference: [Headscale Deployment and Usage Guide: Mastering Tailscale's Self-Hosting Basics](https://icloudnative.io/en/posts/how-to-set-up-or-migrate-headscale/) diff --git a/docs/web-ui.md b/docs/web-ui.md index d018666e..fae71be1 100644 --- a/docs/web-ui.md +++ b/docs/web-ui.md @@ -5,10 +5,11 @@ This page contains community contributions. The projects listed here are not maintained by the Headscale authors and are written by community members. -| Name | Repository Link | Description | Status | -| --------------- | ------------------------------------------------------- | ------------------------------------------------------------------------- | ------ | -| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple Headscale web UI for small-scale deployments. | Alpha | -| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server | Alpha | -| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend enviroment required | Alpha | +| Name | Repository Link | Description | Status | +| --------------- | ------------------------------------------------------- | --------------------------------------------------------------------------- | ------ | +| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple Headscale web UI for small-scale deployments. 
| Alpha | +| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server | Alpha | +| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend enviroment required | Alpha | +| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for Headscale | Beta | You can ask for support on our dedicated [Discord channel](https://discord.com/channels/896711691637780480/1105842846386356294). diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index f9e85ff3..00000000 --- a/examples/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Examples - -This directory contains examples on how to run `headscale` on different platforms. - -All examples are provided by the community and they are not verified by the `headscale` authors. diff --git a/examples/kustomize/.gitignore b/examples/kustomize/.gitignore deleted file mode 100644 index 229058d2..00000000 --- a/examples/kustomize/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/**/site -/**/secrets diff --git a/examples/kustomize/README.md b/examples/kustomize/README.md deleted file mode 100644 index cc57f147..00000000 --- a/examples/kustomize/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Deploying headscale on Kubernetes - -**Note:** This is contributed by the community and not verified by the headscale authors. - -This directory contains [Kustomize](https://kustomize.io) templates that deploy -headscale in various configurations. - -These templates currently support Rancher k3s. Other clusters may require -adaptation, especially around volume claims and ingress. - -Commands below assume this directory is your current working directory. - -# Generate secrets and site configuration - -Run `./init.bash` to generate keys, passwords, and site configuration files. 
- -Edit `base/site/public.env`, changing `public-hostname` to the public DNS name -that will be used for your headscale deployment. - -Set `public-proto` to "https" if you're planning to use TLS & Let's Encrypt. - -Configure DERP servers by editing `base/site/derp.yaml` if needed. - -# Add the image to the registry - -You'll somehow need to get `headscale:latest` into your cluster image registry. - -An easy way to do this with k3s: - -- Reconfigure k3s to use docker instead of containerd (`k3s server --docker`) -- `docker build -t headscale:latest ..` from here - -# Create the namespace - -If it doesn't already exist, `kubectl create ns headscale`. - -# Deploy headscale - -## sqlite - -`kubectl -n headscale apply -k ./sqlite` - -## postgres - -`kubectl -n headscale apply -k ./postgres` - -# TLS & Let's Encrypt - -Test a staging certificate with your configured DNS name and Let's Encrypt. - -`kubectl -n headscale apply -k ./staging-tls` - -Replace with a production certificate. - -`kubectl -n headscale apply -k ./production-tls` - -## Static / custom TLS certificates - -Only Let's Encrypt is supported. If you need other TLS settings, modify or patch the ingress. - -# Administration - -Use the wrapper script to remotely operate headscale to perform administrative -tasks like creating namespaces, authkeys, etc. - -``` -[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash - -headscale is an open source implementation of the Tailscale control server - -https://github.com/juanfont/headscale - -Usage: - headscale [command] - -Available Commands: - help Help about any command - namespace Manage the namespaces of headscale - node Manage the nodes of headscale - preauthkey Handle the preauthkeys in headscale - routes Manage the routes of headscale - serve Launches the headscale server - version Print the version. - -Flags: - -h, --help help for headscale - -o, --output string Output format. 
Empty for human-readable, 'json' or 'json-line' - -Use "headscale [command] --help" for more information about a command. - -``` - -# TODO / Ideas - -- Interpolate `email:` option to the ClusterIssuer from site configuration. - This probably needs to be done with a transformer, kustomize vars don't seem to work. -- Add kustomize examples for cloud-native ingress, load balancer -- CockroachDB for the backend -- DERP server deployment -- Tor hidden service diff --git a/examples/kustomize/base/configmap.yaml b/examples/kustomize/base/configmap.yaml deleted file mode 100644 index 0ac2d563..00000000 --- a/examples/kustomize/base/configmap.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: headscale-config -data: - server_url: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME) - listen_addr: "0.0.0.0:8080" - metrics_listen_addr: "127.0.0.1:9090" - ephemeral_node_inactivity_timeout: "30m" diff --git a/examples/kustomize/base/ingress.yaml b/examples/kustomize/base/ingress.yaml deleted file mode 100644 index 51da3427..00000000 --- a/examples/kustomize/base/ingress.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: headscale - annotations: - kubernetes.io/ingress.class: traefik -spec: - rules: - - host: $(PUBLIC_HOSTNAME) - http: - paths: - - backend: - service: - name: headscale - port: - number: 8080 - path: / - pathType: Prefix diff --git a/examples/kustomize/base/kustomization.yaml b/examples/kustomize/base/kustomization.yaml deleted file mode 100644 index 93278f7d..00000000 --- a/examples/kustomize/base/kustomization.yaml +++ /dev/null @@ -1,42 +0,0 @@ -namespace: headscale -resources: - - configmap.yaml - - ingress.yaml - - service.yaml -generatorOptions: - disableNameSuffixHash: true -configMapGenerator: - - name: headscale-site - files: - - derp.yaml=site/derp.yaml - envs: - - site/public.env - - name: headscale-etc - literals: - - config.json={} -secretGenerator: - - name: headscale - files: 
- - secrets/private-key -vars: - - name: PUBLIC_PROTO - objRef: - kind: ConfigMap - name: headscale-site - apiVersion: v1 - fieldRef: - fieldPath: data.public-proto - - name: PUBLIC_HOSTNAME - objRef: - kind: ConfigMap - name: headscale-site - apiVersion: v1 - fieldRef: - fieldPath: data.public-hostname - - name: CONTACT_EMAIL - objRef: - kind: ConfigMap - name: headscale-site - apiVersion: v1 - fieldRef: - fieldPath: data.contact-email diff --git a/examples/kustomize/base/service.yaml b/examples/kustomize/base/service.yaml deleted file mode 100644 index 39e67253..00000000 --- a/examples/kustomize/base/service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: headscale - labels: - app: headscale -spec: - selector: - app: headscale - ports: - - name: http - targetPort: http - port: 8080 diff --git a/examples/kustomize/headscale.bash b/examples/kustomize/headscale.bash deleted file mode 100755 index 66bfe92c..00000000 --- a/examples/kustomize/headscale.bash +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -set -eu -exec kubectl -n headscale exec -ti pod/headscale-0 -- /go/bin/headscale "$@" diff --git a/examples/kustomize/init.bash b/examples/kustomize/init.bash deleted file mode 100755 index e5b7965c..00000000 --- a/examples/kustomize/init.bash +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -eux -cd $(dirname $0) - -umask 022 -mkdir -p base/site/ -[ ! -e base/site/public.env ] && ( - cat >base/site/public.env <<EOF -public-hostname=localhost -public-proto=http -contact-email=headscale@example.com -EOF -) -[ ! -e base/site/derp.yaml ] && cp ../derp.yaml base/site/derp.yaml - -umask 077 -mkdir -p base/secrets/ -[ ! -e base/secrets/private-key ] && ( - wg genkey > base/secrets/private-key -) -mkdir -p postgres/secrets/ -[ ! 
-e postgres/secrets/password ] && (head -c 32 /dev/urandom | base64 -w0 > postgres/secrets/password) diff --git a/examples/kustomize/install-cert-manager.bash b/examples/kustomize/install-cert-manager.bash deleted file mode 100755 index 1a5ecacb..00000000 --- a/examples/kustomize/install-cert-manager.bash +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash -set -eux -kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.4.0/cert-manager.yaml diff --git a/examples/kustomize/postgres/deployment.yaml b/examples/kustomize/postgres/deployment.yaml deleted file mode 100644 index 1dd88b41..00000000 --- a/examples/kustomize/postgres/deployment.yaml +++ /dev/null @@ -1,81 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: headscale -spec: - replicas: 2 - selector: - matchLabels: - app: headscale - template: - metadata: - labels: - app: headscale - spec: - containers: - - name: headscale - image: "headscale:latest" - imagePullPolicy: IfNotPresent - command: ["/go/bin/headscale", "serve"] - env: - - name: SERVER_URL - value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME) - - name: LISTEN_ADDR - valueFrom: - configMapKeyRef: - name: headscale-config - key: listen_addr - - name: METRICS_LISTEN_ADDR - valueFrom: - configMapKeyRef: - name: headscale-config - key: metrics_listen_addr - - name: DERP_MAP_PATH - value: /vol/config/derp.yaml - - name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT - valueFrom: - configMapKeyRef: - name: headscale-config - key: ephemeral_node_inactivity_timeout - - name: DB_TYPE - value: postgres - - name: DB_HOST - value: postgres.headscale.svc.cluster.local - - name: DB_PORT - value: "5432" - - name: DB_USER - value: headscale - - name: DB_PASS - valueFrom: - secretKeyRef: - name: postgresql - key: password - - name: DB_NAME - value: headscale - ports: - - name: http - protocol: TCP - containerPort: 8080 - livenessProbe: - tcpSocket: - port: http - initialDelaySeconds: 30 - timeoutSeconds: 5 - periodSeconds: 15 - volumeMounts: - - 
name: config - mountPath: /vol/config - - name: secret - mountPath: /vol/secret - - name: etc - mountPath: /etc/headscale - volumes: - - name: config - configMap: - name: headscale-site - - name: etc - configMap: - name: headscale-etc - - name: secret - secret: - secretName: headscale diff --git a/examples/kustomize/postgres/kustomization.yaml b/examples/kustomize/postgres/kustomization.yaml deleted file mode 100644 index e732e3b9..00000000 --- a/examples/kustomize/postgres/kustomization.yaml +++ /dev/null @@ -1,13 +0,0 @@ -namespace: headscale -bases: - - ../base -resources: - - deployment.yaml - - postgres-service.yaml - - postgres-statefulset.yaml -generatorOptions: - disableNameSuffixHash: true -secretGenerator: - - name: postgresql - files: - - secrets/password diff --git a/examples/kustomize/postgres/postgres-service.yaml b/examples/kustomize/postgres/postgres-service.yaml deleted file mode 100644 index 6252e7f9..00000000 --- a/examples/kustomize/postgres/postgres-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: postgres - labels: - app: postgres -spec: - selector: - app: postgres - ports: - - name: postgres - targetPort: postgres - port: 5432 diff --git a/examples/kustomize/postgres/postgres-statefulset.yaml b/examples/kustomize/postgres/postgres-statefulset.yaml deleted file mode 100644 index b81c9bf0..00000000 --- a/examples/kustomize/postgres/postgres-statefulset.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: postgres -spec: - serviceName: postgres - replicas: 1 - selector: - matchLabels: - app: postgres - template: - metadata: - labels: - app: postgres - spec: - containers: - - name: postgres - image: "postgres:13" - imagePullPolicy: IfNotPresent - env: - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: postgresql - key: password - - name: POSTGRES_USER - value: headscale - ports: - - name: postgres - protocol: TCP - containerPort: 5432 - 
livenessProbe: - tcpSocket: - port: 5432 - initialDelaySeconds: 30 - timeoutSeconds: 5 - periodSeconds: 15 - volumeMounts: - - name: pgdata - mountPath: /var/lib/postgresql/data - volumeClaimTemplates: - - metadata: - name: pgdata - spec: - storageClassName: local-path - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 1Gi diff --git a/examples/kustomize/production-tls/ingress-patch.yaml b/examples/kustomize/production-tls/ingress-patch.yaml deleted file mode 100644 index 9e6177fb..00000000 --- a/examples/kustomize/production-tls/ingress-patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: Ingress -metadata: - name: headscale - annotations: - cert-manager.io/cluster-issuer: letsencrypt-production - traefik.ingress.kubernetes.io/router.tls: "true" -spec: - tls: - - hosts: - - $(PUBLIC_HOSTNAME) - secretName: production-cert diff --git a/examples/kustomize/production-tls/kustomization.yaml b/examples/kustomize/production-tls/kustomization.yaml deleted file mode 100644 index d3147f5f..00000000 --- a/examples/kustomize/production-tls/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ -namespace: headscale -bases: - - ../base -resources: - - production-issuer.yaml -patches: - - path: ingress-patch.yaml - target: - kind: Ingress diff --git a/examples/kustomize/production-tls/production-issuer.yaml b/examples/kustomize/production-tls/production-issuer.yaml deleted file mode 100644 index f436090b..00000000 --- a/examples/kustomize/production-tls/production-issuer.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-production -spec: - acme: - # TODO: figure out how to get kustomize to interpolate this, or use a transformer - #email: $(CONTACT_EMAIL) - server: https://acme-v02.api.letsencrypt.org/directory - privateKeySecretRef: - # Secret resource used to store the account's private key. 
- name: letsencrypt-production-acc-key - solvers: - - http01: - ingress: - class: traefik diff --git a/examples/kustomize/sqlite/kustomization.yaml b/examples/kustomize/sqlite/kustomization.yaml deleted file mode 100644 index ca799419..00000000 --- a/examples/kustomize/sqlite/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -namespace: headscale -bases: - - ../base -resources: - - statefulset.yaml diff --git a/examples/kustomize/sqlite/statefulset.yaml b/examples/kustomize/sqlite/statefulset.yaml deleted file mode 100644 index 2321d39d..00000000 --- a/examples/kustomize/sqlite/statefulset.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: headscale -spec: - serviceName: headscale - replicas: 1 - selector: - matchLabels: - app: headscale - template: - metadata: - labels: - app: headscale - spec: - containers: - - name: headscale - image: "headscale:latest" - imagePullPolicy: IfNotPresent - command: ["/go/bin/headscale", "serve"] - env: - - name: SERVER_URL - value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME) - - name: LISTEN_ADDR - valueFrom: - configMapKeyRef: - name: headscale-config - key: listen_addr - - name: METRICS_LISTEN_ADDR - valueFrom: - configMapKeyRef: - name: headscale-config - key: metrics_listen_addr - - name: DERP_MAP_PATH - value: /vol/config/derp.yaml - - name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT - valueFrom: - configMapKeyRef: - name: headscale-config - key: ephemeral_node_inactivity_timeout - - name: DB_TYPE - value: sqlite3 - - name: DB_PATH - value: /vol/data/db.sqlite - ports: - - name: http - protocol: TCP - containerPort: 8080 - livenessProbe: - tcpSocket: - port: http - initialDelaySeconds: 30 - timeoutSeconds: 5 - periodSeconds: 15 - volumeMounts: - - name: config - mountPath: /vol/config - - name: data - mountPath: /vol/data - - name: secret - mountPath: /vol/secret - - name: etc - mountPath: /etc/headscale - volumes: - - name: config - configMap: - name: headscale-site - - name: etc - configMap: - 
name: headscale-etc - - name: secret - secret: - secretName: headscale - volumeClaimTemplates: - - metadata: - name: data - spec: - storageClassName: local-path - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 1Gi diff --git a/examples/kustomize/staging-tls/ingress-patch.yaml b/examples/kustomize/staging-tls/ingress-patch.yaml deleted file mode 100644 index 5a1daf0c..00000000 --- a/examples/kustomize/staging-tls/ingress-patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: Ingress -metadata: - name: headscale - annotations: - cert-manager.io/cluster-issuer: letsencrypt-staging - traefik.ingress.kubernetes.io/router.tls: "true" -spec: - tls: - - hosts: - - $(PUBLIC_HOSTNAME) - secretName: staging-cert diff --git a/examples/kustomize/staging-tls/kustomization.yaml b/examples/kustomize/staging-tls/kustomization.yaml deleted file mode 100644 index 0900c583..00000000 --- a/examples/kustomize/staging-tls/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ -namespace: headscale -bases: - - ../base -resources: - - staging-issuer.yaml -patches: - - path: ingress-patch.yaml - target: - kind: Ingress diff --git a/examples/kustomize/staging-tls/staging-issuer.yaml b/examples/kustomize/staging-tls/staging-issuer.yaml deleted file mode 100644 index cf290415..00000000 --- a/examples/kustomize/staging-tls/staging-issuer.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-staging -spec: - acme: - # TODO: figure out how to get kustomize to interpolate this, or use a transformer - #email: $(CONTACT_EMAIL) - server: https://acme-staging-v02.api.letsencrypt.org/directory - privateKeySecretRef: - # Secret resource used to store the account's private key. 
- name: letsencrypt-staging-acc-key - solvers: - - http01: - ingress: - class: traefik diff --git a/flake.lock b/flake.lock index 89d88b38..351c657c 100644 --- a/flake.lock +++ b/flake.lock @@ -20,11 +20,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1710534455, - "narHash": "sha256-huQT4Xs0y4EeFKn2BTBVYgEwJSv8SDlm82uWgMnCMmI=", + "lastModified": 1716062047, + "narHash": "sha256-OhysviwHQz4p2HZL4g7XGMLoUbWMjkMr/ogaR3VUYNA=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9af9c1c87ed3e3ed271934cb896e0cdd33dae212", + "rev": "02923630b89aa1ab36ef8e422501a6f4fd4b2016", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 79b75a9a..5d4978ca 100644 --- a/flake.nix +++ b/flake.nix @@ -31,7 +31,7 @@ # When updating go.mod or go.sum, a new sha will need to be calculated, # update this if you have a mismatch after doing a change to thos files. - vendorHash = "sha256-z3IXmr8SK8oUJTnw7gTok6zpLf15kE89q6zYKbMA5AI="; + vendorHash = "sha256-EorT2AVwA3usly/LcNor6r5UIhLCdj3L4O4ilgTIC2o="; subPackages = ["cmd/headscale"]; diff --git a/gen/go/headscale/v1/apikey.pb.go b/gen/go/headscale/v1/apikey.pb.go index d1a5f555..c4377e48 100644 --- a/gen/go/headscale/v1/apikey.pb.go +++ b/gen/go/headscale/v1/apikey.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: headscale/v1/apikey.proto diff --git a/gen/go/headscale/v1/device.pb.go b/gen/go/headscale/v1/device.pb.go index 40e2e24f..7a382dd6 100644 --- a/gen/go/headscale/v1/device.pb.go +++ b/gen/go/headscale/v1/device.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: headscale/v1/device.proto diff --git a/gen/go/headscale/v1/headscale.pb.go b/gen/go/headscale/v1/headscale.pb.go index b1af2fa5..9de6b060 100644 --- a/gen/go/headscale/v1/headscale.pb.go +++ b/gen/go/headscale/v1/headscale.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: headscale/v1/headscale.proto @@ -36,7 +36,7 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{ 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x32, 0xfd, 0x17, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, + 0x6f, 0x32, 0x80, 0x19, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, @@ -161,77 +161,85 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{ 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x75, - 0x73, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, - 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, - 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, - 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, - 0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, - 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, + 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, + 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, + 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, + 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, + 0x6b, 0x66, 
0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x18, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x66, + 0x69, 0x6c, 0x6c, 0x69, 0x70, 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, + 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, + 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, - 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, - 0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x7f, 0x0a, 0x0d, 0x47, 0x65, - 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x68, 0x65, - 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, - 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, - 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x0b, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, - 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, - 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, - 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, - 0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, - 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 
0x76, - 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, + 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x7f, 0x0a, + 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x22, + 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, + 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x75, + 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, + 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, + 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, - 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, - 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, + 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, + 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, + 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 
0x2f, 0x76, 0x31, 0x2f, - 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x6a, 0x0a, - 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, - 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, - 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x76, 0x0a, 0x0c, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, - 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, - 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, + 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 
0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x76, 0x0a, 0x0c, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x2a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x7b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x7d, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, - 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x2a, 0x17, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x7b, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x7d, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_headscale_v1_headscale_proto_goTypes = []interface{}{ @@ -252,41 +260,43 
@@ var file_headscale_v1_headscale_proto_goTypes = []interface{}{ (*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest (*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest (*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest - (*GetRoutesRequest)(nil), // 17: headscale.v1.GetRoutesRequest - (*EnableRouteRequest)(nil), // 18: headscale.v1.EnableRouteRequest - (*DisableRouteRequest)(nil), // 19: headscale.v1.DisableRouteRequest - (*GetNodeRoutesRequest)(nil), // 20: headscale.v1.GetNodeRoutesRequest - (*DeleteRouteRequest)(nil), // 21: headscale.v1.DeleteRouteRequest - (*CreateApiKeyRequest)(nil), // 22: headscale.v1.CreateApiKeyRequest - (*ExpireApiKeyRequest)(nil), // 23: headscale.v1.ExpireApiKeyRequest - (*ListApiKeysRequest)(nil), // 24: headscale.v1.ListApiKeysRequest - (*DeleteApiKeyRequest)(nil), // 25: headscale.v1.DeleteApiKeyRequest - (*GetUserResponse)(nil), // 26: headscale.v1.GetUserResponse - (*CreateUserResponse)(nil), // 27: headscale.v1.CreateUserResponse - (*RenameUserResponse)(nil), // 28: headscale.v1.RenameUserResponse - (*DeleteUserResponse)(nil), // 29: headscale.v1.DeleteUserResponse - (*ListUsersResponse)(nil), // 30: headscale.v1.ListUsersResponse - (*CreatePreAuthKeyResponse)(nil), // 31: headscale.v1.CreatePreAuthKeyResponse - (*ExpirePreAuthKeyResponse)(nil), // 32: headscale.v1.ExpirePreAuthKeyResponse - (*ListPreAuthKeysResponse)(nil), // 33: headscale.v1.ListPreAuthKeysResponse - (*DebugCreateNodeResponse)(nil), // 34: headscale.v1.DebugCreateNodeResponse - (*GetNodeResponse)(nil), // 35: headscale.v1.GetNodeResponse - (*SetTagsResponse)(nil), // 36: headscale.v1.SetTagsResponse - (*RegisterNodeResponse)(nil), // 37: headscale.v1.RegisterNodeResponse - (*DeleteNodeResponse)(nil), // 38: headscale.v1.DeleteNodeResponse - (*ExpireNodeResponse)(nil), // 39: headscale.v1.ExpireNodeResponse - (*RenameNodeResponse)(nil), // 40: headscale.v1.RenameNodeResponse - (*ListNodesResponse)(nil), // 41: 
headscale.v1.ListNodesResponse - (*MoveNodeResponse)(nil), // 42: headscale.v1.MoveNodeResponse - (*GetRoutesResponse)(nil), // 43: headscale.v1.GetRoutesResponse - (*EnableRouteResponse)(nil), // 44: headscale.v1.EnableRouteResponse - (*DisableRouteResponse)(nil), // 45: headscale.v1.DisableRouteResponse - (*GetNodeRoutesResponse)(nil), // 46: headscale.v1.GetNodeRoutesResponse - (*DeleteRouteResponse)(nil), // 47: headscale.v1.DeleteRouteResponse - (*CreateApiKeyResponse)(nil), // 48: headscale.v1.CreateApiKeyResponse - (*ExpireApiKeyResponse)(nil), // 49: headscale.v1.ExpireApiKeyResponse - (*ListApiKeysResponse)(nil), // 50: headscale.v1.ListApiKeysResponse - (*DeleteApiKeyResponse)(nil), // 51: headscale.v1.DeleteApiKeyResponse + (*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest + (*GetRoutesRequest)(nil), // 18: headscale.v1.GetRoutesRequest + (*EnableRouteRequest)(nil), // 19: headscale.v1.EnableRouteRequest + (*DisableRouteRequest)(nil), // 20: headscale.v1.DisableRouteRequest + (*GetNodeRoutesRequest)(nil), // 21: headscale.v1.GetNodeRoutesRequest + (*DeleteRouteRequest)(nil), // 22: headscale.v1.DeleteRouteRequest + (*CreateApiKeyRequest)(nil), // 23: headscale.v1.CreateApiKeyRequest + (*ExpireApiKeyRequest)(nil), // 24: headscale.v1.ExpireApiKeyRequest + (*ListApiKeysRequest)(nil), // 25: headscale.v1.ListApiKeysRequest + (*DeleteApiKeyRequest)(nil), // 26: headscale.v1.DeleteApiKeyRequest + (*GetUserResponse)(nil), // 27: headscale.v1.GetUserResponse + (*CreateUserResponse)(nil), // 28: headscale.v1.CreateUserResponse + (*RenameUserResponse)(nil), // 29: headscale.v1.RenameUserResponse + (*DeleteUserResponse)(nil), // 30: headscale.v1.DeleteUserResponse + (*ListUsersResponse)(nil), // 31: headscale.v1.ListUsersResponse + (*CreatePreAuthKeyResponse)(nil), // 32: headscale.v1.CreatePreAuthKeyResponse + (*ExpirePreAuthKeyResponse)(nil), // 33: headscale.v1.ExpirePreAuthKeyResponse + (*ListPreAuthKeysResponse)(nil), // 34: 
headscale.v1.ListPreAuthKeysResponse + (*DebugCreateNodeResponse)(nil), // 35: headscale.v1.DebugCreateNodeResponse + (*GetNodeResponse)(nil), // 36: headscale.v1.GetNodeResponse + (*SetTagsResponse)(nil), // 37: headscale.v1.SetTagsResponse + (*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse + (*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse + (*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse + (*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse + (*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse + (*MoveNodeResponse)(nil), // 43: headscale.v1.MoveNodeResponse + (*BackfillNodeIPsResponse)(nil), // 44: headscale.v1.BackfillNodeIPsResponse + (*GetRoutesResponse)(nil), // 45: headscale.v1.GetRoutesResponse + (*EnableRouteResponse)(nil), // 46: headscale.v1.EnableRouteResponse + (*DisableRouteResponse)(nil), // 47: headscale.v1.DisableRouteResponse + (*GetNodeRoutesResponse)(nil), // 48: headscale.v1.GetNodeRoutesResponse + (*DeleteRouteResponse)(nil), // 49: headscale.v1.DeleteRouteResponse + (*CreateApiKeyResponse)(nil), // 50: headscale.v1.CreateApiKeyResponse + (*ExpireApiKeyResponse)(nil), // 51: headscale.v1.ExpireApiKeyResponse + (*ListApiKeysResponse)(nil), // 52: headscale.v1.ListApiKeysResponse + (*DeleteApiKeyResponse)(nil), // 53: headscale.v1.DeleteApiKeyResponse } var file_headscale_v1_headscale_proto_depIdxs = []int32{ 0, // 0: headscale.v1.HeadscaleService.GetUser:input_type -> headscale.v1.GetUserRequest @@ -306,43 +316,45 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{ 14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest 15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest 16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest - 17, // 17: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest - 18, // 
18: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest - 19, // 19: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest - 20, // 20: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest - 21, // 21: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest - 22, // 22: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest - 23, // 23: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest - 24, // 24: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest - 25, // 25: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest - 26, // 26: headscale.v1.HeadscaleService.GetUser:output_type -> headscale.v1.GetUserResponse - 27, // 27: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse - 28, // 28: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse - 29, // 29: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse - 30, // 30: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse - 31, // 31: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse - 32, // 32: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse - 33, // 33: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse - 34, // 34: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse - 35, // 35: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse - 36, // 36: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse - 37, // 37: 
headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse - 38, // 38: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse - 39, // 39: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse - 40, // 40: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse - 41, // 41: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse - 42, // 42: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse - 43, // 43: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse - 44, // 44: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse - 45, // 45: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse - 46, // 46: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse - 47, // 47: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse - 48, // 48: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse - 49, // 49: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse - 50, // 50: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse - 51, // 51: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse - 26, // [26:52] is the sub-list for method output_type - 0, // [0:26] is the sub-list for method input_type + 17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest + 18, // 18: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest + 19, // 19: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest + 20, // 20: 
headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest + 21, // 21: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest + 22, // 22: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest + 23, // 23: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest + 24, // 24: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest + 25, // 25: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest + 26, // 26: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest + 27, // 27: headscale.v1.HeadscaleService.GetUser:output_type -> headscale.v1.GetUserResponse + 28, // 28: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse + 29, // 29: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse + 30, // 30: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse + 31, // 31: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse + 32, // 32: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse + 33, // 33: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse + 34, // 34: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse + 35, // 35: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse + 36, // 36: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse + 37, // 37: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse + 38, // 38: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse + 39, // 39: 
headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse + 40, // 40: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse + 41, // 41: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse + 42, // 42: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse + 43, // 43: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse + 44, // 44: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse + 45, // 45: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse + 46, // 46: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse + 47, // 47: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse + 48, // 48: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse + 49, // 49: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse + 50, // 50: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse + 51, // 51: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse + 52, // 52: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse + 53, // 53: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse + 27, // [27:54] is the sub-list for method output_type + 0, // [0:27] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/gen/go/headscale/v1/headscale.pb.gw.go b/gen/go/headscale/v1/headscale.pb.gw.go index b46f383b..adc7beeb 100644 --- a/gen/go/headscale/v1/headscale.pb.gw.go +++ 
b/gen/go/headscale/v1/headscale.pb.gw.go @@ -795,6 +795,42 @@ func local_request_HeadscaleService_MoveNode_0(ctx context.Context, marshaler ru } +var ( + filter_HeadscaleService_BackfillNodeIPs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BackfillNodeIPsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BackfillNodeIPs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BackfillNodeIPsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_BackfillNodeIPs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BackfillNodeIPs(ctx, &protoReq) + return msg, metadata, err + +} + func request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { var protoReq GetRoutesRequest var metadata runtime.ServerMetadata @@ -1574,6 +1610,31 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser }) + mux.Handle("POST", pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/BackfillNodeIPs", runtime.WithHTTPPathPattern("/api/v1/node/backfillips")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_HeadscaleService_BackfillNodeIPs_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -2214,6 +2275,28 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser }) + mux.Handle("POST", pattern_HeadscaleService_BackfillNodeIPs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/BackfillNodeIPs", runtime.WithHTTPPathPattern("/api/v1/node/backfillips")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_HeadscaleService_BackfillNodeIPs_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -2450,6 +2533,8 @@ var ( pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, "")) + pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, "")) + pattern_HeadscaleService_GetRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "routes"}, "")) pattern_HeadscaleService_EnableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "enable"}, "")) @@ -2504,6 +2589,8 @@ var ( forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage + forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage + forward_HeadscaleService_GetRoutes_0 = runtime.ForwardResponseMessage forward_HeadscaleService_EnableRoute_0 = runtime.ForwardResponseMessage diff --git a/gen/go/headscale/v1/headscale_grpc.pb.go b/gen/go/headscale/v1/headscale_grpc.pb.go index 0d731adc..6557f880 100644 --- a/gen/go/headscale/v1/headscale_grpc.pb.go +++ b/gen/go/headscale/v1/headscale_grpc.pb.go @@ -36,6 +36,7 @@ const ( HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode" HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes" HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode" + HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs" HeadscaleService_GetRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetRoutes" HeadscaleService_EnableRoute_FullMethodName = 
"/headscale.v1.HeadscaleService/EnableRoute" HeadscaleService_DisableRoute_FullMethodName = "/headscale.v1.HeadscaleService/DisableRoute" @@ -71,6 +72,7 @@ type HeadscaleServiceClient interface { RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error) + BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) // --- Route start --- GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error) @@ -245,6 +247,15 @@ func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeReque return out, nil } +func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) { + out := new(BackfillNodeIPsResponse) + err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) { out := new(GetRoutesResponse) err := c.cc.Invoke(ctx, HeadscaleService_GetRoutes_FullMethodName, in, out, opts...) 
@@ -350,6 +361,7 @@ type HeadscaleServiceServer interface { RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error) ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error) + BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) // --- Route start --- GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error) @@ -419,6 +431,9 @@ func (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodes func (UnimplementedHeadscaleServiceServer) MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method MoveNode not implemented") } +func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented") +} func (UnimplementedHeadscaleServiceServer) GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetRoutes not implemented") } @@ -765,6 +780,24 @@ func _HeadscaleService_MoveNode_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BackfillNodeIPsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HeadscaleService_BackfillNodeIPs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, req.(*BackfillNodeIPsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _HeadscaleService_GetRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetRoutesRequest) if err := dec(in); err != nil { @@ -1002,6 +1035,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{ MethodName: "MoveNode", Handler: _HeadscaleService_MoveNode_Handler, }, + { + MethodName: "BackfillNodeIPs", + Handler: _HeadscaleService_BackfillNodeIPs_Handler, + }, { MethodName: "GetRoutes", Handler: _HeadscaleService_GetRoutes_Handler, diff --git a/gen/go/headscale/v1/node.pb.go b/gen/go/headscale/v1/node.pb.go index ee031566..93d2c6b0 100644 --- a/gen/go/headscale/v1/node.pb.go +++ b/gen/go/headscale/v1/node.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: headscale/v1/node.proto @@ -1141,6 +1141,100 @@ func (x *DebugCreateNodeResponse) GetNode() *Node { return nil } +type BackfillNodeIPsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Confirmed bool `protobuf:"varint,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"` +} + +func (x *BackfillNodeIPsRequest) Reset() { + *x = BackfillNodeIPsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_headscale_v1_node_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackfillNodeIPsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackfillNodeIPsRequest) ProtoMessage() {} + +func (x *BackfillNodeIPsRequest) ProtoReflect() protoreflect.Message { + mi := &file_headscale_v1_node_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackfillNodeIPsRequest.ProtoReflect.Descriptor instead. +func (*BackfillNodeIPsRequest) Descriptor() ([]byte, []int) { + return file_headscale_v1_node_proto_rawDescGZIP(), []int{19} +} + +func (x *BackfillNodeIPsRequest) GetConfirmed() bool { + if x != nil { + return x.Confirmed + } + return false +} + +type BackfillNodeIPsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Changes []string `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"` +} + +func (x *BackfillNodeIPsResponse) Reset() { + *x = BackfillNodeIPsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_headscale_v1_node_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackfillNodeIPsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackfillNodeIPsResponse) ProtoMessage() {} + +func (x *BackfillNodeIPsResponse) ProtoReflect() protoreflect.Message { + mi := &file_headscale_v1_node_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackfillNodeIPsResponse.ProtoReflect.Descriptor instead. 
+func (*BackfillNodeIPsResponse) Descriptor() ([]byte, []int) { + return file_headscale_v1_node_proto_rawDescGZIP(), []int{20} +} + +func (x *BackfillNodeIPsResponse) GetChanges() []string { + if x != nil { + return x.Changes + } + return nil +} + var File_headscale_v1_node_proto protoreflect.FileDescriptor var file_headscale_v1_node_proto_rawDesc = []byte{ @@ -1260,18 +1354,25 @@ var file_headscale_v1_node_proto_rawDesc = []byte{ 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, - 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x52, - 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, - 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, - 0x41, 0x55, 0x54, 0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, - 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x43, 0x4c, - 0x49, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, - 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42, 0x29, 0x5a, - 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, - 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, - 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x36, 0x0a, 0x16, 0x42, 0x61, 0x63, 0x6b, + 
0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, + 0x22, 0x33, 0x0a, 0x17, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, + 0x49, 0x50, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x73, 0x2a, 0x82, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x47, 0x49, + 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x47, + 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x41, 0x55, 0x54, + 0x48, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x47, 0x49, 0x53, + 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x10, 0x02, + 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x54, + 0x48, 0x4f, 0x44, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x03, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, + 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1287,7 +1388,7 @@ func file_headscale_v1_node_proto_rawDescGZIP() []byte { } var file_headscale_v1_node_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var 
file_headscale_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 21) var file_headscale_v1_node_proto_goTypes = []interface{}{ (RegisterMethod)(0), // 0: headscale.v1.RegisterMethod (*Node)(nil), // 1: headscale.v1.Node @@ -1309,16 +1410,18 @@ var file_headscale_v1_node_proto_goTypes = []interface{}{ (*MoveNodeResponse)(nil), // 17: headscale.v1.MoveNodeResponse (*DebugCreateNodeRequest)(nil), // 18: headscale.v1.DebugCreateNodeRequest (*DebugCreateNodeResponse)(nil), // 19: headscale.v1.DebugCreateNodeResponse - (*User)(nil), // 20: headscale.v1.User - (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp - (*PreAuthKey)(nil), // 22: headscale.v1.PreAuthKey + (*BackfillNodeIPsRequest)(nil), // 20: headscale.v1.BackfillNodeIPsRequest + (*BackfillNodeIPsResponse)(nil), // 21: headscale.v1.BackfillNodeIPsResponse + (*User)(nil), // 22: headscale.v1.User + (*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp + (*PreAuthKey)(nil), // 24: headscale.v1.PreAuthKey } var file_headscale_v1_node_proto_depIdxs = []int32{ - 20, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User - 21, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp - 21, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp - 22, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey - 21, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp + 22, // 0: headscale.v1.Node.user:type_name -> headscale.v1.User + 23, // 1: headscale.v1.Node.last_seen:type_name -> google.protobuf.Timestamp + 23, // 2: headscale.v1.Node.expiry:type_name -> google.protobuf.Timestamp + 24, // 3: headscale.v1.Node.pre_auth_key:type_name -> headscale.v1.PreAuthKey + 23, // 4: headscale.v1.Node.created_at:type_name -> google.protobuf.Timestamp 0, // 5: headscale.v1.Node.register_method:type_name -> headscale.v1.RegisterMethod 1, // 6: headscale.v1.RegisterNodeResponse.node:type_name -> headscale.v1.Node 1, // 7: 
headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node @@ -1571,6 +1674,30 @@ func file_headscale_v1_node_proto_init() { return nil } } + file_headscale_v1_node_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackfillNodeIPsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_headscale_v1_node_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackfillNodeIPsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1578,7 +1705,7 @@ func file_headscale_v1_node_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_headscale_v1_node_proto_rawDesc, NumEnums: 1, - NumMessages: 19, + NumMessages: 21, NumExtensions: 0, NumServices: 0, }, diff --git a/gen/go/headscale/v1/preauthkey.pb.go b/gen/go/headscale/v1/preauthkey.pb.go index 35a0dfe0..c3ae2818 100644 --- a/gen/go/headscale/v1/preauthkey.pb.go +++ b/gen/go/headscale/v1/preauthkey.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: headscale/v1/preauthkey.proto diff --git a/gen/go/headscale/v1/routes.pb.go b/gen/go/headscale/v1/routes.pb.go index d2273047..9c7475b4 100644 --- a/gen/go/headscale/v1/routes.pb.go +++ b/gen/go/headscale/v1/routes.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: headscale/v1/routes.proto diff --git a/gen/go/headscale/v1/user.pb.go b/gen/go/headscale/v1/user.pb.go index 17cb4b54..3fcd12bf 100644 --- a/gen/go/headscale/v1/user.pb.go +++ b/gen/go/headscale/v1/user.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. 
DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: headscale/v1/user.proto diff --git a/gen/openapiv2/headscale/v1/headscale.swagger.json b/gen/openapiv2/headscale/v1/headscale.swagger.json index 7fe0b696..51b4ad22 100644 --- a/gen/openapiv2/headscale/v1/headscale.swagger.json +++ b/gen/openapiv2/headscale/v1/headscale.swagger.json @@ -194,6 +194,36 @@ ] } }, + "/api/v1/node/backfillips": { + "post": { + "operationId": "HeadscaleService_BackfillNodeIPs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1BackfillNodeIPsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "confirmed", + "in": "query", + "required": false, + "type": "boolean" + } + ], + "tags": [ + "HeadscaleService" + ] + } + }, "/api/v1/node/register": { "post": { "operationId": "HeadscaleService_RegisterNode", @@ -886,6 +916,17 @@ } } }, + "v1BackfillNodeIPsResponse": { + "type": "object", + "properties": { + "changes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "v1CreateApiKeyRequest": { "type": "object", "properties": { diff --git a/go.mod b/go.mod index 20bd86bd..e96bcc8a 100644 --- a/go.mod +++ b/go.mod @@ -1,54 +1,55 @@ module github.com/juanfont/headscale -go 1.22 +go 1.22.0 -toolchain go1.22.0 +toolchain go1.22.2 require ( github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/coreos/go-oidc/v3 v3.9.0 + github.com/coreos/go-oidc/v3 v3.10.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/deckarep/golang-set/v2 v2.6.0 - github.com/efekarakus/termcolor v1.0.1 - github.com/glebarez/sqlite v1.10.0 - github.com/go-gormigrate/gormigrate/v2 v2.1.1 - github.com/gofrs/uuid/v5 v5.0.0 + github.com/glebarez/sqlite v1.11.0 + github.com/go-gormigrate/gormigrate/v2 v2.1.2 + github.com/gofrs/uuid/v5 v5.2.0 
github.com/google/go-cmp v0.6.0 github.com/gorilla/mux v1.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 - github.com/klauspost/compress v1.17.6 - github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 + github.com/jagottsicher/termcolor v1.0.2 + github.com/klauspost/compress v1.17.8 + github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 github.com/ory/dockertest/v3 v3.10.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/philip-bui/grpc-zerolog v1.0.1 github.com/pkg/profile v1.7.0 github.com/prometheus/client_golang v1.18.0 github.com/prometheus/common v0.46.0 - github.com/pterm/pterm v0.12.78 - github.com/puzpuzpuz/xsync/v3 v3.0.2 + github.com/pterm/pterm v0.12.79 + github.com/puzpuzpuz/xsync/v3 v3.1.0 github.com/rs/zerolog v1.32.0 github.com/samber/lo v1.39.0 + github.com/sasha-s/go-deadlock v0.3.1 github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a - github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b + github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1 github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.21.0 - golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 - golang.org/x/net v0.22.0 - golang.org/x/oauth2 v0.17.0 - golang.org/x/sync v0.6.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 - google.golang.org/grpc v1.61.0 - google.golang.org/protobuf v1.32.0 + golang.org/x/crypto v0.23.0 + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 + golang.org/x/net v0.25.0 + golang.org/x/oauth2 v0.20.0 + golang.org/x/sync v0.7.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291 + google.golang.org/grpc 
v1.64.0 + google.golang.org/protobuf v1.34.1 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/yaml.v3 v3.0.1 - gorm.io/driver/postgres v1.5.4 - gorm.io/gorm v1.25.5 - tailscale.com v1.58.2 + gorm.io/driver/postgres v1.5.7 + gorm.io/gorm v1.25.10 + tailscale.com v1.66.3 ) require ( @@ -58,7 +59,7 @@ require ( dario.cat/mergo v1.0.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/akutz/memconn v0.1.0 // indirect github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect @@ -77,35 +78,39 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect github.com/aws/smithy-go v1.19.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/console v1.0.4 // indirect github.com/containerd/continuity v0.4.3 // indirect - github.com/coreos/go-iptables v0.7.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect + github.com/creachadair/mds v0.14.5 // indirect github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect - github.com/docker/cli v25.0.3+incompatible // indirect - github.com/docker/docker v25.0.3+incompatible // indirect + github.com/docker/cli v26.1.3+incompatible // indirect + github.com/docker/docker v26.1.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect 
github.com/dustin/go-humanize v1.0.1 // indirect - github.com/felixge/fgprof v0.9.3 // indirect + github.com/felixge/fgprof v0.9.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.5.0 // indirect + github.com/gaissmai/bart v0.4.1 // indirect github.com/glebarez/go-sqlite v1.22.0 // indirect - github.com/go-jose/go-jose/v3 v3.0.1 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect + github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/go-github v17.0.0+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c // indirect - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect + github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect + github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gookit/color v1.5.4 // indirect @@ -119,7 +124,7 @@ require ( github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect - github.com/jackc/pgx/v5 v5.5.3 // indirect + github.com/jackc/pgx/v5 v5.5.5 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect 
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect @@ -144,12 +149,14 @@ require ( github.com/miekg/dns v1.1.58 // indirect github.com/mitchellh/go-ps v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc6 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runc v1.1.12 // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -166,16 +173,17 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect github.com/tailscale/golang-x-crypto v0.0.0-20240108194725-7ce1f622c780 // indirect github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 // indirect - github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0 // indirect - github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7 // indirect - github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 // indirect + github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 // indirect + github.com/tailscale/setec 
v0.0.0-20240314234648-9da8e7407257 // indirect + github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185 // indirect + github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 // indirect + github.com/tailscale/wireguard-go v0.0.0-20240429185444-03c5a0ccf754 // indirect github.com/tcnksm/go-httpstat v0.2.0 // indirect github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect @@ -187,25 +195,21 @@ require ( github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect go.uber.org/multierr v1.11.0 // indirect go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.19.0 // indirect + golang.org/x/tools v0.21.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect golang.zx2c4.com/wireguard/windows v0.5.3 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c // indirect - inet.af/peercred v0.0.0-20210906144145-0893ea02156a // indirect - modernc.org/libc v1.49.3 // indirect + gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 // indirect + modernc.org/libc v1.50.6 // indirect modernc.org/mathutil v1.6.0 // indirect modernc.org/memory 
v1.8.0 // indirect - modernc.org/sqlite v1.28.0 // indirect + modernc.org/sqlite v1.29.9 // indirect nhooyr.io/websocket v1.8.10 // indirect ) diff --git a/go.sum b/go.sum index 63876d19..a534a8e4 100644 --- a/go.sum +++ b/go.sum @@ -29,8 +29,8 @@ github.com/MarvinJWendt/testza v0.3.0/go.mod h1:eFcL4I0idjtIx8P9C6KkAuLgATNKpX4/ github.com/MarvinJWendt/testza v0.4.2/go.mod h1:mSdhXiKH8sg/gQehJ63bINcCKp7RtYewEjXsvsVUPbE= github.com/MarvinJWendt/testza v0.5.2 h1:53KDo64C1z/h/d/stCYCPY69bt/OSwjq5KpFNwi+zB4= github.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc4n3EjyIYvEY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -83,14 +83,22 @@ github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod 
h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -100,17 +108,18 @@ github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= github.com/containerd/continuity v0.4.3/go.mod 
h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= -github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8= -github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= -github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= +github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= +github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/mds v0.14.5 h1:2amuO4yCbQkaAyDoLO5iCbwbTRQZz4EpRhOejQbf4+8= +github.com/creachadair/mds v0.14.5/go.mod h1:4vrFYUzTXMJpMBU+OA292I6IUxKWCCfZkgXg+/kBZMo= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -121,40 +130,49 @@ github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80N github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= -github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284= -github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= -github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= +github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= +github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc= +github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= +github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= 
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/efekarakus/termcolor v1.0.1 h1:YAKFO3bnLrqZGTWyNLcYoSIAQFKVOmbqmDnwsU/znzg= -github.com/efekarakus/termcolor v1.0.1/go.mod h1:AitrZNrE4nPO538fRsqf+p0WgLdAsGN5pUNrHEPsEMM= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= +github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gaissmai/bart v0.4.1 h1:G1t58voWkNmT47lBDawH5QhtTDsdqRIO+ftq5x4P9Ls= +github.com/gaissmai/bart v0.4.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= github.com/github/fakeca v0.1.0/go.mod 
h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo= github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= -github.com/glebarez/sqlite v1.10.0 h1:u4gt8y7OND/cCei/NMHmfbLxF6xP2wgKcT/BJf2pYkc= -github.com/glebarez/sqlite v1.10.0/go.mod h1:IJ+lfSOmiekhQsFTJRx/lHtGYmCdtAiTaf5wI9u5uHA= -github.com/go-gormigrate/gormigrate/v2 v2.1.1 h1:eGS0WTFRV30r103lU8JNXY27KbviRnqqIDobW3EV3iY= -github.com/go-gormigrate/gormigrate/v2 v2.1.1/go.mod h1:L7nJ620PFDKei9QOhJzqA8kRCk+E3UbV2f5gv+1ndLc= -github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= -github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= +github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= +github.com/go-gormigrate/gormigrate/v2 v2.1.2 h1:F/d1hpHbRAvKezziV2CC5KUE82cVe9zTgHSBoOOZ4CY= +github.com/go-gormigrate/gormigrate/v2 v2.1.2/go.mod h1:9nHVX6z3FCMCQPA7PThGcA55t22yKQfK/Dnsf5i7hUo= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.3.0 
h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= @@ -162,15 +180,18 @@ github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M= -github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= +github.com/gofrs/uuid/v5 v5.2.0 h1:qw1GMx6/y8vhVsx626ImfKMuS5CvJmhIKKtuyvfajMM= +github.com/gofrs/uuid/v5 v5.2.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -178,17 +199,13 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= @@ -197,11 +214,12 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c h1:06RMfw+TMMHtRuUOroMeatRCCgSMWXCJQeABvHU69YQ= -github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c/go.mod h1:BVIYo3cdnT4qSylnYqcd5YtmXhr51cJPGtnLBe/uLBU= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 h1:velgFPYr1X9TDwLIfkV7fWqsFlf7TeP11M/7kPd/dVI= +github.com/google/pprof v0.0.0-20240509144519-723abb6459b7/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -218,10 +236,13 @@ github.com/gorilla/securecookie v1.1.2 
h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kX github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= @@ -229,6 +250,7 @@ github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3s github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod 
h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -239,10 +261,14 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.5.3 h1:Ces6/M3wbDXYpM8JyyPD57ivTtJACFZJd885pdIaV2s= -github.com/jackc/pgx/v5 v5.5.3/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= +github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM= +github.com/jagottsicher/termcolor v1.0.2/go.mod h1:RcH8uFwF/0wbEdQmi83rjmlJ+QOKdMSE9Rc1BEB7zFo= +github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= +github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= @@ -251,6 +277,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y 
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= @@ -260,13 +287,13 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= 
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= @@ -280,17 +307,18 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4= github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod 
h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -315,27 +343,32 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= -github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282 h1:TQMyrpijtkFyXpNI3rY5hsZQZw+paiH+BfAlsb81HBY= -github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282/go.mod h1:rW25Kyd08Wdn3UVn0YBsDTSvReu0jqpmJKzxITPSjks= +github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3TcnjeqjPT2gSlha4asp8NvgcFRYExCaikCxk= +github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25/go.mod h1:eDjgYHYDJbPLBLsyZ6qRaugP0mX8vePOhZ5id1fdzJw= github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= -github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= -github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/philip-bui/grpc-zerolog v1.0.1 
h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA= github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ= github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -368,10 +401,10 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE= github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8= github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s= -github.com/pterm/pterm v0.12.78 h1:QTWKaIAa4B32GKwqVXtu9m1DUMgWw3VRljMkMevX+b8= -github.com/pterm/pterm v0.12.78/go.mod h1:1v/gzOF1N0FsjbgTHZ1wVycRkKiatFvJSJC4IGaQAAo= -github.com/puzpuzpuz/xsync/v3 v3.0.2 h1:3yESHrRFYr6xzkz61LLkvNiPFXxJEAABanTQpKbAaew= -github.com/puzpuzpuz/xsync/v3 v3.0.2/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/pterm/pterm v0.12.79 h1:lH3yrYMhdpeqX9y5Ep1u7DejyHy7NSQg9qrBjF9dFT4= +github.com/pterm/pterm v0.12.79/go.mod h1:1v/gzOF1N0FsjbgTHZ1wVycRkKiatFvJSJC4IGaQAAo= +github.com/puzpuzpuz/xsync/v3 v3.1.0 h1:EewKT7/LNac5SLiEblJeUu8z5eERHrmRLnMQL2d7qX4= +github.com/puzpuzpuz/xsync/v3 v3.1.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -392,6 +425,8 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= 
+github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= @@ -414,8 +449,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= -github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -423,9 +458,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ= @@ -440,14 +475,22 @@ github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29X github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0 h1:0bcWsoeSBbY3XWRS1F8yp/g343E5TQMakwy5cxJS+ZU= -github.com/tailscale/setec v0.0.0-20240102233422-ba738f8ab5a0/go.mod h1:/8aqnX9aU8yubwQ2InR5mHi1OlfWQ8ei8Ea2eyLScOY= -github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b h1:FzqUT8XFn3OJTzTMteYMZlg3EUQMxoq7oJiaVj4SEBA= -github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b/go.mod h1:Nkao4BDbQqzxxg78ty4ejq+KgX/0Bxj00DxfxScuJoI= -github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7 h1:xAgOVncJuuxkFZ2oXXDKFTH4HDdFYSZRYdA6oMrCewg= -github.com/tailscale/web-client-prebuilt v0.0.0-20240111230031-5ca22df9e6e7/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 h1:zwsem4CaamMdC3tFoTpzrsUSMDPV0K6rhnQdF7kXekQ= -github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 
h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= +github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257 h1:6WsbDYsikRNmmbfZoRoyIEA9tfl0aspPAE0t7nBj2B4= +github.com/tailscale/setec v0.0.0-20240314234648-9da8e7407257/go.mod h1:hrq01/0LUDZf4mMkcZ7Ovmy33jvCi4RpESpb9kPxV6E= +github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185 h1:zT+qB+2Ghulj50d5Wq6h6vQYqD2sPdhy4FF6+FHedVE= +github.com/tailscale/squibble v0.0.0-20240418235321-9ee0eeb78185/go.mod h1:LoIjI6z/6efr9ebISQ5l2vjQmjc8QJrAYZdy3Ec3sVs= +github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1 h1:wmsnxEEuRlgK7Bhdkmm0JGrjjc0JoHZThLLo0WXXbLs= +github.com/tailscale/tailsql v0.0.0-20240418235827-820559f382c1/go.mod h1:XN193fbz9RR/5stlWPMMIZR+TTa1BUkDJm5Azwzxwgw= +github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= +github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= +github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= +github.com/tailscale/wireguard-go v0.0.0-20240429185444-03c5a0ccf754 h1:iazWjqVHE6CbNam7WXRhi33Qad5o7a8LVYgVoILpZdI= +github.com/tailscale/wireguard-go v0.0.0-20240429185444-03c5a0ccf754/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= +github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk= 
github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= @@ -456,8 +499,8 @@ github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2 github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM= github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok= github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI= -github.com/u-root/u-root v0.11.0 h1:6gCZLOeRyevw7gbTwMj3fKxnr9+yHFlgF3N7udUVNO8= -github.com/u-root/u-root v0.11.0/go.mod h1:DBkDtiZyONk9hzVEdB/PWI9B4TxDkElWlVTHseglrZY= +github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= +github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= @@ -491,20 +534,19 @@ go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wus go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 
-golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 h1:/RIbNt/Zr7rVhIkQhooTxCxFcdWLGIKnZA4IXNFSrvo= -golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/exp/typeparams v0.0.0-20230905200255-921286631fa9 h1:j3D9DvWRpUfIyFfDPws7LoIZ2MAI1OJHdQXtTnYtN+k= -golang.org/x/exp/typeparams v0.0.0-20230905200255-921286631fa9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/image v0.12.0 h1:w13vZbU4o5rKOFFR8y7M+c4A5jXDC0uXTdHYRP8X2DQ= -golang.org/x/image v0.12.0/go.mod h1:Lu90jvHG7GfemOIcldsh9A2hS01ocl6oNO7ype5mEnk= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a h1:8qmSSA8Gz/1kTrCe0nqR0R3Gb/NDhykzWw2q2mWZydM= +golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8= +golang.org/x/image v0.15.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -513,8 +555,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -524,14 +566,14 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod 
h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -540,28 +582,25 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210301091718-77cc2087c03b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -573,26 +612,29 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.8.0/go.mod 
h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -606,8 +648,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= -golang.org/x/tools v0.19.0/go.mod 
h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -618,28 +660,22 @@ golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= +google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291 h1:4HZJ3Xv1cmrJ+0aFo304Zn79ur1HMxptAE7aCPNLSqc= +google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 h1:AgADTJarZTBqgjiUzRgfaBchgYB3/WFTC80GPwsMcRI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod 
h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -647,8 +683,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -658,40 +692,32 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo= -gorm.io/driver/postgres v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0= -gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= -gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/driver/postgres v1.5.7 h1:8ptbNJTDbEmhdr62uReG5BGkdQyeasu/FZHxI0IMGnM= +gorm.io/driver/postgres v1.5.7/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA= +gorm.io/gorm 
v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s= +gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c h1:bYb98Ra11fJ8F2xFbZx0zg2VQ28lYqC1JxfaaF53xqY= -gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c/go.mod h1:AVgIgHMwK63XvmAzWG9vLQ41YnVHN0du0tEC46fI7yY= +gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= +gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= -honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= +honnef.co/go/tools v0.4.7 h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs= +honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -inet.af/peercred v0.0.0-20210906144145-0893ea02156a h1:qdkS8Q5/i10xU2ArJMKYhVa1DORzBfYS/qA2UK2jheg= -inet.af/peercred v0.0.0-20210906144145-0893ea02156a/go.mod h1:FjawnflS/udxX+SvpsMgZfdqx2aykOlkISeAsADi5IU= -inet.af/wf v0.0.0-20221017222439-36129f591884 h1:zg9snq3Cpy50lWuVqDYM7AIRVTtU50y5WXETMFohW/Q= -inet.af/wf v0.0.0-20221017222439-36129f591884/go.mod h1:bSAQ38BYbY68uwpasXOTZo22dKGy9SNvI6PZFeKomZE= -lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= -lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.41.0 
h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= -modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= -modernc.org/cc/v4 v4.20.0 h1:45Or8mQfbUqJOG9WaxvlFYOAQO0lQ5RvqBcFCXngjxk= -modernc.org/cc/v4 v4.20.0/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= -modernc.org/ccgo/v3 v3.17.0 h1:o3OmOqx4/OFnl4Vm3G8Bgmqxnvxnh0nbxeT5p/dWChA= -modernc.org/ccgo/v3 v3.17.0/go.mod h1:Sg3fwVpmLvCUTaqEUjiBDAvshIaKDB0RXaf+zgqFu8I= -modernc.org/ccgo/v4 v4.16.0 h1:ofwORa6vx2FMm0916/CkZjpFPSR70VwTjUCe2Eg5BnA= -modernc.org/ccgo/v4 v4.16.0/go.mod h1:dkNyWIjFrVIZ68DTo36vHK+6/ShBn4ysU61So6PIqCI= +modernc.org/cc/v4 v4.21.2 h1:dycHFB/jDc3IyacKipCNSDrjIC0Lm1hyoWOZTRR20Lk= +modernc.org/cc/v4 v4.21.2/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= +modernc.org/ccgo/v4 v4.17.7 h1:+MG+Np7uYtsuPvtoH3KtZ1+pqNiJAOqqqVIxggE1iIo= +modernc.org/ccgo/v4 v4.17.7/go.mod h1:x87xuLLXuJv3Nn5ULTUqJn/HsTMMMiT1Eavo6rz1NiY= modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw= modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= -modernc.org/libc v1.49.3 h1:j2MRCRdwJI2ls/sGbeSk0t2bypOG/uvPZUsGQFDulqg= -modernc.org/libc v1.49.3/go.mod h1:yMZuGkn7pXbKfoT/M35gFJOAEdSKdxL0q64sF7KqCDo= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.50.6 h1:72NPEFMyKP01RJrKXS2eLXv35UklKqlJZ1b9P7gSo6I= +modernc.org/libc v1.50.6/go.mod h1:8lr2m1THY5Z3ikGyUc3JhLEQg1oaIBz/AQixw8/eksQ= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= @@ 
-700,15 +726,15 @@ modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= -modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= -modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/sqlite v1.29.9 h1:9RhNMklxJs+1596GNuAX+O/6040bvOwacTxuFcRuQow= +modernc.org/sqlite v1.29.9/go.mod h1:ItX2a1OVGgNsFh6Dv60JQvGfJfTPHPVpV6DF59akYOA= modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -software.sslmate.com/src/go-pkcs12 v0.2.1 h1:tbT1jjaeFOF230tzOIRJ6U5S1jNqpsSyNjzDd58H3J8= -software.sslmate.com/src/go-pkcs12 v0.2.1/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= -tailscale.com v1.58.2 h1:5trkhh/fpUn7f6TUcGUQYJ0GokdNNfNrjh9ONJhoc5A= -tailscale.com v1.58.2/go.mod h1:faWR8XaXemnSKCDjHC7SAQzaagkUjA5x4jlLWiwxtuk= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +tailscale.com v1.66.3 h1:jpWat+hiobTtCosSV/c8D6S/ubgROf/S59MaIBdM9pY= +tailscale.com v1.66.3/go.mod h1:99BIV4U3UPw36Sva04xK2ZsEpVRUkY9jCdEDSAhaNGM= diff --git a/hscontrol/app.go b/hscontrol/app.go index b9238f4f..3cbbf1ac 100644 --- a/hscontrol/app.go +++ b/hscontrol/app.go @@ -9,7 +9,6 @@ import ( "net" "net/http" _ "net/http/pprof" //nolint - 
"net/netip" "os" "os/signal" "path/filepath" @@ -20,6 +19,7 @@ import ( "time" "github.com/coreos/go-oidc/v3/oidc" + "github.com/davecgh/go-spew/spew" "github.com/gorilla/mux" grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware" grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" @@ -28,6 +28,7 @@ import ( "github.com/juanfont/headscale/hscontrol/db" "github.com/juanfont/headscale/hscontrol/derp" derpServer "github.com/juanfont/headscale/hscontrol/derp/server" + "github.com/juanfont/headscale/hscontrol/mapper" "github.com/juanfont/headscale/hscontrol/notifier" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" @@ -55,6 +56,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/dnstype" "tailscale.com/types/key" + "tailscale.com/util/dnsname" ) var ( @@ -69,7 +71,7 @@ var ( const ( AuthPrefix = "Bearer " - updateInterval = 5000 + updateInterval = 5 * time.Second privateKeyFileMode = 0o600 headscaleDirPerm = 0o700 @@ -77,6 +79,11 @@ const ( registerCacheCleanup = time.Minute * 20 ) +// func init() { +// deadlock.Opts.DeadlockTimeout = 15 * time.Second +// deadlock.Opts.PrintAllCurrentGoroutines = true +// } + // Headscale represents the base app of the service. 
type Headscale struct { cfg *types.Config @@ -89,6 +96,7 @@ type Headscale struct { ACLPolicy *policy.ACLPolicy + mapper *mapper.Mapper nodeNotifier *notifier.Notifier oidcProvider *oidc.Provider @@ -96,15 +104,16 @@ type Headscale struct { registrationCache *cache.Cache - shutdownChan chan struct{} pollNetMapStreamWG sync.WaitGroup } var ( - profilingEnabled = envknob.Bool("HEADSCALE_PROFILING_ENABLED") + profilingEnabled = envknob.Bool("HEADSCALE_DEBUG_PROFILING_ENABLED") + profilingPath = envknob.String("HEADSCALE_DEBUG_PROFILING_PATH") tailsqlEnabled = envknob.Bool("HEADSCALE_DEBUG_TAILSQL_ENABLED") tailsqlStateDir = envknob.String("HEADSCALE_DEBUG_TAILSQL_STATE_DIR") tailsqlTSKey = envknob.String("TS_AUTHKEY") + dumpConfig = envknob.Bool("HEADSCALE_DEBUG_DUMP_CONFIG") ) func NewHeadscale(cfg *types.Config) (*Headscale, error) { @@ -128,7 +137,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) { noisePrivateKey: noisePrivateKey, registrationCache: registrationCache, pollNetMapStreamWG: sync.WaitGroup{}, - nodeNotifier: notifier.NewNotifier(), + nodeNotifier: notifier.NewNotifier(cfg), } app.db, err = db.NewHeadscaleDatabase( @@ -138,7 +147,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) { return nil, err } - app.ipAlloc, err = db.NewIPAllocator(app.db, *cfg.PrefixV4, *cfg.PrefixV6) + app.ipAlloc, err = db.NewIPAllocator(app.db, cfg.PrefixV4, cfg.PrefixV6, cfg.IPAllocation) if err != nil { return nil, err } @@ -156,7 +165,15 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) { if app.cfg.DNSConfig != nil && app.cfg.DNSConfig.Proxied { // if MagicDNS // TODO(kradalby): revisit why this takes a list. - magicDNSDomains := util.GenerateMagicDNSRootDomains([]netip.Prefix{*cfg.PrefixV4, *cfg.PrefixV6}) + + var magicDNSDomains []dnsname.FQDN + if cfg.PrefixV4 != nil { + magicDNSDomains = append(magicDNSDomains, util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...) 
+ } + if cfg.PrefixV6 != nil { + magicDNSDomains = append(magicDNSDomains, util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...) + } + // we might have routes already from Split DNS if app.cfg.DNSConfig.Routes == nil { app.cfg.DNSConfig.Routes = make(map[string][]*dnstype.Resolver) @@ -199,54 +216,77 @@ func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) { http.Redirect(w, req, target, http.StatusFound) } -// expireEphemeralNodes deletes ephemeral node records that have not been +// deleteExpireEphemeralNodes deletes ephemeral node records that have not been // seen for longer than h.cfg.EphemeralNodeInactivityTimeout. -func (h *Headscale) expireEphemeralNodes(milliSeconds int64) { - ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond) +func (h *Headscale) deleteExpireEphemeralNodes(ctx context.Context, every time.Duration) { + ticker := time.NewTicker(every) - var update types.StateUpdate - var changed bool - for range ticker.C { - if err := h.db.DB.Transaction(func(tx *gorm.DB) error { - update, changed = db.ExpireEphemeralNodes(tx, h.cfg.EphemeralNodeInactivityTimeout) + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-ticker.C: + var removed []types.NodeID + var changed []types.NodeID + if err := h.db.Write(func(tx *gorm.DB) error { + removed, changed = db.DeleteExpiredEphemeralNodes(tx, h.cfg.EphemeralNodeInactivityTimeout) - return nil - }); err != nil { - log.Error().Err(err).Msg("database error while expiring ephemeral nodes") - continue - } + return nil + }); err != nil { + log.Error().Err(err).Msg("database error while expiring ephemeral nodes") + continue + } - if changed && update.Valid() { - ctx := types.NotifyCtx(context.Background(), "expire-ephemeral", "na") - h.nodeNotifier.NotifyAll(ctx, update) + if removed != nil { + ctx := types.NotifyCtx(context.Background(), "expire-ephemeral", "na") + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerRemoved, + Removed: removed, 
+ }) + } + + if changed != nil { + ctx := types.NotifyCtx(context.Background(), "expire-ephemeral", "na") + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: changed, + }) + } } } } -// expireExpiredMachines expires nodes that have an explicit expiry set +// expireExpiredNodes expires nodes that have an explicit expiry set // after that expiry time has passed. -func (h *Headscale) expireExpiredMachines(intervalMs int64) { - interval := time.Duration(intervalMs) * time.Millisecond - ticker := time.NewTicker(interval) +func (h *Headscale) expireExpiredNodes(ctx context.Context, every time.Duration) { + ticker := time.NewTicker(every) lastCheck := time.Unix(0, 0) var update types.StateUpdate var changed bool - for range ticker.C { - if err := h.db.DB.Transaction(func(tx *gorm.DB) error { - lastCheck, update, changed = db.ExpireExpiredNodes(tx, lastCheck) + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-ticker.C: + if err := h.db.Write(func(tx *gorm.DB) error { + lastCheck, update, changed = db.ExpireExpiredNodes(tx, lastCheck) - return nil - }); err != nil { - log.Error().Err(err).Msg("database error while expiring nodes") - continue - } + return nil + }); err != nil { + log.Error().Err(err).Msg("database error while expiring nodes") + continue + } - log.Trace().Str("nodes", update.ChangeNodes.String()).Msgf("expiring nodes") - if changed && update.Valid() { - ctx := types.NotifyCtx(context.Background(), "expire-expired", "na") - h.nodeNotifier.NotifyAll(ctx, update) + if changed { + log.Trace().Interface("nodes", update.ChangePatches).Msgf("expiring nodes") + + ctx := types.NotifyCtx(context.Background(), "expire-expired", "na") + h.nodeNotifier.NotifyAll(ctx, update) + } } } } @@ -272,14 +312,11 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) { h.DERPMap.Regions[region.RegionID] = ®ion } - stateUpdate := types.StateUpdate{ + ctx := 
types.NotifyCtx(context.Background(), "derpmap-update", "na") + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ Type: types.StateDERPUpdated, DERPMap: h.DERPMap, - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na") - h.nodeNotifier.NotifyAll(ctx, stateUpdate) - } + }) } } } @@ -292,7 +329,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, // Check if the request is coming from the on-server client. // This is not secure, but it is to maintain maintainability // with the "legacy" database-based client - // It is also neede for grpc-gateway to be able to connect to + // It is also needed for grpc-gateway to be able to connect to // the server client, _ := peer.FromContext(ctx) @@ -303,11 +340,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, meta, ok := metadata.FromIncomingContext(ctx) if !ok { - log.Error(). - Caller(). - Str("client_address", client.Addr.String()). - Msg("Retrieving metadata is failed") - return ctx, status.Errorf( codes.InvalidArgument, "Retrieving metadata is failed", @@ -316,11 +348,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, authHeader, ok := meta["authorization"] if !ok { - log.Error(). - Caller(). - Str("client_address", client.Addr.String()). - Msg("Authorization token is not supplied") - return ctx, status.Errorf( codes.Unauthenticated, "Authorization token is not supplied", @@ -330,11 +357,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, token := authHeader[0] if !strings.HasPrefix(token, AuthPrefix) { - log.Error(). - Caller(). - Str("client_address", client.Addr.String()). 
- Msg(`missing "Bearer " prefix in "Authorization" header`) - return ctx, status.Error( codes.Unauthenticated, `missing "Bearer " prefix in "Authorization" header`, @@ -343,12 +365,6 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context, valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix)) if err != nil { - log.Error(). - Caller(). - Err(err). - Str("client_address", client.Addr.String()). - Msg("failed to validate token") - return ctx, status.Error(codes.Internal, "failed to validate token") } @@ -446,7 +462,7 @@ func (h *Headscale) ensureUnixSocketIsAbsent() error { func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router { router := mux.NewRouter() - router.PathPrefix("/debug/pprof/").Handler(http.DefaultServeMux) + router.Use(prometheusMiddleware) router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost) @@ -483,16 +499,16 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router { return router } -// Serve launches a GIN server with the Headscale API. +// Serve launches the HTTP and gRPC server service Headscale and the API. 
func (h *Headscale) Serve() error { - if _, enableProfile := os.LookupEnv("HEADSCALE_PROFILING_ENABLED"); enableProfile { - if profilePath, ok := os.LookupEnv("HEADSCALE_PROFILING_PATH"); ok { - err := os.MkdirAll(profilePath, os.ModePerm) + if profilingEnabled { + if profilingPath != "" { + err := os.MkdirAll(profilingPath, os.ModePerm) if err != nil { log.Fatal().Err(err).Msg("failed to create profiling directory") } - defer profile.Start(profile.ProfilePath(profilePath)).Stop() + defer profile.Start(profile.ProfilePath(profilingPath)).Stop() } else { defer profile.Start().Stop() } @@ -500,8 +516,13 @@ func (h *Headscale) Serve() error { var err error + if dumpConfig { + spew.Dump(h.cfg) + } + // Fetch an initial DERP Map before we start serving h.DERPMap = derp.GetDERPMap(h.cfg.DERP) + h.mapper = mapper.NewMapper(h.db, h.cfg, h.DERPMap, h.nodeNotifier) if h.cfg.DERP.ServerEnabled { // When embedded DERP is enabled we always need a STUN server @@ -511,7 +532,7 @@ func (h *Headscale) Serve() error { region, err := h.DERPServer.GenerateRegion() if err != nil { - return err + return fmt.Errorf("generating DERP region for embedded server: %w", err) } if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion { @@ -531,10 +552,13 @@ func (h *Headscale) Serve() error { return errEmptyInitialDERPMap } - // TODO(kradalby): These should have cancel channels and be cleaned - // up on shutdown. 
- go h.expireEphemeralNodes(updateInterval) - go h.expireExpiredMachines(updateInterval) + expireEphemeralCtx, expireEphemeralCancel := context.WithCancel(context.Background()) + defer expireEphemeralCancel() + go h.deleteExpireEphemeralNodes(expireEphemeralCtx, updateInterval) + + expireNodeCtx, expireNodeCancel := context.WithCancel(context.Background()) + defer expireNodeCancel() + go h.expireExpiredNodes(expireNodeCtx, updateInterval) if zl.GlobalLevel() == zl.TraceLevel { zerolog.RespLog = true @@ -586,14 +610,14 @@ func (h *Headscale) Serve() error { }..., ) if err != nil { - return err + return fmt.Errorf("setting up gRPC gateway via socket: %w", err) } // Connect to the gRPC server over localhost to skip // the authentication. err = v1.RegisterHeadscaleServiceHandler(ctx, grpcGatewayMux, grpcGatewayConn) if err != nil { - return err + return fmt.Errorf("registering Headscale API service to gRPC: %w", err) } // Start the local gRPC server without TLS and without authentication @@ -614,9 +638,7 @@ func (h *Headscale) Serve() error { tlsConfig, err := h.getTLSSettings() if err != nil { - log.Error().Err(err).Msg("Failed to set up TLS configuration") - - return err + return fmt.Errorf("configuring TLS settings: %w", err) } // @@ -693,18 +715,17 @@ func (h *Headscale) Serve() error { // HTTP setup // // This is the regular router that we expose - // over our main Addr. 
It also serves the legacy Tailcale API + // over our main Addr router := h.createRouter(grpcGatewayMux) httpServer := &http.Server{ Addr: h.cfg.Addr, Handler: router, - ReadTimeout: types.HTTPReadTimeout, - // Go does not handle timeouts in HTTP very well, and there is - // no good way to handle streaming timeouts, therefore we need to - // keep this at unlimited and be careful to clean up connections - // https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/#aboutstreaming - WriteTimeout: 0, + ReadTimeout: types.HTTPTimeout, + + // Long polling should not have any timeout, this is overriden + // further down the chain + WriteTimeout: types.HTTPTimeout, } var httpListener net.Listener @@ -723,27 +744,30 @@ func (h *Headscale) Serve() error { log.Info(). Msgf("listening and serving HTTP on: %s", h.cfg.Addr) - promMux := http.NewServeMux() - promMux.Handle("/metrics", promhttp.Handler()) + debugMux := http.NewServeMux() + debugMux.Handle("/debug/pprof/", http.DefaultServeMux) + debugMux.HandleFunc("/debug/notifier", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(h.nodeNotifier.String())) + }) + debugMux.Handle("/metrics", promhttp.Handler()) - promHTTPServer := &http.Server{ + debugHTTPServer := &http.Server{ Addr: h.cfg.MetricsAddr, - Handler: promMux, - ReadTimeout: types.HTTPReadTimeout, + Handler: debugMux, + ReadTimeout: types.HTTPTimeout, WriteTimeout: 0, } - var promHTTPListener net.Listener - promHTTPListener, err = net.Listen("tcp", h.cfg.MetricsAddr) - + debugHTTPListener, err := net.Listen("tcp", h.cfg.MetricsAddr) if err != nil { return fmt.Errorf("failed to bind to TCP address: %w", err) } - errorGroup.Go(func() error { return promHTTPServer.Serve(promHTTPListener) }) + errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) }) log.Info(). 
- Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr) + Msgf("listening and serving debug and metrics on: %s", h.cfg.MetricsAddr) var tailsqlContext context.Context if tailsqlEnabled { @@ -760,7 +784,6 @@ func (h *Headscale) Serve() error { } // Handle common process-killing signals so we can gracefully shut down: - h.shutdownChan = make(chan struct{}) sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGHUP, @@ -799,12 +822,15 @@ func (h *Headscale) Serve() error { } default: + trace := log.Trace().Msgf log.Info(). Str("signal", sig.String()). Msg("Received signal to stop, shutting down gracefully") - close(h.shutdownChan) + expireNodeCancel() + expireEphemeralCancel() + trace("waiting for netmap stream to close") h.pollNetMapStreamWG.Wait() // Gracefully shut down servers @@ -812,32 +838,44 @@ func (h *Headscale) Serve() error { context.Background(), types.HTTPShutdownTimeout, ) - if err := promHTTPServer.Shutdown(ctx); err != nil { + trace("shutting down debug http server") + if err := debugHTTPServer.Shutdown(ctx); err != nil { log.Error().Err(err).Msg("Failed to shutdown prometheus http") } + trace("shutting down main http server") if err := httpServer.Shutdown(ctx); err != nil { log.Error().Err(err).Msg("Failed to shutdown http") } + + trace("shutting down grpc server (socket)") grpcSocket.GracefulStop() if grpcServer != nil { + trace("shutting down grpc server (external)") grpcServer.GracefulStop() grpcListener.Close() } if tailsqlContext != nil { + trace("shutting down tailsql") tailsqlContext.Done() } + trace("closing node notifier") + h.nodeNotifier.Close() + // Close network listeners - promHTTPListener.Close() + trace("closing network listeners") + debugHTTPListener.Close() httpListener.Close() grpcGatewayConn.Close() // Stop listening (and unlink the socket if unix type): + trace("closing socket listener") socketListener.Close() // Close db connections + trace("closing database connection") err = h.db.Close() if err != nil { 
log.Error().Err(err).Msg("Failed to close db") @@ -895,7 +933,7 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) { server := &http.Server{ Addr: h.cfg.TLS.LetsEncrypt.Listen, Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)), - ReadTimeout: types.HTTPReadTimeout, + ReadTimeout: types.HTTPTimeout, } go func() { diff --git a/hscontrol/auth.go b/hscontrol/auth.go index b199fa55..5ee925a6 100644 --- a/hscontrol/auth.go +++ b/hscontrol/auth.go @@ -62,18 +62,18 @@ func logAuthFunc( func (h *Headscale) handleRegister( writer http.ResponseWriter, req *http.Request, - registerRequest tailcfg.RegisterRequest, + regReq tailcfg.RegisterRequest, machineKey key.MachinePublic, ) { - logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey) + logInfo, logTrace, logErr := logAuthFunc(regReq, machineKey) now := time.Now().UTC() logTrace("handleRegister called, looking up machine in DB") - node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey) + node, err := h.db.GetNodeByAnyKey(machineKey, regReq.NodeKey, regReq.OldNodeKey) logTrace("handleRegister database lookup has returned") if errors.Is(err, gorm.ErrRecordNotFound) { // If the node has AuthKey set, handle registration via PreAuthKeys - if registerRequest.Auth.AuthKey != "" { - h.handleAuthKey(writer, registerRequest, machineKey) + if regReq.Auth != nil && regReq.Auth.AuthKey != "" { + h.handleAuthKey(writer, regReq, machineKey) return } @@ -86,7 +86,7 @@ func (h *Headscale) handleRegister( // This is not implemented yet, as it is no strictly required. The only side-effect // is that the client will hammer headscale with requests until it gets a // successful RegisterResponse. 
- if registerRequest.Followup != "" { + if regReq.Followup != "" { logTrace("register request is a followup") if _, ok := h.registrationCache.Get(machineKey.String()); ok { logTrace("Node is waiting for interactive login") @@ -95,7 +95,7 @@ func (h *Headscale) handleRegister( case <-req.Context().Done(): return case <-time.After(registrationHoldoff): - h.handleNewNode(writer, registerRequest, machineKey) + h.handleNewNode(writer, regReq, machineKey) return } @@ -106,7 +106,7 @@ func (h *Headscale) handleRegister( givenName, err := h.db.GenerateGivenName( machineKey, - registerRequest.Hostinfo.Hostname, + regReq.Hostinfo.Hostname, ) if err != nil { logErr(err, "Failed to generate given name for node") @@ -120,16 +120,16 @@ func (h *Headscale) handleRegister( // happens newNode := types.Node{ MachineKey: machineKey, - Hostname: registerRequest.Hostinfo.Hostname, + Hostname: regReq.Hostinfo.Hostname, GivenName: givenName, - NodeKey: registerRequest.NodeKey, + NodeKey: regReq.NodeKey, LastSeen: &now, Expiry: &time.Time{}, } - if !registerRequest.Expiry.IsZero() { + if !regReq.Expiry.IsZero() { logTrace("Non-zero expiry time requested") - newNode.Expiry = ®isterRequest.Expiry + newNode.Expiry = ®Req.Expiry } h.registrationCache.Set( @@ -138,7 +138,7 @@ func (h *Headscale) handleRegister( registerCacheExpiration, ) - h.handleNewNode(writer, registerRequest, machineKey) + h.handleNewNode(writer, regReq, machineKey) return } @@ -169,11 +169,11 @@ func (h *Headscale) handleRegister( // - Trying to log out (sending a expiry in the past) // - A valid, registered node, looking for /map // - Expired node wanting to reauthenticate - if node.NodeKey.String() == registerRequest.NodeKey.String() { + if node.NodeKey.String() == regReq.NodeKey.String() { // The client sends an Expiry in the past if the client is requesting to expire the key (aka logout) // https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648 - if !registerRequest.Expiry.IsZero() && - 
registerRequest.Expiry.UTC().Before(now) { + if !regReq.Expiry.IsZero() && + regReq.Expiry.UTC().Before(now) { h.handleNodeLogOut(writer, *node, machineKey) return @@ -189,11 +189,11 @@ func (h *Headscale) handleRegister( } // The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration - if node.NodeKey.String() == registerRequest.OldNodeKey.String() && + if node.NodeKey.String() == regReq.OldNodeKey.String() && !node.IsExpired() { h.handleNodeKeyRefresh( writer, - registerRequest, + regReq, *node, machineKey, ) @@ -202,11 +202,11 @@ func (h *Headscale) handleRegister( } // When logged out and reauthenticating with OIDC, the OldNodeKey is not passed, but the NodeKey has changed - if node.NodeKey.String() != registerRequest.NodeKey.String() && - registerRequest.OldNodeKey.IsZero() && !node.IsExpired() { + if node.NodeKey.String() != regReq.NodeKey.String() && + regReq.OldNodeKey.IsZero() && !node.IsExpired() { h.handleNodeKeyRefresh( writer, - registerRequest, + regReq, *node, machineKey, ) @@ -214,7 +214,7 @@ func (h *Headscale) handleRegister( return } - if registerRequest.Followup != "" { + if regReq.Followup != "" { select { case <-req.Context().Done(): return @@ -223,7 +223,7 @@ func (h *Headscale) handleRegister( } // The node has expired or it is logged out - h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey) + h.handleNodeExpiredOrLoggedOut(writer, regReq, *node, machineKey) // TODO(juan): RegisterRequest includes an Expiry time, that we could optionally use node.Expiry = &time.Time{} @@ -232,7 +232,7 @@ func (h *Headscale) handleRegister( // we need to make sure the NodeKey matches the one in the request // TODO(juan): What happens when using fast user switching between two // headscale-managed tailnets? - node.NodeKey = registerRequest.NodeKey + node.NodeKey = regReq.NodeKey h.registrationCache.Set( machineKey.String(), *node, @@ -273,8 +273,6 @@ func (h *Headscale) handleAuthKey( Err(err). 
Msg("Cannot encode message") http.Error(writer, "Internal server error", http.StatusInternalServerError) - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() return } @@ -294,13 +292,6 @@ func (h *Headscale) handleAuthKey( Str("node", registerRequest.Hostinfo.Hostname). Msg("Failed authentication via AuthKey") - if pak != nil { - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() - } else { - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", "unknown").Inc() - } - return } @@ -323,14 +314,21 @@ func (h *Headscale) handleAuthKey( Msg("node was already registered before, refreshing with new auth key") node.NodeKey = nodeKey - node.AuthKeyID = uint(pak.ID) - err := h.db.NodeSetExpiry(node.ID, registerRequest.Expiry) + pakID := uint(pak.ID) + if pakID != 0 { + node.AuthKeyID = &pakID + } + + node.Expiry = ®isterRequest.Expiry + node.User = pak.User + node.UserID = pak.UserID + err := h.db.DB.Save(node).Error if err != nil { log.Error(). Caller(). Str("node", node.Hostname). Err(err). 
- Msg("Failed to refresh node") + Msg("failed to save node after logging in with auth key") return } @@ -352,13 +350,8 @@ func (h *Headscale) handleAuthKey( } } - mkey := node.MachineKey - update := types.StateUpdateExpire(node.ID, registerRequest.Expiry) - - if update.Valid() { - ctx := types.NotifyCtx(context.Background(), "handle-authkey", "na") - h.nodeNotifier.NotifyWithIgnore(ctx, update, mkey.String()) - } + ctx := types.NotifyCtx(context.Background(), "handle-authkey", "na") + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{Type: types.StatePeerChanged, ChangeNodes: []types.NodeID{node.ID}}) } else { now := time.Now().UTC() @@ -384,11 +377,10 @@ func (h *Headscale) handleAuthKey( Expiry: ®isterRequest.Expiry, NodeKey: nodeKey, LastSeen: &now, - AuthKeyID: uint(pak.ID), ForcedTags: pak.Proto().GetAclTags(), } - addrs, err := h.ipAlloc.Next() + ipv4, ipv6, err := h.ipAlloc.Next() if err != nil { log.Error(). Caller(). @@ -400,24 +392,26 @@ func (h *Headscale) handleAuthKey( return } + pakID := uint(pak.ID) + if pakID != 0 { + nodeToRegister.AuthKeyID = &pakID + } node, err = h.db.RegisterNode( nodeToRegister, - addrs, + ipv4, ipv6, ) if err != nil { log.Error(). Caller(). Err(err). Msg("could not register node") - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return } } - err = h.db.DB.Transaction(func(tx *gorm.DB) error { + h.db.Write(func(tx *gorm.DB) error { return db.UsePreAuthKey(tx, pak) }) if err != nil { @@ -425,8 +419,6 @@ func (h *Headscale) handleAuthKey( Caller(). Err(err). Msg("Failed to use pre-auth key") - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return @@ -445,14 +437,10 @@ func (h *Headscale) handleAuthKey( Str("node", registerRequest.Hostinfo.Hostname). Err(err). 
Msg("Cannot encode message") - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return } - nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "success", pak.User.Name). - Inc() writer.Header().Set("Content-Type", "application/json; charset=utf-8") writer.WriteHeader(http.StatusOK) _, err = writer.Write(respBody) @@ -466,7 +454,6 @@ func (h *Headscale) handleAuthKey( log.Info(). Str("node", registerRequest.Hostinfo.Hostname). - Str("ips", strings.Join(node.IPAddresses.StringSlice(), ", ")). Msg("Successfully authenticated via AuthKey") } @@ -538,11 +525,8 @@ func (h *Headscale) handleNodeLogOut( return } - stateUpdate := types.StateUpdateExpire(node.ID, now) - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na") - h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na") + h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, now), node.ID) resp.AuthURL = "" resp.MachineAuthorized = false @@ -572,7 +556,7 @@ func (h *Headscale) handleNodeLogOut( } if node.IsEphemeral() { - err = h.db.DeleteNode(&node, h.nodeNotifier.ConnectedMap()) + changedNodes, err := h.db.DeleteNode(&node, h.nodeNotifier.LikelyConnectedMap()) if err != nil { log.Error(). Err(err). 
@@ -580,13 +564,16 @@ func (h *Headscale) handleNodeLogOut( Msg("Cannot delete ephemeral node from the database") } - stateUpdate := types.StateUpdate{ + ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na") + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ Type: types.StatePeerRemoved, - Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)}, - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na") - h.nodeNotifier.NotifyAll(ctx, stateUpdate) + Removed: []types.NodeID{node.ID}, + }) + if changedNodes != nil { + h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: changedNodes, + }) } return @@ -622,14 +609,10 @@ func (h *Headscale) handleNodeWithValidRegistration( Caller(). Err(err). Msg("Cannot encode message") - nodeRegistrations.WithLabelValues("update", "web", "error", node.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return } - nodeRegistrations.WithLabelValues("update", "web", "success", node.User.Name). - Inc() writer.Header().Set("Content-Type", "application/json; charset=utf-8") writer.WriteHeader(http.StatusOK) @@ -660,7 +643,7 @@ func (h *Headscale) handleNodeKeyRefresh( Str("node", node.Hostname). Msg("We have the OldNodeKey in the database. 
This is a key refresh") - err := h.db.DB.Transaction(func(tx *gorm.DB) error { + err := h.db.Write(func(tx *gorm.DB) error { return db.NodeSetNodeKey(tx, &node, registerRequest.NodeKey) }) if err != nil { @@ -706,14 +689,14 @@ func (h *Headscale) handleNodeKeyRefresh( func (h *Headscale) handleNodeExpiredOrLoggedOut( writer http.ResponseWriter, - registerRequest tailcfg.RegisterRequest, + regReq tailcfg.RegisterRequest, node types.Node, machineKey key.MachinePublic, ) { resp := tailcfg.RegisterResponse{} - if registerRequest.Auth.AuthKey != "" { - h.handleAuthKey(writer, registerRequest, machineKey) + if regReq.Auth != nil && regReq.Auth.AuthKey != "" { + h.handleAuthKey(writer, regReq, machineKey) return } @@ -723,8 +706,8 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut( Caller(). Str("node", node.Hostname). Str("machine_key", machineKey.ShortString()). - Str("node_key", registerRequest.NodeKey.ShortString()). - Str("node_key_old", registerRequest.OldNodeKey.ShortString()). + Str("node_key", regReq.NodeKey.ShortString()). + Str("node_key_old", regReq.OldNodeKey.ShortString()). Msg("Node registration has expired or logged out. Sending a auth url to register") if h.oauth2Config != nil { @@ -743,14 +726,10 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut( Caller(). Err(err). Msg("Cannot encode message") - nodeRegistrations.WithLabelValues("reauth", "web", "error", node.User.Name). - Inc() http.Error(writer, "Internal server error", http.StatusInternalServerError) return } - nodeRegistrations.WithLabelValues("reauth", "web", "success", node.User.Name). - Inc() writer.Header().Set("Content-Type", "application/json; charset=utf-8") writer.WriteHeader(http.StatusOK) @@ -765,8 +744,8 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut( log.Trace(). Caller(). Str("machine_key", machineKey.ShortString()). - Str("node_key", registerRequest.NodeKey.ShortString()). - Str("node_key_old", registerRequest.OldNodeKey.ShortString()). 
+ Str("node_key", regReq.NodeKey.ShortString()). + Str("node_key_old", regReq.OldNodeKey.ShortString()). Str("node", node.Hostname). Msg("Node logged out. Sent AuthURL for reauthentication") } diff --git a/hscontrol/auth_noise.go b/hscontrol/auth_noise.go index 323a49b0..6659dfa5 100644 --- a/hscontrol/auth_noise.go +++ b/hscontrol/auth_noise.go @@ -33,7 +33,6 @@ func (ns *noiseServer) NoiseRegistrationHandler( Caller(). Err(err). Msg("Cannot parse RegisterRequest") - nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc() http.Error(writer, "Internal error", http.StatusInternalServerError) return diff --git a/hscontrol/db/db.go b/hscontrol/db/db.go index 870ad599..a30939c1 100644 --- a/hscontrol/db/db.go +++ b/hscontrol/db/db.go @@ -5,6 +5,7 @@ import ( "database/sql" "errors" "fmt" + "net/netip" "path/filepath" "strconv" "strings" @@ -90,7 +91,8 @@ func NewHeadscaleDatabase( _ = tx.Migrator(). RenameColumn(&types.Node{}, "nickname", "given_name") - // If the Node table has a column for registered, + dbConn.Model(&types.Node{}).Where("auth_key_id = ?", 0).Update("auth_key_id", nil) + // If the Node table has a column for registered, // find all occourences of "false" and drop them. Then // remove the column. if tx.Migrator().HasColumn(&types.Node{}, "registered") { @@ -330,6 +332,75 @@ func NewHeadscaleDatabase( return nil }, }, + { + // Replace column with IP address list with dedicated + // IP v4 and v6 column. + // Note that previously, the list _could_ contain more + // than two addresses, which should not really happen. + // In that case, the first occurence of each type will + // be kept. 
+ ID: "2024041121742", + Migrate: func(tx *gorm.DB) error { + _ = tx.Migrator().AddColumn(&types.Node{}, "ipv4") + _ = tx.Migrator().AddColumn(&types.Node{}, "ipv6") + + type node struct { + ID uint64 `gorm:"column:id"` + Addresses string `gorm:"column:ip_addresses"` + } + + var nodes []node + + _ = tx.Raw("SELECT id, ip_addresses FROM nodes").Scan(&nodes).Error + + for _, node := range nodes { + addrs := strings.Split(node.Addresses, ",") + + if len(addrs) == 0 { + return fmt.Errorf("no addresses found for node(%d)", node.ID) + } + + var v4 *netip.Addr + var v6 *netip.Addr + + for _, addrStr := range addrs { + addr, err := netip.ParseAddr(addrStr) + if err != nil { + return fmt.Errorf("parsing IP for node(%d) from database: %w", node.ID, err) + } + + if addr.Is4() && v4 == nil { + v4 = &addr + } + + if addr.Is6() && v6 == nil { + v6 = &addr + } + } + + if v4 != nil { + err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv4", v4.String()).Error + if err != nil { + return fmt.Errorf("saving ip addresses to new columns: %w", err) + } + } + + if v6 != nil { + err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv6", v6.String()).Error + if err != nil { + return fmt.Errorf("saving ip addresses to new columns: %w", err) + } + } + } + + _ = tx.Migrator().DropColumn(&types.Node{}, "ip_addresses") + + return nil + }, + Rollback: func(tx *gorm.DB) error { + return nil + }, + }, }, ) @@ -371,8 +442,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { db, err := gorm.Open( sqlite.Open(cfg.Sqlite.Path+"?_synchronous=1&_journal_mode=WAL"), &gorm.Config{ - DisableForeignKeyConstraintWhenMigrating: true, - Logger: dbLogger, + Logger: dbLogger, }, ) @@ -418,8 +488,7 @@ func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) { } db, err := gorm.Open(postgres.Open(dbString), &gorm.Config{ - DisableForeignKeyConstraintWhenMigrating: true, - Logger: dbLogger, + Logger: dbLogger, }) if err != nil { return nil, err diff --git a/hscontrol/db/ip.go 
b/hscontrol/db/ip.go index dc49f8af..7d06e2e8 100644 --- a/hscontrol/db/ip.go +++ b/hscontrol/db/ip.go @@ -1,13 +1,17 @@ package db import ( + "crypto/rand" + "database/sql" "errors" "fmt" + "math/big" "net/netip" "sync" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" + "github.com/rs/zerolog/log" "go4.org/netipx" "gorm.io/gorm" ) @@ -20,13 +24,16 @@ import ( type IPAllocator struct { mu sync.Mutex - prefix4 netip.Prefix - prefix6 netip.Prefix + prefix4 *netip.Prefix + prefix6 *netip.Prefix // Previous IPs handed out prev4 netip.Addr prev6 netip.Addr + // strategy used for handing out IP addresses. + strategy types.IPAllocationStrategy + // Set of all IPs handed out. // This might not be in sync with the database, // but it is more conservative. If saves to the @@ -40,40 +47,71 @@ type IPAllocator struct { // provided IPv4 and IPv6 prefix. It needs to be created // when headscale starts and needs to finish its read // transaction before any writes to the database occur. 
-func NewIPAllocator(db *HSDatabase, prefix4, prefix6 netip.Prefix) (*IPAllocator, error) { - var addressesSlices []string +func NewIPAllocator( + db *HSDatabase, + prefix4, prefix6 *netip.Prefix, + strategy types.IPAllocationStrategy, +) (*IPAllocator, error) { + ret := IPAllocator{ + prefix4: prefix4, + prefix6: prefix6, + + strategy: strategy, + } + + var v4s []sql.NullString + var v6s []sql.NullString if db != nil { - db.Read(func(rx *gorm.DB) error { - return rx.Model(&types.Node{}).Pluck("ip_addresses", &addressesSlices).Error + err := db.Read(func(rx *gorm.DB) error { + return rx.Model(&types.Node{}).Pluck("ipv4", &v4s).Error }) + if err != nil { + return nil, fmt.Errorf("reading IPv4 addresses from database: %w", err) + } + + err = db.Read(func(rx *gorm.DB) error { + return rx.Model(&types.Node{}).Pluck("ipv6", &v6s).Error + }) + if err != nil { + return nil, fmt.Errorf("reading IPv6 addresses from database: %w", err) + } + } var ips netipx.IPSetBuilder // Add network and broadcast addrs to used pool so they // are not handed out to nodes. - network4, broadcast4 := util.GetIPPrefixEndpoints(prefix4) - network6, broadcast6 := util.GetIPPrefixEndpoints(prefix6) - ips.Add(network4) - ips.Add(broadcast4) - ips.Add(network6) - ips.Add(broadcast6) + if prefix4 != nil { + network4, broadcast4 := util.GetIPPrefixEndpoints(*prefix4) + ips.Add(network4) + ips.Add(broadcast4) + + // Use network as starting point, it will be used to call .Next() + // TODO(kradalby): Could potentially take all the IPs loaded from + // the database into account to start at a more "educated" location. + ret.prev4 = network4 + } + + if prefix6 != nil { + network6, broadcast6 := util.GetIPPrefixEndpoints(*prefix6) + ips.Add(network6) + ips.Add(broadcast6) + + ret.prev6 = network6 + } // Fetch all the IP Addresses currently handed out from the Database // and add them to the used IP set. 
- for _, slice := range addressesSlices { - var machineAddresses types.NodeAddresses - err := machineAddresses.Scan(slice) - if err != nil { - return nil, fmt.Errorf( - "parsing IPs from database %v: %w", machineAddresses, - err, - ) - } + for _, addrStr := range append(v4s, v6s...) { + if addrStr.Valid { + addr, err := netip.ParseAddr(addrStr.String) + if err != nil { + return nil, fmt.Errorf("parsing IP address from database: %w", err) + } - for _, ip := range machineAddresses { - ips.Add(ip) + ips.Add(addr) } } @@ -86,42 +124,61 @@ func NewIPAllocator(db *HSDatabase, prefix4, prefix6 netip.Prefix) (*IPAllocator ) } - return &IPAllocator{ - usedIPs: ips, + ret.usedIPs = ips - prefix4: prefix4, - prefix6: prefix6, - - // Use network as starting point, it will be used to call .Next() - // TODO(kradalby): Could potentially take all the IPs loaded from - // the database into account to start at a more "educated" location. - prev4: network4, - prev6: network6, - }, nil + return &ret, nil } -func (i *IPAllocator) Next() (types.NodeAddresses, error) { +func (i *IPAllocator) Next() (*netip.Addr, *netip.Addr, error) { i.mu.Lock() defer i.mu.Unlock() - v4, err := i.next(i.prev4, i.prefix4) - if err != nil { - return nil, fmt.Errorf("allocating IPv4 address: %w", err) + var err error + var ret4 *netip.Addr + var ret6 *netip.Addr + + if i.prefix4 != nil { + ret4, err = i.next(i.prev4, i.prefix4) + if err != nil { + return nil, nil, fmt.Errorf("allocating IPv4 address: %w", err) + } + i.prev4 = *ret4 } - v6, err := i.next(i.prev6, i.prefix6) - if err != nil { - return nil, fmt.Errorf("allocating IPv6 address: %w", err) + if i.prefix6 != nil { + ret6, err = i.next(i.prev6, i.prefix6) + if err != nil { + return nil, nil, fmt.Errorf("allocating IPv6 address: %w", err) + } + i.prev6 = *ret6 } - return types.NodeAddresses{*v4, *v6}, nil + return ret4, ret6, nil } var ErrCouldNotAllocateIP = errors.New("failed to allocate IP") -func (i *IPAllocator) next(prev netip.Addr, prefix 
netip.Prefix) (*netip.Addr, error) { - // Get the first IP in our prefix - ip := prev.Next() +func (i *IPAllocator) nextLocked(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) { + i.mu.Lock() + defer i.mu.Unlock() + + return i.next(prev, prefix) +} + +func (i *IPAllocator) next(prev netip.Addr, prefix *netip.Prefix) (*netip.Addr, error) { + var err error + var ip netip.Addr + + switch i.strategy { + case types.IPAllocationStrategySequential: + // Get the first IP in our prefix + ip = prev.Next() + case types.IPAllocationStrategyRandom: + ip, err = randomNext(*prefix) + if err != nil { + return nil, fmt.Errorf("getting random IP: %w", err) + } + } // TODO(kradalby): maybe this can be done less often. set, err := i.usedIPs.IPSet() @@ -136,7 +193,15 @@ func (i *IPAllocator) next(prev netip.Addr, prefix netip.Prefix) (*netip.Addr, e // Check if the IP has already been allocated. if set.Contains(ip) { - ip = ip.Next() + switch i.strategy { + case types.IPAllocationStrategySequential: + ip = ip.Next() + case types.IPAllocationStrategyRandom: + ip, err = randomNext(*prefix) + if err != nil { + return nil, fmt.Errorf("getting random IP: %w", err) + } + } continue } @@ -146,3 +211,120 @@ func (i *IPAllocator) next(prev netip.Addr, prefix netip.Prefix) (*netip.Addr, e return &ip, nil } } + +func randomNext(pfx netip.Prefix) (netip.Addr, error) { + rang := netipx.RangeOfPrefix(pfx) + fromIP, toIP := rang.From(), rang.To() + + var from, to big.Int + + from.SetBytes(fromIP.AsSlice()) + to.SetBytes(toIP.AsSlice()) + + // Find the max, this is how we can do "random range", + // get the "max" as 0 -> to - from and then add back from + // after. 
+ tempMax := big.NewInt(0).Sub(&to, &from) + + out, err := rand.Int(rand.Reader, tempMax) + if err != nil { + return netip.Addr{}, fmt.Errorf("generating random IP: %w", err) + } + + valInRange := big.NewInt(0).Add(&from, out) + + ip, ok := netip.AddrFromSlice(valInRange.Bytes()) + if !ok { + return netip.Addr{}, fmt.Errorf("generated ip bytes are invalid ip") + } + + if !pfx.Contains(ip) { + return netip.Addr{}, fmt.Errorf( + "generated ip(%s) not in prefix(%s)", + ip.String(), + pfx.String(), + ) + } + + return ip, nil +} + +// BackfillNodeIPs will take a database transaction, and +// iterate through all of the current nodes in headscale +// and ensure it has IP addresses according to the current +// configuration. +// This means that if both IPv4 and IPv6 is set in the +// config, and some nodes are missing that type of IP, +// it will be added. +// If a prefix type has been removed (IPv4 or IPv6), it +// will remove the IPs in that family from the node. +func (db *HSDatabase) BackfillNodeIPs(i *IPAllocator) ([]string, error) { + var err error + var ret []string + err = db.Write(func(tx *gorm.DB) error { + if i == nil { + return errors.New("backfilling IPs: ip allocator was nil") + } + + log.Trace().Msgf("starting to backfill IPs") + + nodes, err := ListNodes(tx) + if err != nil { + return fmt.Errorf("listing nodes to backfill IPs: %w", err) + } + + for _, node := range nodes { + log.Trace().Uint64("node.id", node.ID.Uint64()).Msg("checking if need backfill") + + changed := false + // IPv4 prefix is set, but node ip is missing, alloc + if i.prefix4 != nil && node.IPv4 == nil { + ret4, err := i.nextLocked(i.prev4, i.prefix4) + if err != nil { + return fmt.Errorf("failed to allocate ipv4 for node(%d): %w", node.ID, err) + } + + node.IPv4 = ret4 + changed = true + ret = append(ret, fmt.Sprintf("assigned IPv4 %q to Node(%d) %q", ret4.String(), node.ID, node.Hostname)) + } + + // IPv6 prefix is set, but node ip is missing, alloc + if i.prefix6 != nil && node.IPv6 == 
nil { + ret6, err := i.nextLocked(i.prev6, i.prefix6) + if err != nil { + return fmt.Errorf("failed to allocate ipv6 for node(%d): %w", node.ID, err) + } + + node.IPv6 = ret6 + changed = true + ret = append(ret, fmt.Sprintf("assigned IPv6 %q to Node(%d) %q", ret6.String(), node.ID, node.Hostname)) + } + + // IPv4 prefix is not set, but node has IP, remove + if i.prefix4 == nil && node.IPv4 != nil { + ret = append(ret, fmt.Sprintf("removing IPv4 %q from Node(%d) %q", node.IPv4.String(), node.ID, node.Hostname)) + node.IPv4 = nil + changed = true + } + + // IPv6 prefix is not set, but node has IP, remove + if i.prefix6 == nil && node.IPv6 != nil { + ret = append(ret, fmt.Sprintf("removing IPv6 %q from Node(%d) %q", node.IPv6.String(), node.ID, node.Hostname)) + node.IPv6 = nil + changed = true + } + + if changed { + err := tx.Save(node).Error + if err != nil { + return fmt.Errorf("saving node(%d) after adding IPs: %w", node.ID, err) + } + } + } + + return nil + }) + + return ret, err +} diff --git a/hscontrol/db/ip_test.go b/hscontrol/db/ip_test.go index 17f39c81..c922fcdf 100644 --- a/hscontrol/db/ip_test.go +++ b/hscontrol/db/ip_test.go @@ -1,49 +1,41 @@ package db import ( + "database/sql" + "fmt" "net/netip" - "os" + "strings" "testing" "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" ) -func TestIPAllocator(t *testing.T) { - mpp := func(pref string) netip.Prefix { - return netip.MustParsePrefix(pref) - } - na := func(pref string) netip.Addr { - return netip.MustParseAddr(pref) - } - newDb := func() *HSDatabase { - tmpDir, err := os.MkdirTemp("", "headscale-db-test-*") - if err != nil { - t.Fatalf("creating temp dir: %s", err) - } - db, _ = NewHeadscaleDatabase( - types.DatabaseConfig{ - Type: "sqlite3", - Sqlite: types.SqliteConfig{ - Path: tmpDir + "/headscale_test.db", - }, - }, - "", - ) - - return db - } +var 
mpp = func(pref string) *netip.Prefix { + p := netip.MustParsePrefix(pref) + return &p +} +var na = func(pref string) netip.Addr { + return netip.MustParseAddr(pref) +} +var nap = func(pref string) *netip.Addr { + n := na(pref) + return &n +} +func TestIPAllocatorSequential(t *testing.T) { tests := []struct { name string dbFunc func() *HSDatabase - prefix4 netip.Prefix - prefix6 netip.Prefix + prefix4 *netip.Prefix + prefix6 *netip.Prefix getCount int - want []types.NodeAddresses + want4 []netip.Addr + want6 []netip.Addr }{ { name: "simple", @@ -56,23 +48,52 @@ func TestIPAllocator(t *testing.T) { getCount: 1, - want: []types.NodeAddresses{ - { - na("100.64.0.1"), - na("fd7a:115c:a1e0::1"), - }, + want4: []netip.Addr{ + na("100.64.0.1"), + }, + want6: []netip.Addr{ + na("fd7a:115c:a1e0::1"), + }, + }, + { + name: "simple-v4", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix4: mpp("100.64.0.0/10"), + + getCount: 1, + + want4: []netip.Addr{ + na("100.64.0.1"), + }, + }, + { + name: "simple-v6", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix6: mpp("fd7a:115c:a1e0::/48"), + + getCount: 1, + + want6: []netip.Addr{ + na("fd7a:115c:a1e0::1"), }, }, { name: "simple-with-db", dbFunc: func() *HSDatabase { - db := newDb() + db := dbForTest(t, "simple-with-db") + user := types.User{Name: ""} + db.DB.Save(&user) db.DB.Save(&types.Node{ - IPAddresses: types.NodeAddresses{ - na("100.64.0.1"), - na("fd7a:115c:a1e0::1"), - }, + User: user, + IPv4: nap("100.64.0.1"), + IPv6: nap("fd7a:115c:a1e0::1"), }) return db @@ -83,23 +104,24 @@ func TestIPAllocator(t *testing.T) { getCount: 1, - want: []types.NodeAddresses{ - { - na("100.64.0.2"), - na("fd7a:115c:a1e0::2"), - }, + want4: []netip.Addr{ + na("100.64.0.2"), + }, + want6: []netip.Addr{ + na("fd7a:115c:a1e0::2"), }, }, { name: "before-after-free-middle-in-db", dbFunc: func() *HSDatabase { - db := newDb() + db := dbForTest(t, "before-after-free-middle-in-db") + user := types.User{Name: ""} + db.DB.Save(&user) 
db.DB.Save(&types.Node{ - IPAddresses: types.NodeAddresses{ - na("100.64.0.2"), - na("fd7a:115c:a1e0::2"), - }, + User: user, + IPv4: nap("100.64.0.2"), + IPv6: nap("fd7a:115c:a1e0::2"), }) return db @@ -110,15 +132,13 @@ func TestIPAllocator(t *testing.T) { getCount: 2, - want: []types.NodeAddresses{ - { - na("100.64.0.1"), - na("fd7a:115c:a1e0::1"), - }, - { - na("100.64.0.3"), - na("fd7a:115c:a1e0::3"), - }, + want4: []netip.Addr{ + na("100.64.0.1"), + na("100.64.0.3"), + }, + want6: []netip.Addr{ + na("fd7a:115c:a1e0::1"), + na("fd7a:115c:a1e0::3"), }, }, } @@ -127,24 +147,367 @@ func TestIPAllocator(t *testing.T) { t.Run(tt.name, func(t *testing.T) { db := tt.dbFunc() - alloc, _ := NewIPAllocator(db, tt.prefix4, tt.prefix6) + alloc, _ := NewIPAllocator( + db, + tt.prefix4, + tt.prefix6, + types.IPAllocationStrategySequential, + ) spew.Dump(alloc) - t.Logf("prefixes: %q, %q", tt.prefix4.String(), tt.prefix6.String()) - - var got []types.NodeAddresses + var got4s []netip.Addr + var got6s []netip.Addr for range tt.getCount { - gotSet, err := alloc.Next() + got4, got6, err := alloc.Next() if err != nil { t.Fatalf("allocating next IP: %s", err) } - got = append(got, gotSet) + if got4 != nil { + got4s = append(got4s, *got4) + } + + if got6 != nil { + got6s = append(got6s, *got6) + } } - if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" { - t.Errorf("IPAllocator unexpected result (-want +got):\n%s", diff) + if diff := cmp.Diff(tt.want4, got4s, util.Comparers...); diff != "" { + t.Errorf("IPAllocator 4s unexpected result (-want +got):\n%s", diff) + } + + if diff := cmp.Diff(tt.want6, got6s, util.Comparers...); diff != "" { + t.Errorf("IPAllocator 6s unexpected result (-want +got):\n%s", diff) + } + }) + } +} + +func TestIPAllocatorRandom(t *testing.T) { + tests := []struct { + name string + dbFunc func() *HSDatabase + + getCount int + + prefix4 *netip.Prefix + prefix6 *netip.Prefix + want4 bool + want6 bool + }{ + { + name: "simple", + dbFunc: func() 
*HSDatabase { + return nil + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + getCount: 1, + + want4: true, + want6: true, + }, + { + name: "simple-v4", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix4: mpp("100.64.0.0/10"), + + getCount: 1, + + want4: true, + want6: false, + }, + { + name: "simple-v6", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix6: mpp("fd7a:115c:a1e0::/48"), + + getCount: 1, + + want4: false, + want6: true, + }, + { + name: "generate-lots-of-random", + dbFunc: func() *HSDatabase { + return nil + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + getCount: 1000, + + want4: true, + want6: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := tt.dbFunc() + + alloc, _ := NewIPAllocator(db, tt.prefix4, tt.prefix6, types.IPAllocationStrategyRandom) + + spew.Dump(alloc) + + for range tt.getCount { + got4, got6, err := alloc.Next() + if err != nil { + t.Fatalf("allocating next IP: %s", err) + } + + t.Logf("addrs ipv4: %v, ipv6: %v", got4, got6) + + if tt.want4 { + if got4 == nil { + t.Fatalf("expected ipv4 addr, got nil") + } + } + + if tt.want6 { + if got6 == nil { + t.Fatalf("expected ipv6 addr, got nil") + } + } + } + }) + } +} + +func TestBackfillIPAddresses(t *testing.T) { + fullNodeP := func(i int) *types.Node { + v4 := fmt.Sprintf("100.64.0.%d", i) + v6 := fmt.Sprintf("fd7a:115c:a1e0::%d", i) + return &types.Node{ + IPv4DatabaseField: sql.NullString{ + Valid: true, + String: v4, + }, + IPv4: nap(v4), + IPv6DatabaseField: sql.NullString{ + Valid: true, + String: v6, + }, + IPv6: nap(v6), + } + } + tests := []struct { + name string + dbFunc func() *HSDatabase + + prefix4 *netip.Prefix + prefix6 *netip.Prefix + want types.Nodes + }{ + { + name: "simple-backfill-ipv6", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-ipv6") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + 
User: user, + IPv4: nap("100.64.0.1"), + }) + + return db + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + want: types.Nodes{ + &types.Node{ + IPv4DatabaseField: sql.NullString{ + Valid: true, + String: "100.64.0.1", + }, + IPv4: nap("100.64.0.1"), + IPv6DatabaseField: sql.NullString{ + Valid: true, + String: "fd7a:115c:a1e0::1", + }, + IPv6: nap("fd7a:115c:a1e0::1"), + }, + }, + }, + { + name: "simple-backfill-ipv4", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-ipv4") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv6: nap("fd7a:115c:a1e0::1"), + }) + + return db + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + want: types.Nodes{ + &types.Node{ + IPv4DatabaseField: sql.NullString{ + Valid: true, + String: "100.64.0.1", + }, + IPv4: nap("100.64.0.1"), + IPv6DatabaseField: sql.NullString{ + Valid: true, + String: "fd7a:115c:a1e0::1", + }, + IPv6: nap("fd7a:115c:a1e0::1"), + }, + }, + }, + { + name: "simple-backfill-remove-ipv6", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-remove-ipv6") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.1"), + IPv6: nap("fd7a:115c:a1e0::1"), + }) + + return db + }, + + prefix4: mpp("100.64.0.0/10"), + + want: types.Nodes{ + &types.Node{ + IPv4DatabaseField: sql.NullString{ + Valid: true, + String: "100.64.0.1", + }, + IPv4: nap("100.64.0.1"), + }, + }, + }, + { + name: "simple-backfill-remove-ipv4", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-remove-ipv4") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.1"), + IPv6: nap("fd7a:115c:a1e0::1"), + }) + + return db + }, + + prefix6: mpp("fd7a:115c:a1e0::/48"), + + want: types.Nodes{ + &types.Node{ + IPv6DatabaseField: sql.NullString{ + Valid: true, + String: 
"fd7a:115c:a1e0::1", + }, + IPv6: nap("fd7a:115c:a1e0::1"), + }, + }, + }, + { + name: "multi-backfill-ipv6", + dbFunc: func() *HSDatabase { + db := dbForTest(t, "simple-backfill-ipv6") + user := types.User{Name: ""} + db.DB.Save(&user) + + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.1"), + }) + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.2"), + }) + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.3"), + }) + db.DB.Save(&types.Node{ + User: user, + IPv4: nap("100.64.0.4"), + }) + + return db + }, + + prefix4: mpp("100.64.0.0/10"), + prefix6: mpp("fd7a:115c:a1e0::/48"), + + want: types.Nodes{ + fullNodeP(1), + fullNodeP(2), + fullNodeP(3), + fullNodeP(4), + }, + }, + } + + comps := append(util.Comparers, cmpopts.IgnoreFields(types.Node{}, + "ID", + "MachineKeyDatabaseField", + "NodeKeyDatabaseField", + "DiscoKeyDatabaseField", + "User", + "UserID", + "Endpoints", + "HostinfoDatabaseField", + "Hostinfo", + "Routes", + "CreatedAt", + "UpdatedAt", + )) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := tt.dbFunc() + + alloc, err := NewIPAllocator(db, tt.prefix4, tt.prefix6, types.IPAllocationStrategySequential) + if err != nil { + t.Fatalf("failed to set up ip alloc: %s", err) + } + + logs, err := db.BackfillNodeIPs(alloc) + if err != nil { + t.Fatalf("failed to backfill: %s", err) + } + + t.Logf("backfill log: \n%s", strings.Join(logs, "\n")) + + got, err := db.ListNodes() + if err != nil { + t.Fatalf("failed to get nodes: %s", err) + } + + if diff := cmp.Diff(tt.want, got, comps...); diff != "" { + t.Errorf("Backfill unexpected result (-want +got):\n%s", diff) } }) } diff --git a/hscontrol/db/node.go b/hscontrol/db/node.go index d02c2d39..c675dc7c 100644 --- a/hscontrol/db/node.go +++ b/hscontrol/db/node.go @@ -5,12 +5,12 @@ import ( "fmt" "net/netip" "sort" - "strings" "time" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" 
"github.com/patrickmn/go-cache" + "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog/log" "gorm.io/gorm" "tailscale.com/tailcfg" @@ -34,27 +34,22 @@ var ( ) ) -func (hsdb *HSDatabase) ListPeers(node *types.Node) (types.Nodes, error) { +func (hsdb *HSDatabase) ListPeers(nodeID types.NodeID) (types.Nodes, error) { return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) { - return ListPeers(rx, node) + return ListPeers(rx, nodeID) }) } // ListPeers returns all peers of node, regardless of any Policy or if the node is expired. -func ListPeers(tx *gorm.DB, node *types.Node) (types.Nodes, error) { - log.Trace(). - Caller(). - Str("node", node.Hostname). - Msg("Finding direct peers") - +func ListPeers(tx *gorm.DB, nodeID types.NodeID) (types.Nodes, error) { nodes := types.Nodes{} if err := tx. Preload("AuthKey"). Preload("AuthKey.User"). Preload("User"). Preload("Routes"). - Where("node_key <> ?", - node.NodeKey.String()).Find(&nodes).Error; err != nil { + Where("id <> ?", + nodeID).Find(&nodes).Error; err != nil { return types.Nodes{}, err } @@ -119,14 +114,14 @@ func getNode(tx *gorm.DB, user string, name string) (*types.Node, error) { return nil, ErrNodeNotFound } -func (hsdb *HSDatabase) GetNodeByID(id uint64) (*types.Node, error) { +func (hsdb *HSDatabase) GetNodeByID(id types.NodeID) (*types.Node, error) { return Read(hsdb.DB, func(rx *gorm.DB) (*types.Node, error) { return GetNodeByID(rx, id) }) } // GetNodeByID finds a Node by ID and returns the Node struct. -func GetNodeByID(tx *gorm.DB, id uint64) (*types.Node, error) { +func GetNodeByID(tx *gorm.DB, id types.NodeID) (*types.Node, error) { mach := types.Node{} if result := tx. Preload("AuthKey"). @@ -197,7 +192,7 @@ func GetNodeByAnyKey( } func (hsdb *HSDatabase) SetTags( - nodeID uint64, + nodeID types.NodeID, tags []string, ) error { return hsdb.Write(func(tx *gorm.DB) error { @@ -208,10 +203,15 @@ func (hsdb *HSDatabase) SetTags( // SetTags takes a Node struct pointer and update the forced tags. 
func SetTags( tx *gorm.DB, - nodeID uint64, + nodeID types.NodeID, tags []string, ) error { if len(tags) == 0 { + // if no tags are provided, we remove all forced tags + if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("forced_tags", types.StringList{}).Error; err != nil { + return fmt.Errorf("failed to remove tags for node in the database: %w", err) + } + return nil } @@ -238,15 +238,7 @@ func RenameNode(tx *gorm.DB, newName, ) if err != nil { - log.Error(). - Caller(). - Str("func", "RenameNode"). - Uint64("nodeID", nodeID). - Str("newName", newName). - Err(err). - Msg("failed to rename node") - - return err + return fmt.Errorf("renaming node: %w", err) } if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("given_name", newName).Error; err != nil { @@ -256,7 +248,7 @@ func RenameNode(tx *gorm.DB, return nil } -func (hsdb *HSDatabase) NodeSetExpiry(nodeID uint64, expiry time.Time) error { +func (hsdb *HSDatabase) NodeSetExpiry(nodeID types.NodeID, expiry time.Time) error { return hsdb.Write(func(tx *gorm.DB) error { return NodeSetExpiry(tx, nodeID, expiry) }) @@ -264,14 +256,14 @@ func (hsdb *HSDatabase) NodeSetExpiry(nodeID uint64, expiry time.Time) error { // NodeSetExpiry takes a Node struct and a new expiry time. 
func NodeSetExpiry(tx *gorm.DB, - nodeID uint64, expiry time.Time, + nodeID types.NodeID, expiry time.Time, ) error { return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("expiry", expiry).Error } -func (hsdb *HSDatabase) DeleteNode(node *types.Node, isConnected map[key.MachinePublic]bool) error { - return hsdb.Write(func(tx *gorm.DB) error { - return DeleteNode(tx, node, isConnected) +func (hsdb *HSDatabase) DeleteNode(node *types.Node, isLikelyConnected *xsync.MapOf[types.NodeID, bool]) ([]types.NodeID, error) { + return Write(hsdb.DB, func(tx *gorm.DB) ([]types.NodeID, error) { + return DeleteNode(tx, node, isLikelyConnected) }) } @@ -279,24 +271,24 @@ func (hsdb *HSDatabase) DeleteNode(node *types.Node, isConnected map[key.Machine // Caller is responsible for notifying all of change. func DeleteNode(tx *gorm.DB, node *types.Node, - isConnected map[key.MachinePublic]bool, -) error { - err := deleteNodeRoutes(tx, node, map[key.MachinePublic]bool{}) + isLikelyConnected *xsync.MapOf[types.NodeID, bool], +) ([]types.NodeID, error) { + changed, err := deleteNodeRoutes(tx, node, isLikelyConnected) if err != nil { - return err + return changed, err } // Unscoped causes the node to be fully removed from the database. - if err := tx.Unscoped().Delete(&node).Error; err != nil { - return err + if err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error; err != nil { + return changed, err } - return nil + return changed, nil } -// UpdateLastSeen sets a node's last seen field indicating that we +// SetLastSeen sets a node's last seen field indicating that we // have recently communicating with this node. 
-func UpdateLastSeen(tx *gorm.DB, nodeID uint64, lastSeen time.Time) error { +func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error { return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("last_seen", lastSeen).Error } @@ -307,7 +299,8 @@ func RegisterNodeFromAuthCallback( userName string, nodeExpiry *time.Time, registrationMethod string, - addrs types.NodeAddresses, + ipv4 *netip.Addr, + ipv6 *netip.Addr, ) (*types.Node, error) { log.Debug(). Str("machine_key", mkey.ShortString()). @@ -343,7 +336,7 @@ func RegisterNodeFromAuthCallback( node, err := RegisterNode( tx, registrationNode, - addrs, + ipv4, ipv6, ) if err == nil { @@ -359,14 +352,14 @@ func RegisterNodeFromAuthCallback( return nil, ErrNodeNotFoundRegistrationCache } -func (hsdb *HSDatabase) RegisterNode(node types.Node, addrs types.NodeAddresses) (*types.Node, error) { +func (hsdb *HSDatabase) RegisterNode(node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) { return Write(hsdb.DB, func(tx *gorm.DB) (*types.Node, error) { - return RegisterNode(tx, node, addrs) + return RegisterNode(tx, node, ipv4, ipv6) }) } // RegisterNode is executed from the CLI to register a new Node using its MachineKey. -func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*types.Node, error) { +func RegisterNode(tx *gorm.DB, node types.Node, ipv4 *netip.Addr, ipv6 *netip.Addr) (*types.Node, error) { log.Debug(). Str("node", node.Hostname). Str("machine_key", node.MachineKey.ShortString()). @@ -374,10 +367,10 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ Str("user", node.User.Name). 
Msg("Registering node") - // If the node exists and we had already IPs for it, we just save it + // If the node exists and it already has IP(s), we just save it // so we store the node.Expire and node.Nodekey that has been set when // adding it to the registrationCache - if len(node.IPAddresses) > 0 { + if node.IPv4 != nil || node.IPv6 != nil { if err := tx.Save(&node).Error; err != nil { return nil, fmt.Errorf("failed register existing node in the database: %w", err) } @@ -393,7 +386,8 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ return &node, nil } - node.IPAddresses = addrs + node.IPv4 = ipv4 + node.IPv6 = ipv6 if err := tx.Save(&node).Error; err != nil { return nil, fmt.Errorf("failed register(save) node in the database: %w", err) @@ -402,7 +396,6 @@ func RegisterNode(tx *gorm.DB, node types.Node, addrs types.NodeAddresses) (*typ log.Trace(). Caller(). Str("node", node.Hostname). - Str("ip", strings.Join(addrs.StringSlice(), ",")). Msg("Node registered with the database") return &node, nil @@ -456,13 +449,7 @@ func GetAdvertisedRoutes(tx *gorm.DB, node *types.Node) ([]netip.Prefix, error) Preload("Node"). Where("node_id = ? AND advertised = ?", node.ID, true).Find(&routes).Error if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error(). - Caller(). - Err(err). - Str("node", node.Hostname). - Msg("Could not get advertised routes for node") - - return nil, err + return nil, fmt.Errorf("getting advertised routes for node(%d): %w", node.ID, err) } prefixes := []netip.Prefix{} @@ -488,13 +475,7 @@ func GetEnabledRoutes(tx *gorm.DB, node *types.Node) ([]netip.Prefix, error) { Where("node_id = ? AND advertised = ? AND enabled = ?", node.ID, true, true). Find(&routes).Error if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error(). - Caller(). - Err(err). - Str("node", node.Hostname). 
- Msg("Could not get enabled routes for node") - - return nil, err + return nil, fmt.Errorf("getting enabled routes for node(%d): %w", node.ID, err) } prefixes := []netip.Prefix{} @@ -513,8 +494,6 @@ func IsRoutesEnabled(tx *gorm.DB, node *types.Node, routeStr string) bool { enabledRoutes, err := GetEnabledRoutes(tx, node) if err != nil { - log.Error().Err(err).Msg("Could not get enabled routes") - return false } @@ -606,7 +585,7 @@ func enableRoutes(tx *gorm.DB, return &types.StateUpdate{ Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, + ChangeNodes: []types.NodeID{node.ID}, Message: "created in db.enableRoutes", }, nil } @@ -681,59 +660,49 @@ func GenerateGivenName( return givenName, nil } -func ExpireEphemeralNodes(tx *gorm.DB, - inactivityThreshhold time.Duration, -) (types.StateUpdate, bool) { +func DeleteExpiredEphemeralNodes(tx *gorm.DB, + inactivityThreshold time.Duration, +) ([]types.NodeID, []types.NodeID) { users, err := ListUsers(tx) if err != nil { - log.Error().Err(err).Msg("Error listing users") - - return types.StateUpdate{}, false + return nil, nil } - expired := make([]tailcfg.NodeID, 0) + var expired []types.NodeID + var changedNodes []types.NodeID for _, user := range users { nodes, err := ListNodesByUser(tx, user.Name) if err != nil { - log.Error(). - Err(err). - Str("user", user.Name). - Msg("Error listing nodes in user") - - return types.StateUpdate{}, false + return nil, nil } for idx, node := range nodes { if node.IsEphemeral() && node.LastSeen != nil && time.Now(). - After(node.LastSeen.Add(inactivityThreshhold)) { - expired = append(expired, tailcfg.NodeID(node.ID)) + After(node.LastSeen.Add(inactivityThreshold)) { + expired = append(expired, node.ID) log.Info(). Str("node", node.Hostname). 
Msg("Ephemeral client removed from database") // empty isConnected map as ephemeral nodes are not routes - err = DeleteNode(tx, nodes[idx], map[key.MachinePublic]bool{}) + changed, err := DeleteNode(tx, nodes[idx], nil) if err != nil { log.Error(). Err(err). Str("node", node.Hostname). Msg("🤮 Cannot delete ephemeral node from the database") } + + changedNodes = append(changedNodes, changed...) } } // TODO(kradalby): needs to be moved out of transaction } - if len(expired) > 0 { - return types.StateUpdate{ - Type: types.StatePeerRemoved, - Removed: expired, - }, true - } - return types.StateUpdate{}, false + return expired, changedNodes } func ExpireExpiredNodes(tx *gorm.DB, @@ -748,41 +717,14 @@ func ExpireExpiredNodes(tx *gorm.DB, nodes, err := ListNodes(tx) if err != nil { - log.Error(). - Err(err). - Msg("Error listing nodes to find expired nodes") - return time.Unix(0, 0), types.StateUpdate{}, false } - for index, node := range nodes { - if node.IsExpired() && - // TODO(kradalby): Replace this, it is very spammy - // It will notify about all nodes that has been expired. - // It should only notify about expired nodes since _last check_. - node.Expiry.After(lastCheck) { + for _, node := range nodes { + if node.IsExpired() && node.Expiry.After(lastCheck) { expired = append(expired, &tailcfg.PeerChange{ NodeID: tailcfg.NodeID(node.ID), KeyExpiry: node.Expiry, }) - - now := time.Now() - // Do not use setNodeExpiry as that has a notifier hook, which - // can cause a deadlock, we are updating all changed nodes later - // and there is no point in notifiying twice. - if err := tx.Model(&nodes[index]).Updates(types.Node{ - Expiry: &now, - }).Error; err != nil { - log.Error(). - Err(err). - Str("node", node.Hostname). - Str("name", node.GivenName). - Msg("🤮 Cannot expire node") - } else { - log.Info(). - Str("node", node.Hostname). - Str("name", node.GivenName). 
- Msg("Node successfully expired") - } } } diff --git a/hscontrol/db/node_test.go b/hscontrol/db/node_test.go index 5e8eb294..e95ee4ae 100644 --- a/hscontrol/db/node_test.go +++ b/hscontrol/db/node_test.go @@ -11,6 +11,7 @@ import ( "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" + "github.com/puzpuzpuz/xsync/v3" "gopkg.in/check.v1" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -28,6 +29,7 @@ func (s *Suite) TestGetNode(c *check.C) { nodeKey := key.NewNode() machineKey := key.NewMachine() + pakID := uint(pak.ID) node := &types.Node{ ID: 0, @@ -36,9 +38,10 @@ func (s *Suite) TestGetNode(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(node) + trx := db.DB.Save(node) + c.Assert(trx.Error, check.IsNil) _, err = db.getNode("test", "testnode") c.Assert(err, check.IsNil) @@ -57,6 +60,7 @@ func (s *Suite) TestGetNodeByID(c *check.C) { nodeKey := key.NewNode() machineKey := key.NewMachine() + pakID := uint(pak.ID) node := types.Node{ ID: 0, MachineKey: machineKey.Public(), @@ -64,9 +68,10 @@ func (s *Suite) TestGetNodeByID(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) _, err = db.GetNodeByID(0) c.Assert(err, check.IsNil) @@ -87,6 +92,7 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) { machineKey := key.NewMachine() + pakID := uint(pak.ID) node := types.Node{ ID: 0, MachineKey: machineKey.Public(), @@ -94,9 +100,10 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, 
check.IsNil) _, err = db.GetNodeByAnyKey(machineKey.Public(), nodeKey.Public(), oldNodeKey.Public()) c.Assert(err, check.IsNil) @@ -116,11 +123,11 @@ func (s *Suite) TestHardDeleteNode(c *check.C) { Hostname: "testnode3", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(1), } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) - err = db.DeleteNode(&node, map[key.MachinePublic]bool{}) + _, err = db.DeleteNode(&node, xsync.NewMapOf[types.NodeID, bool]()) c.Assert(err, check.IsNil) _, err = db.getNode(user.Name, "testnode3") @@ -137,26 +144,28 @@ func (s *Suite) TestListPeers(c *check.C) { _, err = db.GetNodeByID(0) c.Assert(err, check.NotNil) + pakID := uint(pak.ID) for index := 0; index <= 10; index++ { nodeKey := key.NewNode() machineKey := key.NewMachine() node := types.Node{ - ID: uint64(index), + ID: types.NodeID(index), MachineKey: machineKey.Public(), NodeKey: nodeKey.Public(), Hostname: "testnode" + strconv.Itoa(index), UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) } node0ByID, err := db.GetNodeByID(0) c.Assert(err, check.IsNil) - peersOfNode0, err := db.ListPeers(node0ByID) + peersOfNode0, err := db.ListPeers(node0ByID.ID) c.Assert(err, check.IsNil) c.Assert(len(peersOfNode0), check.Equals, 9) @@ -187,20 +196,21 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) { for index := 0; index <= 10; index++ { nodeKey := key.NewNode() machineKey := key.NewMachine() + pakID := uint(stor[index%2].key.ID) + v4 := netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))) node := types.Node{ - ID: uint64(index), - MachineKey: machineKey.Public(), - NodeKey: nodeKey.Public(), - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))), - }, + ID: types.NodeID(index), + MachineKey: machineKey.Public(), + 
NodeKey: nodeKey.Public(), + IPv4: &v4, Hostname: "testnode" + strconv.Itoa(index), UserID: stor[index%2].user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(stor[index%2].key.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) } aclPolicy := &policy.ACLPolicy{ @@ -232,16 +242,16 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) { c.Logf("Node(%v), user: %v", testNode.Hostname, testNode.User) c.Assert(err, check.IsNil) - adminPeers, err := db.ListPeers(adminNode) + adminPeers, err := db.ListPeers(adminNode.ID) c.Assert(err, check.IsNil) - testPeers, err := db.ListPeers(testNode) + testPeers, err := db.ListPeers(testNode.ID) c.Assert(err, check.IsNil) - adminRules, _, err := policy.GenerateFilterAndSSHRules(aclPolicy, adminNode, adminPeers) + adminRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, adminNode, adminPeers) c.Assert(err, check.IsNil) - testRules, _, err := policy.GenerateFilterAndSSHRules(aclPolicy, testNode, testPeers) + testRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, testNode, testPeers) c.Assert(err, check.IsNil) peersOfAdminNode := policy.FilterNodesByACL(adminNode, adminPeers, adminRules) @@ -272,6 +282,7 @@ func (s *Suite) TestExpireNode(c *check.C) { nodeKey := key.NewNode() machineKey := key.NewMachine() + pakID := uint(pak.ID) node := &types.Node{ ID: 0, @@ -280,7 +291,7 @@ func (s *Suite) TestExpireNode(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, Expiry: &time.Time{}, } db.DB.Save(node) @@ -301,27 +312,6 @@ func (s *Suite) TestExpireNode(c *check.C) { c.Assert(nodeFromDB.IsExpired(), check.Equals, true) } -func (s *Suite) TestSerdeAddressStrignSlice(c *check.C) { - input := types.NodeAddresses([]netip.Addr{ - netip.MustParseAddr("192.0.2.1"), - netip.MustParseAddr("2001:db8::1"), - }) - serialized, err := input.Value() - 
c.Assert(err, check.IsNil) - if serial, ok := serialized.(string); ok { - c.Assert(serial, check.Equals, "192.0.2.1,2001:db8::1") - } - - var deserialized types.NodeAddresses - err = deserialized.Scan(serialized) - c.Assert(err, check.IsNil) - - c.Assert(len(deserialized), check.Equals, len(input)) - for i := range deserialized { - c.Assert(deserialized[i], check.Equals, input[i]) - } -} - func (s *Suite) TestGenerateGivenName(c *check.C) { user1, err := db.CreateUser("user-1") c.Assert(err, check.IsNil) @@ -337,6 +327,7 @@ func (s *Suite) TestGenerateGivenName(c *check.C) { machineKey2 := key.NewMachine() + pakID := uint(pak.ID) node := &types.Node{ ID: 0, MachineKey: machineKey.Public(), @@ -345,9 +336,11 @@ func (s *Suite) TestGenerateGivenName(c *check.C) { GivenName: "hostname-1", UserID: user1.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(node) + + trx := db.DB.Save(node) + c.Assert(trx.Error, check.IsNil) givenName, err := db.GenerateGivenName(machineKey2.Public(), "hostname-2") comment := check.Commentf("Same user, unique nodes, unique hostnames, no conflict") @@ -378,6 +371,7 @@ func (s *Suite) TestSetTags(c *check.C) { nodeKey := key.NewNode() machineKey := key.NewMachine() + pakID := uint(pak.ID) node := &types.Node{ ID: 0, MachineKey: machineKey.Public(), @@ -385,9 +379,11 @@ func (s *Suite) TestSetTags(c *check.C) { Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(node) + + trx := db.DB.Save(node) + c.Assert(trx.Error, check.IsNil) // assign simple tags sTags := []string{"tag:test", "tag:foo"} @@ -397,7 +393,7 @@ func (s *Suite) TestSetTags(c *check.C) { c.Assert(err, check.IsNil) c.Assert(node.ForcedTags, check.DeepEquals, types.StringList(sTags)) - // assign duplicat tags, expect no errors but no doubles in DB + // assign duplicate tags, expect no errors but no doubles in DB eTags := 
[]string{"tag:bar", "tag:test", "tag:unknown", "tag:test"} err = db.SetTags(node.ID, eTags) c.Assert(err, check.IsNil) @@ -408,6 +404,13 @@ func (s *Suite) TestSetTags(c *check.C) { check.DeepEquals, types.StringList([]string{"tag:bar", "tag:test", "tag:unknown"}), ) + + // test removing tags + err = db.SetTags(node.ID, []string{}) + c.Assert(err, check.IsNil) + node, err = db.getNode("test", "testnode") + c.Assert(err, check.IsNil) + c.Assert(node.ForcedTags, check.DeepEquals, types.StringList([]string{})) } func TestHeadscale_generateGivenName(t *testing.T) { @@ -561,6 +564,8 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) { // Check if a subprefix of an autoapproved route is approved route2 := netip.MustParsePrefix("10.11.0.0/24") + v4 := netip.MustParseAddr("100.64.0.1") + pakID := uint(pak.ID) node := types.Node{ ID: 0, MachineKey: machineKey.Public(), @@ -568,15 +573,16 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) { Hostname: "test", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, Hostinfo: &tailcfg.Hostinfo{ RequestTags: []string{"tag:exit"}, RoutableIPs: []netip.Prefix{defaultRouteV4, defaultRouteV6, route1, route2}, }, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, + IPv4: &v4, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) sendUpdate, err := db.SaveNodeRoutes(&node) c.Assert(err, check.IsNil) @@ -586,7 +592,7 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) { c.Assert(err, check.IsNil) // TODO(kradalby): Check state update - _, err = db.EnableAutoApprovedRoutes(pol, node0ByID) + err = db.EnableAutoApprovedRoutes(pol, node0ByID) c.Assert(err, check.IsNil) enabledRoutes, err := db.GetEnabledRoutes(node0ByID) diff --git a/hscontrol/db/preauth_keys.go b/hscontrol/db/preauth_keys.go index d1d94bbe..adfd289a 100644 --- a/hscontrol/db/preauth_keys.go +++ b/hscontrol/db/preauth_keys.go @@ -83,7 +83,7 @@ func CreatePreAuthKey( if 
!seenTags[tag] { if err := tx.Save(&types.PreAuthKeyACLTag{PreAuthKeyID: key.ID, Tag: tag}).Error; err != nil { return nil, fmt.Errorf( - "failed to ceate key tag in the database: %w", + "failed to create key tag in the database: %w", err, ) } @@ -92,10 +92,6 @@ func CreatePreAuthKey( } } - if err != nil { - return nil, err - } - return &key, nil } @@ -201,9 +197,10 @@ func ValidatePreAuthKey(tx *gorm.DB, k string) (*types.PreAuthKey, error) { } nodes := types.Nodes{} + pakID := uint(pak.ID) if err := tx. Preload("AuthKey"). - Where(&types.Node{AuthKeyID: uint(pak.ID)}). + Where(&types.Node{AuthKeyID: &pakID}). Find(&nodes).Error; err != nil { return nil, err } diff --git a/hscontrol/db/preauth_keys_test.go b/hscontrol/db/preauth_keys_test.go index 53cf37c4..9cdcba80 100644 --- a/hscontrol/db/preauth_keys_test.go +++ b/hscontrol/db/preauth_keys_test.go @@ -76,14 +76,16 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) { pak, err := db.CreatePreAuthKey(user.Name, false, false, nil, nil) c.Assert(err, check.IsNil) + pakID := uint(pak.ID) node := types.Node{ ID: 0, Hostname: "testest", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) key, err := db.ValidatePreAuthKey(pak.Key) c.Assert(err, check.Equals, ErrSingleUseAuthKeyHasBeenUsed) @@ -97,14 +99,16 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) { pak, err := db.CreatePreAuthKey(user.Name, true, false, nil, nil) c.Assert(err, check.IsNil) + pakID := uint(pak.ID) node := types.Node{ ID: 1, Hostname: "testest", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) key, err := db.ValidatePreAuthKey(pak.Key) c.Assert(err, check.IsNil) @@ -131,15 +135,17 @@ func (*Suite) TestEphemeralKeyReusable(c *check.C) { c.Assert(err, check.IsNil) 
now := time.Now().Add(-time.Second * 30) + pakID := uint(pak.ID) node := types.Node{ ID: 0, Hostname: "testest", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, LastSeen: &now, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) _, err = db.ValidatePreAuthKey(pak.Key) c.Assert(err, check.IsNil) @@ -147,8 +153,8 @@ func (*Suite) TestEphemeralKeyReusable(c *check.C) { _, err = db.getNode("test7", "testest") c.Assert(err, check.IsNil) - db.DB.Transaction(func(tx *gorm.DB) error { - ExpireEphemeralNodes(tx, time.Second*20) + db.Write(func(tx *gorm.DB) error { + DeleteExpiredEphemeralNodes(tx, time.Second*20) return nil }) @@ -165,13 +171,14 @@ func (*Suite) TestEphemeralKeyNotReusable(c *check.C) { c.Assert(err, check.IsNil) now := time.Now().Add(-time.Second * 30) + pakId := uint(pak.ID) node := types.Node{ ID: 0, Hostname: "testest", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, LastSeen: &now, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakId, } db.DB.Save(&node) @@ -181,8 +188,8 @@ func (*Suite) TestEphemeralKeyNotReusable(c *check.C) { _, err = db.getNode("test7", "testest") c.Assert(err, check.IsNil) - db.DB.Transaction(func(tx *gorm.DB) error { - ExpireEphemeralNodes(tx, time.Second*20) + db.Write(func(tx *gorm.DB) error { + DeleteExpiredEphemeralNodes(tx, time.Second*20) return nil }) diff --git a/hscontrol/db/routes.go b/hscontrol/db/routes.go index 1ee144a7..74b2b4b7 100644 --- a/hscontrol/db/routes.go +++ b/hscontrol/db/routes.go @@ -2,13 +2,16 @@ package db import ( "errors" + "fmt" "net/netip" + "sort" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" + "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog/log" "gorm.io/gorm" - "tailscale.com/types/key" + "tailscale.com/util/set" ) var ErrRouteIsNotAvailable = errors.New("route is not available") @@ -124,8 +127,8 @@ func EnableRoute(tx *gorm.DB, id uint64) 
(*types.StateUpdate, error) { func DisableRoute(tx *gorm.DB, id uint64, - isConnected map[key.MachinePublic]bool, -) (*types.StateUpdate, error) { + isLikelyConnected *xsync.MapOf[types.NodeID, bool], +) ([]types.NodeID, error) { route, err := GetRoute(tx, id) if err != nil { return nil, err @@ -137,16 +140,15 @@ func DisableRoute(tx *gorm.DB, // Tailscale requires both IPv4 and IPv6 exit routes to // be enabled at the same time, as per // https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002 - var update *types.StateUpdate + var update []types.NodeID if !route.IsExitRoute() { - update, err = failoverRouteReturnUpdate(tx, isConnected, route) + route.Enabled = false + err = tx.Save(route).Error if err != nil { return nil, err } - route.Enabled = false - route.IsPrimary = false - err = tx.Save(route).Error + update, err = failoverRouteTx(tx, isLikelyConnected, route) if err != nil { return nil, err } @@ -160,6 +162,7 @@ func DisableRoute(tx *gorm.DB, if routes[i].IsExitRoute() { routes[i].Enabled = false routes[i].IsPrimary = false + err = tx.Save(&routes[i]).Error if err != nil { return nil, err @@ -168,26 +171,11 @@ func DisableRoute(tx *gorm.DB, } } - if routes == nil { - routes, err = GetNodeRoutes(tx, &node) - if err != nil { - return nil, err - } - } - - node.Routes = routes - // If update is empty, it means that one was not created // by failover (as a failover was not necessary), create // one and return to the caller. 
if update == nil { - update = &types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{ - &node, - }, - Message: "called from db.DisableRoute", - } + update = []types.NodeID{node.ID} } return update, nil @@ -195,18 +183,18 @@ func DisableRoute(tx *gorm.DB, func (hsdb *HSDatabase) DeleteRoute( id uint64, - isConnected map[key.MachinePublic]bool, -) (*types.StateUpdate, error) { - return Write(hsdb.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { - return DeleteRoute(tx, id, isConnected) + isLikelyConnected *xsync.MapOf[types.NodeID, bool], +) ([]types.NodeID, error) { + return Write(hsdb.DB, func(tx *gorm.DB) ([]types.NodeID, error) { + return DeleteRoute(tx, id, isLikelyConnected) }) } func DeleteRoute( tx *gorm.DB, id uint64, - isConnected map[key.MachinePublic]bool, -) (*types.StateUpdate, error) { + isLikelyConnected *xsync.MapOf[types.NodeID, bool], +) ([]types.NodeID, error) { route, err := GetRoute(tx, id) if err != nil { return nil, err @@ -218,9 +206,9 @@ func DeleteRoute( // Tailscale requires both IPv4 and IPv6 exit routes to // be enabled at the same time, as per // https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002 - var update *types.StateUpdate + var update []types.NodeID if !route.IsExitRoute() { - update, err = failoverRouteReturnUpdate(tx, isConnected, route) + update, err = failoverRouteTx(tx, isLikelyConnected, route) if err != nil { return nil, nil } @@ -229,7 +217,7 @@ func DeleteRoute( return nil, err } } else { - routes, err := GetNodeRoutes(tx, &node) + routes, err = GetNodeRoutes(tx, &node) if err != nil { return nil, err } @@ -259,35 +247,37 @@ func DeleteRoute( node.Routes = routes if update == nil { - update = &types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{ - &node, - }, - Message: "called from db.DeleteRoute", - } + update = []types.NodeID{node.ID} } return update, nil } -func deleteNodeRoutes(tx *gorm.DB, node *types.Node, isConnected 
map[key.MachinePublic]bool) error { +func deleteNodeRoutes(tx *gorm.DB, node *types.Node, isLikelyConnected *xsync.MapOf[types.NodeID, bool]) ([]types.NodeID, error) { routes, err := GetNodeRoutes(tx, node) if err != nil { - return err + return nil, fmt.Errorf("getting node routes: %w", err) } + var changed []types.NodeID for i := range routes { if err := tx.Unscoped().Delete(&routes[i]).Error; err != nil { - return err + return nil, fmt.Errorf("deleting route(%d): %w", &routes[i].ID, err) } // TODO(kradalby): This is a bit too aggressive, we could probably // figure out which routes needs to be failed over rather than all. - failoverRouteReturnUpdate(tx, isConnected, &routes[i]) + chn, err := failoverRouteTx(tx, isLikelyConnected, &routes[i]) + if err != nil { + return changed, fmt.Errorf("failing over route after delete: %w", err) + } + + if chn != nil { + changed = append(changed, chn...) + } } - return nil + return changed, nil } // isUniquePrefix returns if there is another node providing the same route already. @@ -400,7 +390,7 @@ func SaveNodeRoutes(tx *gorm.DB, node *types.Node) (bool, error) { for prefix, exists := range advertisedRoutes { if !exists { route := types.Route{ - NodeID: node.ID, + NodeID: node.ID.Uint64(), Prefix: types.IPPrefix(prefix), Advertised: true, Enabled: false, @@ -415,11 +405,12 @@ func SaveNodeRoutes(tx *gorm.DB, node *types.Node) (bool, error) { return sendUpdate, nil } -// EnsureFailoverRouteIsAvailable takes a node and checks if the node's route -// currently have a functioning host that exposes the network. -func EnsureFailoverRouteIsAvailable( +// FailoverNodeRoutesIfNeccessary takes a node and checks if the node's route +// need to be failed over to another host. +// If needed, the failover will be attempted. 
+func FailoverNodeRoutesIfNeccessary( tx *gorm.DB, - isConnected map[key.MachinePublic]bool, + isLikelyConnected *xsync.MapOf[types.NodeID, bool], node *types.Node, ) (*types.StateUpdate, error) { nodeRoutes, err := GetNodeRoutes(tx, node) @@ -427,82 +418,57 @@ func EnsureFailoverRouteIsAvailable( return nil, nil } - var changedNodes types.Nodes + changedNodes := make(set.Set[types.NodeID]) + +nodeRouteLoop: for _, nodeRoute := range nodeRoutes { routes, err := getRoutesByPrefix(tx, netip.Prefix(nodeRoute.Prefix)) if err != nil { - return nil, err + return nil, fmt.Errorf("getting routes by prefix: %w", err) } for _, route := range routes { if route.IsPrimary { // if we have a primary route, and the node is connected // nothing needs to be done. - if isConnected[route.Node.MachineKey] { - continue + if val, ok := isLikelyConnected.Load(route.Node.ID); ok && val { + continue nodeRouteLoop } // if not, we need to failover the route - update, err := failoverRouteReturnUpdate(tx, isConnected, &route) - if err != nil { - return nil, err - } + failover := failoverRoute(isLikelyConnected, &route, routes) + if failover != nil { + err := failover.save(tx) + if err != nil { + return nil, fmt.Errorf("saving failover routes: %w", err) + } - if update != nil { - changedNodes = append(changedNodes, update.ChangeNodes...) 
+ changedNodes.Add(failover.old.Node.ID) + changedNodes.Add(failover.new.Node.ID) + + continue nodeRouteLoop } } } } + chng := changedNodes.Slice() + sort.SliceStable(chng, func(i, j int) bool { + return chng[i] < chng[j] + }) + if len(changedNodes) != 0 { return &types.StateUpdate{ Type: types.StatePeerChanged, - ChangeNodes: changedNodes, - Message: "called from db.EnsureFailoverRouteIsAvailable", + ChangeNodes: chng, + Message: "called from db.FailoverNodeRoutesIfNeccessary", }, nil } return nil, nil } -func failoverRouteReturnUpdate( - tx *gorm.DB, - isConnected map[key.MachinePublic]bool, - r *types.Route, -) (*types.StateUpdate, error) { - changedKeys, err := failoverRoute(tx, isConnected, r) - if err != nil { - return nil, err - } - - log.Trace(). - Interface("isConnected", isConnected). - Interface("changedKeys", changedKeys). - Msg("building route failover") - - if len(changedKeys) == 0 { - return nil, nil - } - - var nodes types.Nodes - for _, key := range changedKeys { - node, err := GetNodeByMachineKey(tx, key) - if err != nil { - return nil, err - } - - nodes = append(nodes, node) - } - - return &types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: nodes, - Message: "called from db.failoverRouteReturnUpdate", - }, nil -} - -// failoverRoute takes a route that is no longer available, +// failoverRouteTx takes a route that is no longer available, // this can be either from: // - being disabled // - being deleted @@ -510,11 +476,11 @@ func failoverRouteReturnUpdate( // // and tries to find a new route to take over its place. // If the given route was not primary, it returns early. 
-func failoverRoute( +func failoverRouteTx( tx *gorm.DB, - isConnected map[key.MachinePublic]bool, + isLikelyConnected *xsync.MapOf[types.NodeID, bool], r *types.Route, -) ([]key.MachinePublic, error) { +) ([]types.NodeID, error) { if r == nil { return nil, nil } @@ -532,14 +498,72 @@ func failoverRoute( routes, err := getRoutesByPrefix(tx, netip.Prefix(r.Prefix)) if err != nil { - return nil, err + return nil, fmt.Errorf("getting routes by prefix: %w", err) + } + + fo := failoverRoute(isLikelyConnected, r, routes) + if fo == nil { + return nil, nil + } + + err = fo.save(tx) + if err != nil { + return nil, fmt.Errorf("saving failover route: %w", err) + } + + log.Trace(). + Str("hostname", fo.new.Node.Hostname). + Msgf("set primary to new route, was: id(%d), host(%s), now: id(%d), host(%s)", fo.old.ID, fo.old.Node.Hostname, fo.new.ID, fo.new.Node.Hostname) + + // Return a list of the machinekeys of the changed nodes. + return []types.NodeID{fo.old.Node.ID, fo.new.Node.ID}, nil +} + +type failover struct { + old *types.Route + new *types.Route +} + +func (f *failover) save(tx *gorm.DB) error { + err := tx.Save(f.old).Error + if err != nil { + return fmt.Errorf("saving old primary: %w", err) + } + + err = tx.Save(f.new).Error + if err != nil { + return fmt.Errorf("saving new primary: %w", err) + } + + return nil +} + +func failoverRoute( + isLikelyConnected *xsync.MapOf[types.NodeID, bool], + routeToReplace *types.Route, + altRoutes types.Routes, + +) *failover { + if routeToReplace == nil { + return nil + } + + // This route is not a primary route, and it is not + // being served to nodes. 
+ if !routeToReplace.IsPrimary { + return nil + } + + // We do not have to failover exit nodes + if routeToReplace.IsExitRoute() { + return nil } var newPrimary *types.Route // Find a new suitable route - for idx, route := range routes { - if r.ID == route.ID { + for idx, route := range altRoutes { + if routeToReplace.ID == route.ID { continue } @@ -547,9 +571,11 @@ func failoverRoute( continue } - if isConnected[route.Node.MachineKey] { - newPrimary = &routes[idx] - break + if isLikelyConnected != nil { + if val, ok := isLikelyConnected.Load(route.Node.ID); ok && val { + newPrimary = &altRoutes[idx] + break + } } } @@ -559,48 +585,23 @@ func failoverRoute( // the one currently marked as primary is the // best we got. if newPrimary == nil { - return nil, nil + return nil } - log.Trace(). - Str("hostname", newPrimary.Node.Hostname). - Msg("found new primary, updating db") - - // Remove primary from the old route - r.IsPrimary = false - err = tx.Save(&r).Error - if err != nil { - log.Error().Err(err).Msg("error disabling new primary route") - - return nil, err - } - - log.Trace(). - Str("hostname", newPrimary.Node.Hostname). - Msg("removed primary from old route") - - // Set primary for the new primary + routeToReplace.IsPrimary = false newPrimary.IsPrimary = true - err = tx.Save(&newPrimary).Error - if err != nil { - log.Error().Err(err).Msg("error enabling new primary route") - return nil, err + return &failover{ + old: routeToReplace, + new: newPrimary, } - - log.Trace(). - Str("hostname", newPrimary.Node.Hostname). - Msg("set primary to new route") - - // Return a list of the machinekeys of the changed nodes. 
- return []key.MachinePublic{r.Node.MachineKey, newPrimary.Node.MachineKey}, nil } func (hsdb *HSDatabase) EnableAutoApprovedRoutes( aclPolicy *policy.ACLPolicy, node *types.Node, -) (*types.StateUpdate, error) { - return Write(hsdb.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { +) error { + return hsdb.Write(func(tx *gorm.DB) error { return EnableAutoApprovedRoutes(tx, aclPolicy, node) }) } @@ -610,20 +611,14 @@ func EnableAutoApprovedRoutes( tx *gorm.DB, aclPolicy *policy.ACLPolicy, node *types.Node, -) (*types.StateUpdate, error) { - if len(node.IPAddresses) == 0 { - return nil, nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs +) error { + if node.IPv4 == nil && node.IPv6 == nil { + return nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs } routes, err := GetNodeAdvertisedRoutes(tx, node) if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error(). - Caller(). - Err(err). - Str("node", node.Hostname). - Msg("Could not get advertised routes for node") - - return nil, err + return fmt.Errorf("getting advertised routes for node(%s %d): %w", node.Hostname, node.ID, err) } log.Trace().Interface("routes", routes).Msg("routes for autoapproving") @@ -639,12 +634,7 @@ func EnableAutoApprovedRoutes( netip.Prefix(advertisedRoute.Prefix), ) if err != nil { - log.Err(err). - Str("advertisedRoute", advertisedRoute.String()). - Uint64("nodeId", node.ID). - Msg("Failed to resolve autoApprovers for advertised route") - - return nil, err + return fmt.Errorf("failed to resolve autoApprovers for route(%d) for node(%s %d): %w", advertisedRoute.ID, node.Hostname, node.ID, err) } log.Trace(). @@ -661,40 +651,23 @@ func EnableAutoApprovedRoutes( // TODO(kradalby): figure out how to get this to depend on less stuff approvedIps, err := aclPolicy.ExpandAlias(types.Nodes{node}, approvedAlias) if err != nil { - log.Err(err). - Str("alias", approvedAlias). 
- Msg("Failed to expand alias when processing autoApprovers policy") - - return nil, err + return fmt.Errorf("expanding alias %q for autoApprovers: %w", approvedAlias, err) } // approvedIPs should contain all of node's IPs if it matches the rule, so check for first - if approvedIps.Contains(node.IPAddresses[0]) { + if approvedIps.Contains(*node.IPv4) { approvedRoutes = append(approvedRoutes, advertisedRoute) } } } } - update := &types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{}, - Message: "created in db.EnableAutoApprovedRoutes", - } - for _, approvedRoute := range approvedRoutes { - perHostUpdate, err := EnableRoute(tx, uint64(approvedRoute.ID)) + _, err := EnableRoute(tx, uint64(approvedRoute.ID)) if err != nil { - log.Err(err). - Str("approvedRoute", approvedRoute.String()). - Uint64("nodeId", node.ID). - Msg("Failed to enable approved route") - - return nil, err + return fmt.Errorf("enabling approved route(%d): %w", approvedRoute.ID, err) } - - update.ChangeNodes = append(update.ChangeNodes, perHostUpdate.ChangeNodes...) 
} - return update, nil + return nil } diff --git a/hscontrol/db/routes_test.go b/hscontrol/db/routes_test.go index f3357e2a..8bbc5948 100644 --- a/hscontrol/db/routes_test.go +++ b/hscontrol/db/routes_test.go @@ -7,15 +7,25 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" - "github.com/stretchr/testify/assert" + "github.com/puzpuzpuz/xsync/v3" "gopkg.in/check.v1" "gorm.io/gorm" "tailscale.com/tailcfg" - "tailscale.com/types/key" ) +var smap = func(m map[types.NodeID]bool) *xsync.MapOf[types.NodeID, bool] { + s := xsync.NewMapOf[types.NodeID, bool]() + + for k, v := range m { + s.Store(k, v) + } + + return s +} + func (s *Suite) TestGetRoutes(c *check.C) { user, err := db.CreateUser("test") c.Assert(err, check.IsNil) @@ -33,15 +43,17 @@ func (s *Suite) TestGetRoutes(c *check.C) { RoutableIPs: []netip.Prefix{route}, } + pakID := uint(pak.ID) node := types.Node{ ID: 0, Hostname: "test_get_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, Hostinfo: &hostInfo, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) su, err := db.SaveNodeRoutes(&node) c.Assert(err, check.IsNil) @@ -83,15 +95,17 @@ func (s *Suite) TestGetEnableRoutes(c *check.C) { RoutableIPs: []netip.Prefix{route, route2}, } + pakID := uint(pak.ID) node := types.Node{ ID: 0, Hostname: "test_enable_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, Hostinfo: &hostInfo, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) sendUpdate, err := db.SaveNodeRoutes(&node) c.Assert(err, check.IsNil) @@ -155,15 +169,17 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) { hostInfo1 := tailcfg.Hostinfo{ RoutableIPs: []netip.Prefix{route, route2}, } + pakID := uint(pak.ID) node1 := 
types.Node{ ID: 1, Hostname: "test_enable_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, Hostinfo: &hostInfo1, } - db.DB.Save(&node1) + trx := db.DB.Save(&node1) + c.Assert(trx.Error, check.IsNil) sendUpdate, err := db.SaveNodeRoutes(&node1) c.Assert(err, check.IsNil) @@ -183,7 +199,7 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) { Hostname: "test_enable_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, Hostinfo: &hostInfo2, } db.DB.Save(&node2) @@ -237,16 +253,18 @@ func (s *Suite) TestDeleteRoutes(c *check.C) { } now := time.Now() + pakID := uint(pak.ID) node1 := types.Node{ ID: 1, Hostname: "test_enable_route_node", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, Hostinfo: &hostInfo1, LastSeen: &now, } - db.DB.Save(&node1) + trx := db.DB.Save(&node1) + c.Assert(trx.Error, check.IsNil) sendUpdate, err := db.SaveNodeRoutes(&node1) c.Assert(err, check.IsNil) @@ -262,7 +280,7 @@ func (s *Suite) TestDeleteRoutes(c *check.C) { c.Assert(err, check.IsNil) // TODO(kradalby): check stateupdate - _, err = db.DeleteRoute(uint64(routes[0].ID), map[key.MachinePublic]bool{}) + _, err = db.DeleteRoute(uint64(routes[0].ID), nil) c.Assert(err, check.IsNil) enabledRoutes1, err := db.GetEnabledRoutes(&node1) @@ -271,21 +289,387 @@ func (s *Suite) TestDeleteRoutes(c *check.C) { } var ipp = func(s string) types.IPPrefix { return types.IPPrefix(netip.MustParsePrefix(s)) } +var n = func(nid types.NodeID) types.Node { + return types.Node{ID: nid} +} +var np = func(nid types.NodeID) *types.Node { + no := n(nid) + return &no +} +var r = func(id uint, nid types.NodeID, prefix types.IPPrefix, enabled, primary bool) types.Route { + return types.Route{ + Model: gorm.Model{ + ID: id, + }, + Node: n(nid), + Prefix: prefix, + Enabled: enabled, + IsPrimary: primary, + } +} +var rp = 
func(id uint, nid types.NodeID, prefix types.IPPrefix, enabled, primary bool) *types.Route { + ro := r(id, nid, prefix, enabled, primary) + return &ro +} -func TestFailoverRoute(t *testing.T) { - machineKeys := []key.MachinePublic{ - key.NewMachine().Public(), - key.NewMachine().Public(), - key.NewMachine().Public(), - key.NewMachine().Public(), +func dbForTest(t *testing.T, testName string) *HSDatabase { + t.Helper() + + tmpDir, err := os.MkdirTemp("", testName) + if err != nil { + t.Fatalf("creating tempdir: %s", err) } + dbPath := tmpDir + "/headscale_test.db" + + db, err = NewHeadscaleDatabase( + types.DatabaseConfig{ + Type: "sqlite3", + Sqlite: types.SqliteConfig{ + Path: dbPath, + }, + }, + "", + ) + if err != nil { + t.Fatalf("setting up database: %s", err) + } + + t.Logf("database set up at: %s", dbPath) + + return db +} + +func TestFailoverNodeRoutesIfNeccessary(t *testing.T) { + su := func(nids ...types.NodeID) *types.StateUpdate { + return &types.StateUpdate{ + ChangeNodes: nids, + } + } + tests := []struct { + name string + nodes types.Nodes + routes types.Routes + isConnected []map[types.NodeID]bool + want []*types.StateUpdate + wantErr bool + }{ + { + name: "n1-down-n2-down-n1-up", + nodes: types.Nodes{ + np(1), + np(2), + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + }, + // n2 goes down + { + 1: false, + 2: false, + }, + // n1 comes up + { + 1: true, + 2: false, + }, + }, + want: []*types.StateUpdate{ + // route changes from 1 -> 2 + su(1, 2), + // both down, no change + nil, + // route changes from 2 -> 1 + su(1, 2), + }, + }, + { + name: "n1-recon-n2-down-n1-recon-n2-up", + nodes: types.Nodes{ + np(1), + np(2), + np(1), + np(2), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + }, + isConnected: 
[]map[types.NodeID]bool{ + // n1 up recon = noop + { + 1: true, + 2: true, + }, + // n2 goes down + { + 1: true, + 2: false, + }, + // n1 up recon = noop + { + 1: true, + 2: false, + }, + // n2 comes back up + { + 1: true, + 2: false, + }, + }, + want: []*types.StateUpdate{ + nil, + nil, + nil, + nil, + }, + }, + { + name: "n1-recon-n2-down-n1-recon-n2-up", + nodes: types.Nodes{ + np(1), + np(1), + np(3), + np(3), + np(2), + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + r(3, 3, ipp("10.0.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: false, + 3: true, + }, + // n1 comes up + { + 1: true, + 2: false, + 3: true, + }, + // n3 goes down + { + 1: true, + 2: false, + 3: false, + }, + // n3 comes up + { + 1: true, + 2: false, + 3: true, + }, + // n2 comes up + { + 1: true, + 2: true, + 3: true, + }, + // n1 goes down + { + 1: false, + 2: true, + 3: true, + }, + }, + want: []*types.StateUpdate{ + su(1, 3), // n1 -> n3 + nil, + su(1, 3), // n3 -> n1 + nil, + nil, + su(1, 2), // n1 -> n2 + }, + }, + { + name: "n1-recon-n2-dis-n3-take", + nodes: types.Nodes{ + np(1), + np(3), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), false, false), + r(3, 3, ipp("10.0.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + 3: true, + }, + // n3 goes down + { + 1: false, + 2: true, + 3: false, + }, + }, + want: []*types.StateUpdate{ + su(1, 3), // n1 -> n3 + nil, + }, + }, + { + name: "multi-n1-oneforeach-n2-n3", + nodes: types.Nodes{ + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(4, 1, ipp("10.1.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + r(3, 3, ipp("10.1.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + 3: 
true, + }, + }, + want: []*types.StateUpdate{ + su(1, 2, 3), // n1 -> n2,n3 + }, + }, + { + name: "multi-n1-onefor-n2-disabled-n3", + nodes: types.Nodes{ + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(4, 1, ipp("10.1.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + r(3, 3, ipp("10.1.0.0/24"), false, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + 3: true, + }, + }, + want: []*types.StateUpdate{ + su(1, 2), // n1 -> n2, n3 is not enabled + }, + }, + { + name: "multi-n1-onefor-n2-offline-n3", + nodes: types.Nodes{ + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(4, 1, ipp("10.1.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + r(3, 3, ipp("10.1.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: false, + 2: true, + 3: false, + }, + }, + want: []*types.StateUpdate{ + su(1, 2), // n1 -> n2, n3 is offline + }, + }, + { + name: "multi-n2-back-to-multi-n1", + nodes: types.Nodes{ + np(1), + }, + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, false), + r(4, 1, ipp("10.1.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, true), + r(3, 3, ipp("10.1.0.0/24"), true, false), + }, + isConnected: []map[types.NodeID]bool{ + // n1 goes down + { + 1: true, + 2: false, + 3: true, + }, + }, + want: []*types.StateUpdate{ + su(1, 2), // n2 -> n1 + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if (len(tt.isConnected) != len(tt.want)) && len(tt.want) != len(tt.nodes) { + t.Fatalf("nodes (%d), isConnected updates (%d), wants (%d) must be equal", len(tt.nodes), len(tt.isConnected), len(tt.want)) + } + + db := dbForTest(t, tt.name) + + user := types.User{Name: tt.name} + if err := db.DB.Save(&user).Error; err != nil { + t.Fatalf("failed to create user: %s", err) + } + + for _, route := range tt.routes { + route.Node.User = user + if 
err := db.DB.Save(&route.Node).Error; err != nil { + t.Fatalf("failed to create node: %s", err) + } + if err := db.DB.Save(&route).Error; err != nil { + t.Fatalf("failed to create route: %s", err) + } + } + + for step := range len(tt.isConnected) { + node := tt.nodes[step] + isConnected := tt.isConnected[step] + want := tt.want[step] + + got, err := Write(db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { + return FailoverNodeRoutesIfNeccessary(tx, smap(isConnected), node) + }) + + if (err != nil) != tt.wantErr { + t.Errorf("failoverRoute() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(types.StateUpdate{}, "Type", "Message")); diff != "" { + t.Errorf("failoverRoute() unexpected result (-want +got):\n%s", diff) + } + } + }) + } +} + +func TestFailoverRouteTx(t *testing.T) { tests := []struct { name string failingRoute types.Route routes types.Routes - isConnected map[key.MachinePublic]bool - want []key.MachinePublic + isConnected map[types.NodeID]bool + want []types.NodeID wantErr bool }{ { @@ -301,10 +685,8 @@ func TestFailoverRoute(t *testing.T) { Model: gorm.Model{ ID: 1, }, - Prefix: ipp("10.0.0.0/24"), - Node: types.Node{ - MachineKey: machineKeys[0], - }, + Prefix: ipp("10.0.0.0/24"), + Node: types.Node{}, IsPrimary: false, }, routes: types.Routes{}, @@ -317,10 +699,8 @@ func TestFailoverRoute(t *testing.T) { Model: gorm.Model{ ID: 1, }, - Prefix: ipp("0.0.0.0/0"), - Node: types.Node{ - MachineKey: machineKeys[0], - }, + Prefix: ipp("0.0.0.0/0"), + Node: types.Node{}, IsPrimary: true, }, routes: types.Routes{}, @@ -335,7 +715,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, }, @@ -346,7 +726,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, }, @@ -362,7 +742,7 @@ func TestFailoverRoute(t 
*testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -374,7 +754,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -385,19 +765,19 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: false, Enabled: true, }, }, - isConnected: map[key.MachinePublic]bool{ - machineKeys[0]: false, - machineKeys[1]: true, + isConnected: map[types.NodeID]bool{ + 1: false, + 2: true, }, - want: []key.MachinePublic{ - machineKeys[0], - machineKeys[1], + want: []types.NodeID{ + 1, + 2, }, wantErr: false, }, @@ -409,7 +789,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: false, Enabled: true, @@ -421,7 +801,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -432,7 +812,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: false, Enabled: true, @@ -449,7 +829,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: true, Enabled: true, @@ -461,7 +841,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: false, Enabled: true, @@ -472,7 +852,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: true, Enabled: true, @@ -483,20 +863,19 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - 
MachineKey: machineKeys[2], + ID: 3, }, IsPrimary: false, Enabled: true, }, }, - isConnected: map[key.MachinePublic]bool{ - machineKeys[0]: true, - machineKeys[1]: true, - machineKeys[2]: true, + isConnected: map[types.NodeID]bool{ + 1: true, + 2: true, + 3: true, }, - want: []key.MachinePublic{ - machineKeys[1], - machineKeys[0], + want: []types.NodeID{ + 2, 1, }, wantErr: false, }, @@ -508,7 +887,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -520,7 +899,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -532,15 +911,15 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[3], + ID: 4, }, IsPrimary: false, Enabled: true, }, }, - isConnected: map[key.MachinePublic]bool{ - machineKeys[0]: true, - machineKeys[3]: false, + isConnected: map[types.NodeID]bool{ + 1: true, + 4: false, }, want: nil, wantErr: false, @@ -553,7 +932,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -565,7 +944,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -577,7 +956,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[3], + ID: 4, }, IsPrimary: false, Enabled: true, @@ -588,20 +967,20 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: true, Enabled: true, }, }, - isConnected: map[key.MachinePublic]bool{ - machineKeys[0]: false, - machineKeys[1]: true, - machineKeys[3]: false, + isConnected: 
map[types.NodeID]bool{ + 1: false, + 2: true, + 4: false, }, - want: []key.MachinePublic{ - machineKeys[0], - machineKeys[1], + want: []types.NodeID{ + 1, + 2, }, wantErr: false, }, @@ -613,7 +992,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -625,7 +1004,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[0], + ID: 1, }, IsPrimary: true, Enabled: true, @@ -637,7 +1016,7 @@ func TestFailoverRoute(t *testing.T) { }, Prefix: ipp("10.0.0.0/24"), Node: types.Node{ - MachineKey: machineKeys[1], + ID: 2, }, IsPrimary: false, Enabled: false, @@ -650,28 +1029,24 @@ func TestFailoverRoute(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "failover-db-test") - assert.NoError(t, err) - - db, err = NewHeadscaleDatabase( - types.DatabaseConfig{ - Type: "sqlite3", - Sqlite: types.SqliteConfig{ - Path: tmpDir + "/headscale_test.db", - }, - }, - "", - ) - assert.NoError(t, err) + db := dbForTest(t, tt.name) + user := types.User{Name: "test"} + if err := db.DB.Save(&user).Error; err != nil { + t.Fatalf("failed to create user: %s", err) + } for _, route := range tt.routes { + route.Node.User = user + if err := db.DB.Save(&route.Node).Error; err != nil { + t.Fatalf("failed to create node: %s", err) + } if err := db.DB.Save(&route).Error; err != nil { t.Fatalf("failed to create route: %s", err) } } - got, err := Write(db.DB, func(tx *gorm.DB) ([]key.MachinePublic, error) { - return failoverRoute(tx, tt.isConnected, &tt.failingRoute) + got, err := Write(db.DB, func(tx *gorm.DB) ([]types.NodeID, error) { + return failoverRouteTx(tx, smap(tt.isConnected), &tt.failingRoute) }) if (err != nil) != tt.wantErr { @@ -687,230 +1062,177 @@ func TestFailoverRoute(t *testing.T) { } } -// func TestDisableRouteFailover(t *testing.T) { -// machineKeys 
:= []key.MachinePublic{ -// key.NewMachine().Public(), -// key.NewMachine().Public(), -// key.NewMachine().Public(), -// key.NewMachine().Public(), -// } +func TestFailoverRoute(t *testing.T) { + r := func(id uint, nid types.NodeID, prefix types.IPPrefix, enabled, primary bool) types.Route { + return types.Route{ + Model: gorm.Model{ + ID: id, + }, + Node: types.Node{ + ID: nid, + }, + Prefix: prefix, + Enabled: enabled, + IsPrimary: primary, + } + } + rp := func(id uint, nid types.NodeID, prefix types.IPPrefix, enabled, primary bool) *types.Route { + ro := r(id, nid, prefix, enabled, primary) + return &ro + } + tests := []struct { + name string + failingRoute types.Route + routes types.Routes + isConnected map[types.NodeID]bool + want *failover + }{ + { + name: "no-route", + failingRoute: types.Route{}, + routes: types.Routes{}, + want: nil, + }, + { + name: "no-prime", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), false, false), -// tests := []struct { -// name string -// nodes types.Nodes + routes: types.Routes{}, + want: nil, + }, + { + name: "exit-node", + failingRoute: r(1, 1, ipp("0.0.0.0/0"), false, true), + routes: types.Routes{}, + want: nil, + }, + { + name: "no-failover-single-route", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), false, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), false, true), + }, + want: nil, + }, + { + name: "failover-primary", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + }, + isConnected: map[types.NodeID]bool{ + 1: false, + 2: true, + }, + want: &failover{ + old: rp(1, 1, ipp("10.0.0.0/24"), true, false), + new: rp(2, 2, ipp("10.0.0.0/24"), true, true), + }, + }, + { + name: "failover-none-primary", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, false), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 2, ipp("10.0.0.0/24"), true, false), + }, + want: nil, + }, + { + 
name: "failover-primary-multi-route", + failingRoute: r(2, 2, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, false), + r(2, 2, ipp("10.0.0.0/24"), true, true), + r(3, 3, ipp("10.0.0.0/24"), true, false), + }, + isConnected: map[types.NodeID]bool{ + 1: true, + 2: true, + 3: true, + }, + want: &failover{ + old: rp(2, 2, ipp("10.0.0.0/24"), true, false), + new: rp(1, 1, ipp("10.0.0.0/24"), true, true), + }, + }, + { + name: "failover-primary-no-online", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 4, ipp("10.0.0.0/24"), true, false), + }, + isConnected: map[types.NodeID]bool{ + 1: true, + 4: false, + }, + want: nil, + }, + { + name: "failover-primary-one-not-online", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, true), + r(2, 4, ipp("10.0.0.0/24"), true, false), + r(3, 2, ipp("10.0.0.0/24"), true, false), + }, + isConnected: map[types.NodeID]bool{ + 1: false, + 2: true, + 4: false, + }, + want: &failover{ + old: rp(1, 1, ipp("10.0.0.0/24"), true, false), + new: rp(3, 2, ipp("10.0.0.0/24"), true, true), + }, + }, + { + name: "failover-primary-none-enabled", + failingRoute: r(1, 1, ipp("10.0.0.0/24"), true, true), + routes: types.Routes{ + r(1, 1, ipp("10.0.0.0/24"), true, false), + r(2, 2, ipp("10.0.0.0/24"), false, true), + }, + want: nil, + }, + } -// routeID uint64 -// isConnected map[key.MachinePublic]bool + cmps := append( + util.Comparers, + cmp.Comparer(func(x, y types.IPPrefix) bool { + return netip.Prefix(x) == netip.Prefix(y) + }), + ) -// wantMachineKey key.MachinePublic -// wantErr string -// }{ -// { -// name: "single-route", -// nodes: types.Nodes{ -// &types.Node{ -// ID: 0, -// MachineKey: machineKeys[0], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 1, -// }, -// Prefix: ipp("10.0.0.0/24"), -// Node: types.Node{ -// MachineKey: 
machineKeys[0], -// }, -// IsPrimary: true, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// }, -// routeID: 1, -// wantMachineKey: machineKeys[0], -// }, -// { -// name: "failover-simple", -// nodes: types.Nodes{ -// &types.Node{ -// ID: 0, -// MachineKey: machineKeys[0], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 1, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: true, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// &types.Node{ -// ID: 1, -// MachineKey: machineKeys[1], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 2, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: false, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// }, -// routeID: 1, -// wantMachineKey: machineKeys[1], -// }, -// { -// name: "no-failover-offline", -// nodes: types.Nodes{ -// &types.Node{ -// ID: 0, -// MachineKey: machineKeys[0], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 1, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: true, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// &types.Node{ -// ID: 1, -// MachineKey: machineKeys[1], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 2, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: false, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// }, -// isConnected: map[key.MachinePublic]bool{ -// machineKeys[0]: true, -// machineKeys[1]: false, -// }, -// routeID: 1, -// wantMachineKey: machineKeys[1], -// }, -// { -// name: "failover-to-online", -// nodes: types.Nodes{ -// 
&types.Node{ -// ID: 0, -// MachineKey: machineKeys[0], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 1, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: true, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// &types.Node{ -// ID: 1, -// MachineKey: machineKeys[1], -// Routes: []types.Route{ -// { -// Model: gorm.Model{ -// ID: 2, -// }, -// Prefix: ipp("10.0.0.0/24"), -// IsPrimary: false, -// }, -// }, -// Hostinfo: &tailcfg.Hostinfo{ -// RoutableIPs: []netip.Prefix{ -// netip.MustParsePrefix("10.0.0.0/24"), -// }, -// }, -// }, -// }, -// isConnected: map[key.MachinePublic]bool{ -// machineKeys[0]: true, -// machineKeys[1]: true, -// }, -// routeID: 1, -// wantMachineKey: machineKeys[1], -// }, -// } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotf := failoverRoute(smap(tt.isConnected), &tt.failingRoute, tt.routes) -// for _, tt := range tests { -// t.Run(tt.name, func(t *testing.T) { -// datab, err := NewHeadscaleDatabase("sqlite3", ":memory:", false, []netip.Prefix{}, "") -// assert.NoError(t, err) + if tt.want == nil && gotf != nil { + t.Fatalf("expected nil, got %+v", gotf) + } -// // bootstrap db -// datab.DB.Transaction(func(tx *gorm.DB) error { -// for _, node := range tt.nodes { -// err := tx.Save(node).Error -// if err != nil { -// return err -// } + if gotf == nil && tt.want != nil { + t.Fatalf("expected %+v, got nil", tt.want) + } -// _, err = SaveNodeRoutes(tx, node) -// if err != nil { -// return err -// } -// } + if tt.want != nil && gotf != nil { + want := map[string]*types.Route{ + "new": tt.want.new, + "old": tt.want.old, + } -// return nil -// }) + got := map[string]*types.Route{ + "new": gotf.new, + "old": gotf.old, + } -// got, err := Write(datab.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { -// return DisableRoute(tx, tt.routeID, tt.isConnected) -// }) - -// // if (err.Error() != "") != 
tt.wantErr { -// // t.Errorf("failoverRoute() error = %v, wantErr %v", err, tt.wantErr) - -// // return -// // } - -// if len(got.ChangeNodes) != 1 { -// t.Errorf("expected update with one machine, got %d", len(got.ChangeNodes)) -// } - -// if diff := cmp.Diff(tt.wantMachineKey, got.ChangeNodes[0].MachineKey, util.Comparers...); diff != "" { -// t.Errorf("DisableRoute() unexpected result (-want +got):\n%s", diff) -// } -// }) -// } -// } + if diff := cmp.Diff(want, got, cmps...); diff != "" { + t.Fatalf("failoverRoute unexpected result (-want +got):\n%s", diff) + } + } + }) + } +} diff --git a/hscontrol/db/users.go b/hscontrol/db/users.go index 99e93393..1cf8e92f 100644 --- a/hscontrol/db/users.go +++ b/hscontrol/db/users.go @@ -2,10 +2,10 @@ package db import ( "errors" + "fmt" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" - "github.com/rs/zerolog/log" "gorm.io/gorm" ) @@ -34,12 +34,7 @@ func CreateUser(tx *gorm.DB, name string) (*types.User, error) { } user.Name = name if err := tx.Create(&user).Error; err != nil { - log.Error(). - Str("func", "CreateUser"). - Err(err). 
- Msg("Could not create row") - - return nil, err + return nil, fmt.Errorf("creating user: %w", err) } return &user, nil diff --git a/hscontrol/db/users_test.go b/hscontrol/db/users_test.go index b36e8613..98dea6c0 100644 --- a/hscontrol/db/users_test.go +++ b/hscontrol/db/users_test.go @@ -46,14 +46,16 @@ func (s *Suite) TestDestroyUserErrors(c *check.C) { pak, err = db.CreatePreAuthKey(user.Name, false, false, nil, nil) c.Assert(err, check.IsNil) + pakID := uint(pak.ID) node := types.Node{ ID: 0, Hostname: "testnode", UserID: user.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) err = db.DestroyUser("test") c.Assert(err, check.Equals, ErrUserStillHasNodes) @@ -98,14 +100,16 @@ func (s *Suite) TestSetMachineUser(c *check.C) { pak, err := db.CreatePreAuthKey(oldUser.Name, false, false, nil, nil) c.Assert(err, check.IsNil) + pakID := uint(pak.ID) node := types.Node{ ID: 0, Hostname: "testnode", UserID: oldUser.ID, RegisterMethod: util.RegisterMethodAuthKey, - AuthKeyID: uint(pak.ID), + AuthKeyID: &pakID, } - db.DB.Save(&node) + trx := db.DB.Save(&node) + c.Assert(trx.Error, check.IsNil) c.Assert(node.UserID, check.Equals, oldUser.ID) err = db.AssignNodeToUser(&node, newUser.Name) diff --git a/hscontrol/derp/derp.go b/hscontrol/derp/derp.go index 83c200a2..80ec520d 100644 --- a/hscontrol/derp/derp.go +++ b/hscontrol/derp/derp.go @@ -31,7 +31,7 @@ func loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) { } func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) { - ctx, cancel := context.WithTimeout(context.Background(), types.HTTPReadTimeout) + ctx, cancel := context.WithTimeout(context.Background(), types.HTTPTimeout) defer cancel() req, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil) @@ -40,7 +40,7 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) { } client := http.Client{ - 
Timeout: types.HTTPReadTimeout, + Timeout: types.HTTPTimeout, } resp, err := client.Do(req) diff --git a/hscontrol/derp/server/derp_server.go b/hscontrol/derp/server/derp_server.go index 52a63e9f..0b0c9b16 100644 --- a/hscontrol/derp/server/derp_server.go +++ b/hscontrol/derp/server/derp_server.go @@ -204,7 +204,7 @@ func DERPProbeHandler( } } -// DERPBootstrapDNSHandler implements the /bootsrap-dns endpoint +// DERPBootstrapDNSHandler implements the /bootstrap-dns endpoint // Described in https://github.com/tailscale/tailscale/issues/1405, // this endpoint provides a way to help a client when it fails to start up // because its DNS are broken. diff --git a/hscontrol/grpcv1.go b/hscontrol/grpcv1.go index 379502c7..d9cd653d 100644 --- a/hscontrol/grpcv1.go +++ b/hscontrol/grpcv1.go @@ -3,7 +3,7 @@ package hscontrol import ( "context" - "fmt" + "errors" "sort" "strings" "time" @@ -144,7 +144,7 @@ func (api headscaleV1APIServer) ExpirePreAuthKey( ctx context.Context, request *v1.ExpirePreAuthKeyRequest, ) (*v1.ExpirePreAuthKeyResponse, error) { - err := api.h.db.DB.Transaction(func(tx *gorm.DB) error { + err := api.h.db.Write(func(tx *gorm.DB) error { preAuthKey, err := db.GetPreAuthKey(tx, request.GetUser(), request.Key) if err != nil { return err @@ -195,7 +195,7 @@ func (api headscaleV1APIServer) RegisterNode( return nil, err } - addrs, err := api.h.ipAlloc.Next() + ipv4, ipv6, err := api.h.ipAlloc.Next() if err != nil { return nil, err } @@ -208,7 +208,7 @@ func (api headscaleV1APIServer) RegisterNode( request.GetUser(), nil, util.RegisterMethodCLI, - addrs, + ipv4, ipv6, ) }) if err != nil { @@ -222,7 +222,7 @@ func (api headscaleV1APIServer) GetNode( ctx context.Context, request *v1.GetNodeRequest, ) (*v1.GetNodeResponse, error) { - node, err := api.h.db.GetNodeByID(request.GetNodeId()) + node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId())) if err != nil { return nil, err } @@ -231,7 +231,7 @@ func (api headscaleV1APIServer) GetNode( // Populate 
the online field based on // currently connected nodes. - resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey) + resp.Online = api.h.nodeNotifier.IsConnected(node.ID) return &v1.GetNodeResponse{Node: resp}, nil } @@ -248,12 +248,12 @@ func (api headscaleV1APIServer) SetTags( } node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) { - err := db.SetTags(tx, request.GetNodeId(), request.GetTags()) + err := db.SetTags(tx, types.NodeID(request.GetNodeId()), request.GetTags()) if err != nil { return nil, err } - return db.GetNodeByID(tx, request.GetNodeId()) + return db.GetNodeByID(tx, types.NodeID(request.GetNodeId())) }) if err != nil { return &v1.SetTagsResponse{ @@ -261,15 +261,12 @@ func (api headscaleV1APIServer) SetTags( }, status.Error(codes.InvalidArgument, err.Error()) } - stateUpdate := types.StateUpdate{ + ctx = types.NotifyCtx(ctx, "cli-settags", node.Hostname) + api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{ Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, + ChangeNodes: []types.NodeID{node.ID}, Message: "called from api.SetTags", - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-settags", node.Hostname) - api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + }, node.ID) log.Trace(). Str("node", node.Hostname). 
@@ -281,13 +278,13 @@ func (api headscaleV1APIServer) SetTags( func validateTag(tag string) error { if strings.Index(tag, "tag:") != 0 { - return fmt.Errorf("tag must start with the string 'tag:'") + return errors.New("tag must start with the string 'tag:'") } if strings.ToLower(tag) != tag { - return fmt.Errorf("tag should be lowercase") + return errors.New("tag should be lowercase") } if len(strings.Fields(tag)) > 1 { - return fmt.Errorf("tag should not contains space") + return errors.New("tag should not contains space") } return nil } @@ -296,26 +293,30 @@ func (api headscaleV1APIServer) DeleteNode( ctx context.Context, request *v1.DeleteNodeRequest, ) (*v1.DeleteNodeResponse, error) { - node, err := api.h.db.GetNodeByID(request.GetNodeId()) + node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId())) if err != nil { return nil, err } - err = api.h.db.DeleteNode( + changedNodes, err := api.h.db.DeleteNode( node, - api.h.nodeNotifier.ConnectedMap(), + api.h.nodeNotifier.LikelyConnectedMap(), ) if err != nil { return nil, err } - stateUpdate := types.StateUpdate{ + ctx = types.NotifyCtx(ctx, "cli-deletenode", node.Hostname) + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ Type: types.StatePeerRemoved, - Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)}, - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-deletenode", node.Hostname) - api.h.nodeNotifier.NotifyAll(ctx, stateUpdate) + Removed: []types.NodeID{node.ID}, + }) + + if changedNodes != nil { + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: changedNodes, + }) } return &v1.DeleteNodeResponse{}, nil @@ -330,33 +331,27 @@ func (api headscaleV1APIServer) ExpireNode( node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) { db.NodeSetExpiry( tx, - request.GetNodeId(), + types.NodeID(request.GetNodeId()), now, ) - return db.GetNodeByID(tx, request.GetNodeId()) + return db.GetNodeByID(tx, 
types.NodeID(request.GetNodeId())) }) if err != nil { return nil, err } - selfUpdate := types.StateUpdate{ - Type: types.StateSelfUpdate, - ChangeNodes: types.Nodes{node}, - } - if selfUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-expirenode-self", node.Hostname) - api.h.nodeNotifier.NotifyByMachineKey( - ctx, - selfUpdate, - node.MachineKey) - } + ctx = types.NotifyCtx(ctx, "cli-expirenode-self", node.Hostname) + api.h.nodeNotifier.NotifyByNodeID( + ctx, + types.StateUpdate{ + Type: types.StateSelfUpdate, + ChangeNodes: []types.NodeID{node.ID}, + }, + node.ID) - stateUpdate := types.StateUpdateExpire(node.ID, now) - if stateUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-expirenode-peers", node.Hostname) - api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + ctx = types.NotifyCtx(ctx, "cli-expirenode-peers", node.Hostname) + api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, now), node.ID) log.Trace(). Str("node", node.Hostname). @@ -380,21 +375,18 @@ func (api headscaleV1APIServer) RenameNode( return nil, err } - return db.GetNodeByID(tx, request.GetNodeId()) + return db.GetNodeByID(tx, types.NodeID(request.GetNodeId())) }) if err != nil { return nil, err } - stateUpdate := types.StateUpdate{ + ctx = types.NotifyCtx(ctx, "cli-renamenode", node.Hostname) + api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{ Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, + ChangeNodes: []types.NodeID{node.ID}, Message: "called from api.RenameNode", - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(ctx, "cli-renamenode", node.Hostname) - api.h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + }, node.ID) log.Trace(). Str("node", node.Hostname). 
@@ -408,7 +400,7 @@ func (api headscaleV1APIServer) ListNodes( ctx context.Context, request *v1.ListNodesRequest, ) (*v1.ListNodesResponse, error) { - isConnected := api.h.nodeNotifier.ConnectedMap() + isLikelyConnected := api.h.nodeNotifier.LikelyConnectedMap() if request.GetUser() != "" { nodes, err := db.Read(api.h.db.DB, func(rx *gorm.DB) (types.Nodes, error) { return db.ListNodesByUser(rx, request.GetUser()) @@ -423,7 +415,9 @@ func (api headscaleV1APIServer) ListNodes( // Populate the online field based on // currently connected nodes. - resp.Online = isConnected[node.MachineKey] + if val, ok := isLikelyConnected.Load(node.ID); ok && val { + resp.Online = true + } response[index] = resp } @@ -446,7 +440,9 @@ func (api headscaleV1APIServer) ListNodes( // Populate the online field based on // currently connected nodes. - resp.Online = isConnected[node.MachineKey] + if val, ok := isLikelyConnected.Load(node.ID); ok && val { + resp.Online = true + } validTags, invalidTags := api.h.ACLPolicy.TagsOfNode( node, @@ -463,7 +459,7 @@ func (api headscaleV1APIServer) MoveNode( ctx context.Context, request *v1.MoveNodeRequest, ) (*v1.MoveNodeResponse, error) { - node, err := api.h.db.GetNodeByID(request.GetNodeId()) + node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId())) if err != nil { return nil, err } @@ -476,6 +472,24 @@ func (api headscaleV1APIServer) MoveNode( return &v1.MoveNodeResponse{Node: node.Proto()}, nil } +func (api headscaleV1APIServer) BackfillNodeIPs( + ctx context.Context, + request *v1.BackfillNodeIPsRequest, +) (*v1.BackfillNodeIPsResponse, error) { + log.Trace().Msg("Backfill called") + + if !request.Confirmed { + return nil, errors.New("not confirmed, aborting") + } + + changes, err := api.h.db.BackfillNodeIPs(api.h.ipAlloc) + if err != nil { + return nil, err + } + + return &v1.BackfillNodeIPsResponse{Changes: changes}, nil +} + func (api headscaleV1APIServer) GetRoutes( ctx context.Context, request *v1.GetRoutesRequest, @@ -503,7 
+517,7 @@ func (api headscaleV1APIServer) EnableRoute( return nil, err } - if update != nil && update.Valid() { + if update != nil { ctx := types.NotifyCtx(ctx, "cli-enableroute", "unknown") api.h.nodeNotifier.NotifyAll( ctx, *update) @@ -516,17 +530,19 @@ func (api headscaleV1APIServer) DisableRoute( ctx context.Context, request *v1.DisableRouteRequest, ) (*v1.DisableRouteResponse, error) { - isConnected := api.h.nodeNotifier.ConnectedMap() - update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { - return db.DisableRoute(tx, request.GetRouteId(), isConnected) + update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) ([]types.NodeID, error) { + return db.DisableRoute(tx, request.GetRouteId(), api.h.nodeNotifier.LikelyConnectedMap()) }) if err != nil { return nil, err } - if update != nil && update.Valid() { + if update != nil { ctx := types.NotifyCtx(ctx, "cli-disableroute", "unknown") - api.h.nodeNotifier.NotifyAll(ctx, *update) + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: update, + }) } return &v1.DisableRouteResponse{}, nil @@ -536,7 +552,7 @@ func (api headscaleV1APIServer) GetNodeRoutes( ctx context.Context, request *v1.GetNodeRoutesRequest, ) (*v1.GetNodeRoutesResponse, error) { - node, err := api.h.db.GetNodeByID(request.GetNodeId()) + node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId())) if err != nil { return nil, err } @@ -555,17 +571,20 @@ func (api headscaleV1APIServer) DeleteRoute( ctx context.Context, request *v1.DeleteRouteRequest, ) (*v1.DeleteRouteResponse, error) { - isConnected := api.h.nodeNotifier.ConnectedMap() - update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { + isConnected := api.h.nodeNotifier.LikelyConnectedMap() + update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) ([]types.NodeID, error) { return db.DeleteRoute(tx, request.GetRouteId(), isConnected) }) if err != nil { return nil, err } - if update != 
nil && update.Valid() { + if update != nil { ctx := types.NotifyCtx(ctx, "cli-deleteroute", "unknown") - api.h.nodeNotifier.NotifyWithIgnore(ctx, *update) + api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: update, + }) } return &v1.DeleteRouteResponse{}, nil diff --git a/hscontrol/handlers.go b/hscontrol/handlers.go index ee670733..a6bbd1b8 100644 --- a/hscontrol/handlers.go +++ b/hscontrol/handlers.go @@ -68,12 +68,6 @@ func (h *Headscale) KeyHandler( Msg("could not get capability version") writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) - if err != nil { - log.Error(). - Caller(). - Err(err). - Msg("Failed to write response") - } return } @@ -82,19 +76,6 @@ func (h *Headscale) KeyHandler( Str("handler", "/key"). Int("cap_ver", int(capVer)). Msg("New noise client") - if err != nil { - writer.Header().Set("Content-Type", "text/plain; charset=utf-8") - writer.WriteHeader(http.StatusBadRequest) - _, err := writer.Write([]byte("Wrong params")) - if err != nil { - log.Error(). - Caller(). - Err(err). 
- Msg("Failed to write response") - } - - return - } // TS2021 (Tailscale v2 protocol) requires to have a different key if capVer >= NoiseCapabilityVersion { diff --git a/hscontrol/mapper/mapper.go b/hscontrol/mapper/mapper.go index df0f4d9c..d4f4392a 100644 --- a/hscontrol/mapper/mapper.go +++ b/hscontrol/mapper/mapper.go @@ -16,12 +16,13 @@ import ( "time" mapset "github.com/deckarep/golang-set/v2" + "github.com/juanfont/headscale/hscontrol/db" + "github.com/juanfont/headscale/hscontrol/notifier" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/klauspost/compress/zstd" "github.com/rs/zerolog/log" - "golang.org/x/exp/maps" "tailscale.com/envknob" "tailscale.com/smallzstd" "tailscale.com/tailcfg" @@ -51,21 +52,14 @@ var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_ type Mapper struct { // Configuration // TODO(kradalby): figure out if this is the format we want this in - derpMap *tailcfg.DERPMap - baseDomain string - dnsCfg *tailcfg.DNSConfig - logtail bool - randomClientPort bool + db *db.HSDatabase + cfg *types.Config + derpMap *tailcfg.DERPMap + notif *notifier.Notifier uid string created time.Time seq uint64 - - // Map isnt concurrency safe, so we need to ensure - // only one func is accessing it over time. - mu sync.Mutex - peers map[uint64]*types.Node - patches map[uint64][]patch } type patch struct { @@ -74,35 +68,22 @@ type patch struct { } func NewMapper( - node *types.Node, - peers types.Nodes, + db *db.HSDatabase, + cfg *types.Config, derpMap *tailcfg.DERPMap, - baseDomain string, - dnsCfg *tailcfg.DNSConfig, - logtail bool, - randomClientPort bool, + notif *notifier.Notifier, ) *Mapper { - log.Debug(). - Caller(). - Str("node", node.Hostname). 
- Msg("creating new mapper") - uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength) return &Mapper{ - derpMap: derpMap, - baseDomain: baseDomain, - dnsCfg: dnsCfg, - logtail: logtail, - randomClientPort: randomClientPort, + db: db, + cfg: cfg, + derpMap: derpMap, + notif: notif, uid: uid, created: time.Now(), seq: 0, - - // TODO: populate - peers: peers.IDMap(), - patches: make(map[uint64][]patch), } } @@ -194,8 +175,8 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) { "device_model": []string{node.Hostinfo.OS}, } - if len(node.IPAddresses) > 0 { - attrs.Add("device_ip", node.IPAddresses[0].String()) + if len(node.IPs()) > 0 { + attrs.Add("device_ip", node.IPs()[0].String()) } resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode()) @@ -207,11 +188,10 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) { // It is a separate function to make testing easier. func (m *Mapper) fullMapResponse( node *types.Node, + peers types.Nodes, pol *policy.ACLPolicy, capVer tailcfg.CapabilityVersion, ) (*tailcfg.MapResponse, error) { - peers := nodeMapToList(m.peers) - resp, err := m.baseWithConfigMapResponse(node, pol, capVer) if err != nil { return nil, err @@ -219,14 +199,13 @@ func (m *Mapper) fullMapResponse( err = appendPeerChanges( resp, + true, // full change pol, node, capVer, peers, peers, - m.baseDomain, - m.dnsCfg, - m.randomClientPort, + m.cfg, ) if err != nil { return nil, err @@ -240,35 +219,25 @@ func (m *Mapper) FullMapResponse( mapRequest tailcfg.MapRequest, node *types.Node, pol *policy.ACLPolicy, + messages ...string, ) ([]byte, error) { - m.mu.Lock() - defer m.mu.Unlock() - - peers := maps.Keys(m.peers) - peersWithPatches := maps.Keys(m.patches) - slices.Sort(peers) - slices.Sort(peersWithPatches) - - if len(peersWithPatches) > 0 { - log.Debug(). - Str("node", node.Hostname). - Uints64("peers", peers). - Uints64("pending_patches", peersWithPatches). 
- Msgf("node requested full map response, but has pending patches") - } - - resp, err := m.fullMapResponse(node, pol, mapRequest.Version) + peers, err := m.ListPeers(node.ID) if err != nil { return nil, err } - return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress) + resp, err := m.fullMapResponse(node, peers, pol, mapRequest.Version) + if err != nil { + return nil, err + } + + return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress, messages...) } -// LiteMapResponse returns a MapResponse for the given node. +// ReadOnlyResponse returns a MapResponse for the given node. // Lite means that the peers has been omitted, this is intended // to be used to answer MapRequests with OmitPeers set to true. -func (m *Mapper) LiteMapResponse( +func (m *Mapper) ReadOnlyMapResponse( mapRequest tailcfg.MapRequest, node *types.Node, pol *policy.ACLPolicy, @@ -279,18 +248,6 @@ func (m *Mapper) LiteMapResponse( return nil, err } - rules, sshPolicy, err := policy.GenerateFilterAndSSHRules( - pol, - node, - nodeMapToList(m.peers), - ) - if err != nil { - return nil, err - } - - resp.PacketFilter = policy.ReduceFilterRules(node, rules) - resp.SSHPolicy = sshPolicy - return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress, messages...) } @@ -320,50 +277,74 @@ func (m *Mapper) DERPMapResponse( func (m *Mapper) PeerChangedResponse( mapRequest tailcfg.MapRequest, node *types.Node, - changed types.Nodes, + changed map[types.NodeID]bool, + patches []*tailcfg.PeerChange, pol *policy.ACLPolicy, messages ...string, ) ([]byte, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Update our internal map. 
- for _, node := range changed { - if patches, ok := m.patches[node.ID]; ok { - // preserve online status in case the patch has an outdated one - online := node.IsOnline - - for _, p := range patches { - // TODO(kradalby): Figure if this needs to be sorted by timestamp - node.ApplyPeerChange(p.change) - } - - // Ensure the patches are not applied again later - delete(m.patches, node.ID) - - node.IsOnline = online - } - - m.peers[node.ID] = node - } - resp := m.baseMapResponse() - err := appendPeerChanges( + peers, err := m.ListPeers(node.ID) + if err != nil { + return nil, err + } + + var removedIDs []tailcfg.NodeID + var changedIDs []types.NodeID + for nodeID, nodeChanged := range changed { + if nodeChanged { + changedIDs = append(changedIDs, nodeID) + } else { + removedIDs = append(removedIDs, nodeID.NodeID()) + } + } + + changedNodes := make(types.Nodes, 0, len(changedIDs)) + for _, peer := range peers { + if slices.Contains(changedIDs, peer.ID) { + changedNodes = append(changedNodes, peer) + } + } + + err = appendPeerChanges( &resp, + false, // partial change pol, node, mapRequest.Version, - nodeMapToList(m.peers), - changed, - m.baseDomain, - m.dnsCfg, - m.randomClientPort, + peers, + changedNodes, + m.cfg, ) if err != nil { return nil, err } + resp.PeersRemoved = removedIDs + + // Sending patches as a part of a PeersChanged response + // is technically not suppose to be done, but they are + // applied after the PeersChanged. The patch list + // should _only_ contain Nodes that are not in the + // PeersChanged or PeersRemoved list and the caller + // should filter them out. + // + // From tailcfg docs: + // These are applied after Peers* above, but in practice the + // control server should only send these on their own, without + // the Peers* fields also set. + if patches != nil { + resp.PeersChangedPatch = patches + } + + // Add the node itself, it might have changed, and particularly + // if there are no patches or changes, this is a self update. 
+ tailnode, err := tailNode(node, mapRequest.Version, pol, m.cfg) + if err != nil { + return nil, err + } + resp.Node = tailnode + return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...) } @@ -375,71 +356,12 @@ func (m *Mapper) PeerChangedPatchResponse( changed []*tailcfg.PeerChange, pol *policy.ACLPolicy, ) ([]byte, error) { - m.mu.Lock() - defer m.mu.Unlock() - - sendUpdate := false - // patch the internal map - for _, change := range changed { - if peer, ok := m.peers[uint64(change.NodeID)]; ok { - peer.ApplyPeerChange(change) - sendUpdate = true - } else { - log.Trace().Str("node", node.Hostname).Msgf("Node with ID %s is missing from mapper for Node %s, saving patch for when node is available", change.NodeID, node.Hostname) - - p := patch{ - timestamp: time.Now(), - change: change, - } - - if patches, ok := m.patches[uint64(change.NodeID)]; ok { - m.patches[uint64(change.NodeID)] = append(patches, p) - } else { - m.patches[uint64(change.NodeID)] = []patch{p} - } - } - } - - if !sendUpdate { - return nil, nil - } - resp := m.baseMapResponse() resp.PeersChangedPatch = changed return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress) } -// TODO(kradalby): We need some integration tests for this. -func (m *Mapper) PeerRemovedResponse( - mapRequest tailcfg.MapRequest, - node *types.Node, - removed []tailcfg.NodeID, -) ([]byte, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Some nodes might have been removed already - // so we dont want to ask downstream to remove - // twice, than can cause a panic in tailscaled. 
- notYetRemoved := []tailcfg.NodeID{} - - // remove from our internal map - for _, id := range removed { - if _, ok := m.peers[uint64(id)]; ok { - notYetRemoved = append(notYetRemoved, id) - } - - delete(m.peers, uint64(id)) - delete(m.patches, uint64(id)) - } - - resp := m.baseMapResponse() - resp.PeersRemoved = notYetRemoved - - return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress) -} - func (m *Mapper) marshalMapResponse( mapRequest tailcfg.MapRequest, resp *tailcfg.MapResponse, @@ -451,10 +373,7 @@ func (m *Mapper) marshalMapResponse( jsonBody, err := json.Marshal(resp) if err != nil { - log.Error(). - Caller(). - Err(err). - Msg("Cannot marshal map response") + return nil, fmt.Errorf("marshalling map response: %w", err) } if debugDumpMapResponsePath != "" { @@ -469,10 +388,8 @@ func (m *Mapper) marshalMapResponse( switch { case resp.Peers != nil && len(resp.Peers) > 0: responseType = "full" - case isSelfUpdate(messages...): + case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil && resp.DERPMap == nil && !resp.KeepAlive: responseType = "self" - case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil: - responseType = "lite" case resp.PeersChanged != nil && len(resp.PeersChanged) > 0: responseType = "changed" case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0: @@ -483,10 +400,7 @@ func (m *Mapper) marshalMapResponse( body, err := json.MarshalIndent(data, "", " ") if err != nil { - log.Error(). - Caller(). - Err(err). 
- Msg("Cannot marshal map response") + return nil, fmt.Errorf("marshalling map response: %w", err) } perms := fs.FileMode(debugMapResponsePerm) @@ -496,11 +410,11 @@ func (m *Mapper) marshalMapResponse( panic(err) } - now := time.Now().UnixNano() + now := time.Now().Format("2006-01-02T15-04-05.999999999") mapResponsePath := path.Join( mPath, - fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType), + fmt.Sprintf("%s-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType), ) log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath) @@ -574,7 +488,7 @@ func (m *Mapper) baseWithConfigMapResponse( ) (*tailcfg.MapResponse, error) { resp := m.baseMapResponse() - tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort) + tailnode, err := tailNode(node, capVer, pol, m.cfg) if err != nil { return nil, err } @@ -582,7 +496,7 @@ func (m *Mapper) baseWithConfigMapResponse( resp.DERPMap = m.derpMap - resp.Domain = m.baseDomain + resp.Domain = m.cfg.BaseDomain // Do not instruct clients to collect services we do not // support or do anything with them @@ -591,12 +505,26 @@ func (m *Mapper) baseWithConfigMapResponse( resp.KeepAlive = false resp.Debug = &tailcfg.Debug{ - DisableLogTail: !m.logtail, + DisableLogTail: !m.cfg.LogTail.Enabled, } return &resp, nil } +func (m *Mapper) ListPeers(nodeID types.NodeID) (types.Nodes, error) { + peers, err := m.db.ListPeers(nodeID) + if err != nil { + return nil, err + } + + for _, peer := range peers { + online := m.notif.IsLikelyConnected(peer.ID) + peer.IsOnline = &online + } + + return peers, nil +} + func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes { ret := make(types.Nodes, 0) @@ -612,42 +540,41 @@ func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes { func appendPeerChanges( resp *tailcfg.MapResponse, + fullChange bool, pol *policy.ACLPolicy, node *types.Node, capVer tailcfg.CapabilityVersion, peers types.Nodes, changed types.Nodes, - 
baseDomain string, - dnsCfg *tailcfg.DNSConfig, - randomClientPort bool, + cfg *types.Config, ) error { - fullChange := len(peers) == len(changed) - rules, sshPolicy, err := policy.GenerateFilterAndSSHRules( - pol, - node, - peers, - ) + packetFilter, err := pol.CompileFilterRules(append(peers, node)) + if err != nil { + return err + } + + sshPolicy, err := pol.CompileSSHPolicy(node, peers) if err != nil { return err } // If there are filter rules present, see if there are any nodes that cannot // access eachother at all and remove them from the peers. - if len(rules) > 0 { - changed = policy.FilterNodesByACL(node, changed, rules) + if len(packetFilter) > 0 { + changed = policy.FilterNodesByACL(node, changed, packetFilter) } - profiles := generateUserProfiles(node, changed, baseDomain) + profiles := generateUserProfiles(node, changed, cfg.BaseDomain) dnsConfig := generateDNSConfig( - dnsCfg, - baseDomain, + cfg.DNSConfig, + cfg.BaseDomain, node, peers, ) - tailPeers, err := tailNodes(changed, capVer, pol, dnsCfg, baseDomain, randomClientPort) + tailPeers, err := tailNodes(changed, capVer, pol, cfg) if err != nil { return err } @@ -663,19 +590,9 @@ func appendPeerChanges( resp.PeersChanged = tailPeers } resp.DNSConfig = dnsConfig - resp.PacketFilter = policy.ReduceFilterRules(node, rules) + resp.PacketFilter = policy.ReduceFilterRules(node, packetFilter) resp.UserProfiles = profiles resp.SSHPolicy = sshPolicy return nil } - -func isSelfUpdate(messages ...string) bool { - for _, message := range messages { - if strings.Contains(message, types.SelfUpdateIdentifier) { - return true - } - } - - return false -} diff --git a/hscontrol/mapper/mapper_test.go b/hscontrol/mapper/mapper_test.go index bcc17dd4..2ba3d031 100644 --- a/hscontrol/mapper/mapper_test.go +++ b/hscontrol/mapper/mapper_test.go @@ -17,6 +17,11 @@ import ( "tailscale.com/types/key" ) +var iap = func(ipStr string) *netip.Addr { + ip := netip.MustParseAddr(ipStr) + return &ip +} + func (s *Suite) 
TestGetMapResponseUserProfiles(c *check.C) { mach := func(hostname, username string, userid uint) *types.Node { return &types.Node{ @@ -176,17 +181,16 @@ func Test_fullMapResponse(t *testing.T) { DiscoKey: mustDK( "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084", ), - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "mini", - GivenName: "mini", - UserID: 0, - User: types.User{Name: "mini"}, - ForcedTags: []string{}, - AuthKeyID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "mini", + GivenName: "mini", + UserID: 0, + User: types.User{Name: "mini"}, + ForcedTags: []string{}, AuthKey: &types.PreAuthKey{}, LastSeen: &lastSeen, Expiry: &expire, - Hostinfo: &tailcfg.Hostinfo{}, + Hostinfo: &tailcfg.Hostinfo{}, Routes: []types.Route{ { Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")), @@ -257,17 +261,17 @@ func Test_fullMapResponse(t *testing.T) { DiscoKey: mustDK( "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084", ), - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "peer1", - GivenName: "peer1", - UserID: 0, - User: types.User{Name: "mini"}, - ForcedTags: []string{}, - LastSeen: &lastSeen, - Expiry: &expire, - Hostinfo: &tailcfg.Hostinfo{}, - Routes: []types.Route{}, - CreatedAt: created, + IPv4: iap("100.64.0.2"), + Hostname: "peer1", + GivenName: "peer1", + UserID: 0, + User: types.User{Name: "mini"}, + ForcedTags: []string{}, + LastSeen: &lastSeen, + Expiry: &expire, + Hostinfo: &tailcfg.Hostinfo{}, + Routes: []types.Route{}, + CreatedAt: created, } tailPeer1 := &tailcfg.Node{ @@ -312,17 +316,17 @@ func Test_fullMapResponse(t *testing.T) { DiscoKey: mustDK( "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084", ), - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "peer2", - GivenName: "peer2", - UserID: 1, - User: types.User{Name: "peer2"}, - ForcedTags: []string{}, - LastSeen: &lastSeen, - Expiry: &expire, - 
Hostinfo: &tailcfg.Hostinfo{}, - Routes: []types.Route{}, - CreatedAt: created, + IPv4: iap("100.64.0.3"), + Hostname: "peer2", + GivenName: "peer2", + UserID: 1, + User: types.User{Name: "peer2"}, + ForcedTags: []string{}, + LastSeen: &lastSeen, + Expiry: &expire, + Hostinfo: &tailcfg.Hostinfo{}, + Routes: []types.Route{}, + CreatedAt: created, } tests := []struct { @@ -331,13 +335,10 @@ func Test_fullMapResponse(t *testing.T) { node *types.Node peers types.Nodes - baseDomain string - dnsConfig *tailcfg.DNSConfig - derpMap *tailcfg.DERPMap - logtail bool - randomClientPort bool - want *tailcfg.MapResponse - wantErr bool + derpMap *tailcfg.DERPMap + cfg *types.Config + want *tailcfg.MapResponse + wantErr bool }{ // { // name: "empty-node", @@ -349,15 +350,17 @@ func Test_fullMapResponse(t *testing.T) { // wantErr: true, // }, { - name: "no-pol-no-peers-map-response", - pol: &policy.ACLPolicy{}, - node: mini, - peers: types.Nodes{}, - baseDomain: "", - dnsConfig: &tailcfg.DNSConfig{}, - derpMap: &tailcfg.DERPMap{}, - logtail: false, - randomClientPort: false, + name: "no-pol-no-peers-map-response", + pol: &policy.ACLPolicy{}, + node: mini, + peers: types.Nodes{}, + derpMap: &tailcfg.DERPMap{}, + cfg: &types.Config{ + BaseDomain: "", + DNSConfig: &tailcfg.DNSConfig{}, + LogTail: types.LogTailConfig{Enabled: false}, + RandomizeClientPort: false, + }, want: &tailcfg.MapResponse{ Node: tailMini, KeepAlive: false, @@ -383,11 +386,13 @@ func Test_fullMapResponse(t *testing.T) { peers: types.Nodes{ peer1, }, - baseDomain: "", - dnsConfig: &tailcfg.DNSConfig{}, - derpMap: &tailcfg.DERPMap{}, - logtail: false, - randomClientPort: false, + derpMap: &tailcfg.DERPMap{}, + cfg: &types.Config{ + BaseDomain: "", + DNSConfig: &tailcfg.DNSConfig{}, + LogTail: types.LogTailConfig{Enabled: false}, + RandomizeClientPort: false, + }, want: &tailcfg.MapResponse{ KeepAlive: false, Node: tailMini, @@ -424,11 +429,13 @@ func Test_fullMapResponse(t *testing.T) { peer1, peer2, }, - 
baseDomain: "", - dnsConfig: &tailcfg.DNSConfig{}, - derpMap: &tailcfg.DERPMap{}, - logtail: false, - randomClientPort: false, + derpMap: &tailcfg.DERPMap{}, + cfg: &types.Config{ + BaseDomain: "", + DNSConfig: &tailcfg.DNSConfig{}, + LogTail: types.LogTailConfig{Enabled: false}, + RandomizeClientPort: false, + }, want: &tailcfg.MapResponse{ KeepAlive: false, Node: tailMini, @@ -463,17 +470,15 @@ func Test_fullMapResponse(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mappy := NewMapper( - tt.node, - tt.peers, + nil, + tt.cfg, tt.derpMap, - tt.baseDomain, - tt.dnsConfig, - tt.logtail, - tt.randomClientPort, + nil, ) got, err := mappy.fullMapResponse( tt.node, + tt.peers, tt.pol, 0, ) diff --git a/hscontrol/mapper/tail.go b/hscontrol/mapper/tail.go index c10da4de..ac39d35e 100644 --- a/hscontrol/mapper/tail.go +++ b/hscontrol/mapper/tail.go @@ -3,12 +3,10 @@ package mapper import ( "fmt" "net/netip" - "strconv" "time" "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" - "github.com/juanfont/headscale/hscontrol/util" "github.com/samber/lo" "tailscale.com/tailcfg" ) @@ -17,9 +15,7 @@ func tailNodes( nodes types.Nodes, capVer tailcfg.CapabilityVersion, pol *policy.ACLPolicy, - dnsConfig *tailcfg.DNSConfig, - baseDomain string, - randomClientPort bool, + cfg *types.Config, ) ([]*tailcfg.Node, error) { tNodes := make([]*tailcfg.Node, len(nodes)) @@ -28,9 +24,7 @@ func tailNodes( node, capVer, pol, - dnsConfig, - baseDomain, - randomClientPort, + cfg, ) if err != nil { return nil, err @@ -48,11 +42,9 @@ func tailNode( node *types.Node, capVer tailcfg.CapabilityVersion, pol *policy.ACLPolicy, - dnsConfig *tailcfg.DNSConfig, - baseDomain string, - randomClientPort bool, + cfg *types.Config, ) (*tailcfg.Node, error) { - addrs := node.IPAddresses.Prefixes() + addrs := node.Prefixes() allowedIPs := append( []netip.Prefix{}, @@ -85,7 +77,7 @@ func tailNode( keyExpiry = time.Time{} } - hostname, err := 
node.GetFQDN(dnsConfig, baseDomain) + hostname, err := node.GetFQDN(cfg.DNSConfig, cfg.BaseDomain) if err != nil { return nil, fmt.Errorf("tailNode, failed to create FQDN: %s", err) } @@ -94,12 +86,10 @@ func tailNode( tags = lo.Uniq(append(tags, node.ForcedTags...)) tNode := tailcfg.Node{ - ID: tailcfg.NodeID(node.ID), // this is the actual ID - StableID: tailcfg.StableNodeID( - strconv.FormatUint(node.ID, util.Base10), - ), // in headscale, unlike tailcontrol server, IDs are permanent - Name: hostname, - Cap: capVer, + ID: tailcfg.NodeID(node.ID), // this is the actual ID + StableID: node.ID.StableID(), + Name: hostname, + Cap: capVer, User: tailcfg.UserID(node.UserID), @@ -133,7 +123,7 @@ func tailNode( tailcfg.CapabilitySSH: []tailcfg.RawMessage{}, } - if randomClientPort { + if cfg.RandomizeClientPort { tNode.CapMap[tailcfg.NodeAttrRandomizeClientPort] = []tailcfg.RawMessage{} } } else { @@ -143,7 +133,7 @@ func tailNode( tailcfg.CapabilitySSH, } - if randomClientPort { + if cfg.RandomizeClientPort { tNode.Capabilities = append(tNode.Capabilities, tailcfg.NodeAttrRandomizeClientPort) } } diff --git a/hscontrol/mapper/tail_test.go b/hscontrol/mapper/tail_test.go index f6e370c4..47af68fe 100644 --- a/hscontrol/mapper/tail_test.go +++ b/hscontrol/mapper/tail_test.go @@ -89,9 +89,7 @@ func TestTailNode(t *testing.T) { DiscoKey: mustDK( "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084", ), - IPAddresses: []netip.Addr{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), Hostname: "mini", GivenName: "mini", UserID: 0, @@ -99,7 +97,6 @@ func TestTailNode(t *testing.T) { Name: "mini", }, ForcedTags: []string{}, - AuthKeyID: 0, AuthKey: &types.PreAuthKey{}, LastSeen: &lastSeen, Expiry: &expire, @@ -182,13 +179,16 @@ func TestTailNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + cfg := &types.Config{ + BaseDomain: tt.baseDomain, + DNSConfig: tt.dnsConfig, + RandomizeClientPort: false, + } got, 
err := tailNode( tt.node, 0, tt.pol, - tt.dnsConfig, - tt.baseDomain, - false, + cfg, ) if (err != nil) != tt.wantErr { diff --git a/hscontrol/metrics.go b/hscontrol/metrics.go index fc56f584..835a6aac 100644 --- a/hscontrol/metrics.go +++ b/hscontrol/metrics.go @@ -1,25 +1,120 @@ package hscontrol import ( + "net/http" + "strconv" + + "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "tailscale.com/envknob" ) +var debugHighCardinalityMetrics = envknob.Bool("HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS") + +var mapResponseLastSentSeconds *prometheus.GaugeVec + +func init() { + if debugHighCardinalityMetrics { + mapResponseLastSentSeconds = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_last_sent_seconds", + Help: "last sent metric to node.id", + }, []string{"type", "id"}) + } +} + const prometheusNamespace = "headscale" var ( - // This is a high cardinality metric (user x node), we might want to make this - // configurable/opt-in in the future. - nodeRegistrations = promauto.NewCounterVec(prometheus.CounterOpts{ + mapResponseSent = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: prometheusNamespace, - Name: "node_registrations_total", - Help: "The total amount of registered node attempts", - }, []string{"action", "auth", "status", "user"}) - - updateRequestsSentToNode = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mapresponse_sent_total", + Help: "total count of mapresponses sent to clients", + }, []string{"status", "type"}) + mapResponseUpdateReceived = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: prometheusNamespace, - Name: "update_request_sent_to_node_total", - Help: "The number of calls/messages issued on a specific nodes update channel", - }, []string{"user", "node", "status"}) - // TODO(kradalby): This is very debugging, we might want to remove it. 
+ Name: "mapresponse_updates_received_total", + Help: "total count of mapresponse updates received on update channel", + }, []string{"type"}) + mapResponseWriteUpdatesInStream = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_write_updates_in_stream_total", + Help: "total count of writes that occured in a stream session, pre-68 nodes", + }, []string{"status"}) + mapResponseEndpointUpdates = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_endpoint_updates_total", + Help: "total count of endpoint updates received", + }, []string{"status"}) + mapResponseReadOnly = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_readonly_requests_total", + Help: "total count of readonly requests received", + }, []string{"status"}) + mapResponseEnded = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_ended_total", + Help: "total count of new mapsessions ended", + }, []string{"reason"}) + mapResponseClosed = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "mapresponse_closed_total", + Help: "total count of calls to mapresponse close", + }, []string{"return"}) + httpDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: prometheusNamespace, + Name: "http_duration_seconds", + Help: "Duration of HTTP requests.", + }, []string{"path"}) + httpCounter = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "http_requests_total", + Help: "Total number of http requests processed", + }, []string{"code", "method", "path"}, + ) ) + +// prometheusMiddleware implements mux.MiddlewareFunc. 
+func prometheusMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + route := mux.CurrentRoute(r) + path, _ := route.GetPathTemplate() + + // Ignore streaming and noise sessions + // it has its own router further down. + if path == "/ts2021" || path == "/machine/map" || path == "/derp" || path == "/derp/probe" || path == "/bootstrap-dns" { + next.ServeHTTP(w, r) + return + } + + rw := &respWriterProm{ResponseWriter: w} + + timer := prometheus.NewTimer(httpDuration.WithLabelValues(path)) + next.ServeHTTP(rw, r) + timer.ObserveDuration() + httpCounter.WithLabelValues(strconv.Itoa(rw.status), r.Method, path).Inc() + }) +} + +type respWriterProm struct { + http.ResponseWriter + status int + written int64 + wroteHeader bool +} + +func (r *respWriterProm) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *respWriterProm) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} diff --git a/hscontrol/noise.go b/hscontrol/noise.go index 0fa28d19..360c7045 100644 --- a/hscontrol/noise.go +++ b/hscontrol/noise.go @@ -95,18 +95,19 @@ func (h *Headscale) NoiseUpgradeHandler( // The HTTP2 server that exposes this router is created for // a single hijacked connection from /ts2021, using netutil.NewOneConnListener router := mux.NewRouter() + router.Use(prometheusMiddleware) router.HandleFunc("/machine/register", noiseServer.NoiseRegistrationHandler). 
Methods(http.MethodPost) router.HandleFunc("/machine/map", noiseServer.NoisePollNetMapHandler) server := http.Server{ - ReadTimeout: types.HTTPReadTimeout, + ReadTimeout: types.HTTPTimeout, } noiseServer.httpBaseConfig = &http.Server{ Handler: router, - ReadHeaderTimeout: types.HTTPReadTimeout, + ReadHeaderTimeout: types.HTTPTimeout, } noiseServer.http2Server = &http2.Server{} @@ -163,3 +164,79 @@ func (ns *noiseServer) earlyNoise(protocolVersion int, writer io.Writer) error { return nil } + +const ( + MinimumCapVersion tailcfg.CapabilityVersion = 58 +) + +// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol +// +// This is the busiest endpoint, as it keeps the HTTP long poll that updates +// the clients when something in the network changes. +// +// The clients POST stuff like HostInfo and their Endpoints here, but +// only after their first request (marked with the ReadOnly field). +// +// At this moment the updates are sent in a quite horrendous way, but they kinda work. +func (ns *noiseServer) NoisePollNetMapHandler( + writer http.ResponseWriter, + req *http.Request, +) { + log.Trace(). + Str("handler", "NoisePollNetMap"). + Msg("PollNetMapHandler called") + + log.Trace(). + Any("headers", req.Header). + Caller(). + Msg("Headers") + + body, _ := io.ReadAll(req.Body) + + mapRequest := tailcfg.MapRequest{} + if err := json.Unmarshal(body, &mapRequest); err != nil { + log.Error(). + Caller(). + Err(err). + Msg("Cannot parse MapRequest") + http.Error(writer, "Internal error", http.StatusInternalServerError) + + return + } + + // Reject unsupported versions + if mapRequest.Version < MinimumCapVersion { + log.Info(). + Caller(). + Int("min_version", int(MinimumCapVersion)). + Int("client_version", int(mapRequest.Version)). 
+ Msg("unsupported client connected") + http.Error(writer, "Internal error", http.StatusBadRequest) + + return + } + + ns.nodeKey = mapRequest.NodeKey + + node, err := ns.headscale.db.GetNodeByAnyKey( + ns.conn.Peer(), + mapRequest.NodeKey, + key.NodePublic{}, + ) + if err != nil { + log.Error(). + Str("handler", "NoisePollNetMap"). + Msgf("Failed to fetch node from the database with node key: %s", mapRequest.NodeKey.String()) + http.Error(writer, "Internal error", http.StatusInternalServerError) + + return + } + + sess := ns.headscale.newMapSession(req.Context(), mapRequest, writer, node) + sess.tracef("a node sending a MapRequest with Noise protocol") + if !sess.isStreaming() { + sess.serve() + } else { + sess.serveLongPoll() + } +} diff --git a/hscontrol/notifier/metrics.go b/hscontrol/notifier/metrics.go new file mode 100644 index 00000000..8a7a8839 --- /dev/null +++ b/hscontrol/notifier/metrics.go @@ -0,0 +1,68 @@ +package notifier + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "tailscale.com/envknob" +) + +const prometheusNamespace = "headscale" + +var debugHighCardinalityMetrics = envknob.Bool("HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS") + +var notifierUpdateSent *prometheus.CounterVec + +func init() { + if debugHighCardinalityMetrics { + notifierUpdateSent = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "notifier_update_sent_total", + Help: "total count of update sent on nodes channel", + }, []string{"status", "type", "trigger", "id"}) + } else { + notifierUpdateSent = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "notifier_update_sent_total", + Help: "total count of update sent on nodes channel", + }, []string{"status", "type", "trigger"}) + } +} + +var ( + notifierWaitersForLock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_waiters_for_lock", + 
Help: "gauge of waiters for the notifier lock", + }, []string{"type", "action"}) + notifierWaitForLock = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: prometheusNamespace, + Name: "notifier_wait_for_lock_seconds", + Help: "histogram of time spent waiting for the notifier lock", + Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.5, 1, 3, 5, 10}, + }, []string{"action"}) + notifierUpdateReceived = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "notifier_update_received_total", + Help: "total count of updates received by notifier", + }, []string{"type", "trigger"}) + notifierNodeUpdateChans = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_open_channels_total", + Help: "total count open channels in notifier", + }) + notifierBatcherWaitersForLock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_batcher_waiters_for_lock", + Help: "gauge of waiters for the notifier batcher lock", + }, []string{"type", "action"}) + notifierBatcherChanges = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_batcher_changes_pending", + Help: "gauge of full changes pending in the notifier batcher", + }, []string{}) + notifierBatcherPatches = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "notifier_batcher_patches_pending", + Help: "gauge of patches pending in the notifier batcher", + }, []string{}) +) diff --git a/hscontrol/notifier/notifier.go b/hscontrol/notifier/notifier.go index 2384a40f..483c3f37 100644 --- a/hscontrol/notifier/notifier.go +++ b/hscontrol/notifier/notifier.go @@ -3,81 +3,143 @@ package notifier import ( "context" "fmt" + "sort" "strings" "sync" + "time" "github.com/juanfont/headscale/hscontrol/types" - "github.com/juanfont/headscale/hscontrol/util" + "github.com/puzpuzpuz/xsync/v3" "github.com/rs/zerolog/log" - "tailscale.com/types/key" + 
"github.com/sasha-s/go-deadlock" + "tailscale.com/envknob" + "tailscale.com/tailcfg" + "tailscale.com/util/set" ) -type Notifier struct { - l sync.RWMutex - nodes map[string]chan<- types.StateUpdate - connected map[key.MachinePublic]bool -} +var debugDeadlock = envknob.Bool("HEADSCALE_DEBUG_DEADLOCK") +var debugDeadlockTimeout = envknob.RegisterDuration("HEADSCALE_DEBUG_DEADLOCK_TIMEOUT") -func NewNotifier() *Notifier { - return &Notifier{ - nodes: make(map[string]chan<- types.StateUpdate), - connected: make(map[key.MachinePublic]bool), +func init() { + deadlock.Opts.Disable = !debugDeadlock + if debugDeadlock { + deadlock.Opts.DeadlockTimeout = debugDeadlockTimeout() + deadlock.Opts.PrintAllCurrentGoroutines = true } } -func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpdate) { - log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to add node") - defer log.Trace(). - Caller(). - Str("key", machineKey.ShortString()). - Msg("releasing lock to add node") - - n.l.Lock() - defer n.l.Unlock() - - n.nodes[machineKey.String()] = c - n.connected[machineKey] = true - - log.Trace(). - Str("machine_key", machineKey.ShortString()). - Int("open_chans", len(n.nodes)). - Msg("Added new channel") +type Notifier struct { + l deadlock.Mutex + nodes map[types.NodeID]chan<- types.StateUpdate + connected *xsync.MapOf[types.NodeID, bool] + b *batcher + cfg *types.Config } -func (n *Notifier) RemoveNode(machineKey key.MachinePublic) { - log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to remove node") - defer log.Trace(). - Caller(). - Str("key", machineKey.ShortString()). 
- Msg("releasing lock to remove node") +func NewNotifier(cfg *types.Config) *Notifier { + n := &Notifier{ + nodes: make(map[types.NodeID]chan<- types.StateUpdate), + connected: xsync.NewMapOf[types.NodeID, bool](), + cfg: cfg, + } + b := newBatcher(cfg.Tuning.BatchChangeDelay, n) + n.b = b + go b.doWork() + return n +} + +// Close stops the batcher inside the notifier. +func (n *Notifier) Close() { + n.b.close() +} + +func (n *Notifier) tracef(nID types.NodeID, msg string, args ...any) { + log.Trace(). + Uint64("node.id", nID.Uint64()). + Int("open_chans", len(n.nodes)).Msgf(msg, args...) +} + +func (n *Notifier) AddNode(nodeID types.NodeID, c chan<- types.StateUpdate) { + start := time.Now() + notifierWaitersForLock.WithLabelValues("lock", "add").Inc() n.l.Lock() defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "add").Dec() + notifierWaitForLock.WithLabelValues("add").Observe(time.Since(start).Seconds()) + + // If a channel exists, it means the node has opened a new + // connection. Close the old channel and replace it. + if curr, ok := n.nodes[nodeID]; ok { + n.tracef(nodeID, "channel present, closing and replacing") + close(curr) + } + + n.nodes[nodeID] = c + n.connected.Store(nodeID, true) + + n.tracef(nodeID, "added new channel") + notifierNodeUpdateChans.Inc() +} + +// RemoveNode removes a node and a given channel from the notifier. +// It checks that the channel is the same as currently being updated +// and ignores the removal if it is not. +// RemoveNode reports if the node/chan was removed. 
+func (n *Notifier) RemoveNode(nodeID types.NodeID, c chan<- types.StateUpdate) bool { + start := time.Now() + notifierWaitersForLock.WithLabelValues("lock", "remove").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "remove").Dec() + notifierWaitForLock.WithLabelValues("remove").Observe(time.Since(start).Seconds()) if len(n.nodes) == 0 { - return + return true } - delete(n.nodes, machineKey.String()) - n.connected[machineKey] = false + // If the channel exists, but it does not belong + // to the caller, ignore. + if curr, ok := n.nodes[nodeID]; ok { + if curr != c { + n.tracef(nodeID, "channel has been replaced, not removing") + return false + } + } - log.Trace(). - Str("machine_key", machineKey.ShortString()). - Int("open_chans", len(n.nodes)). - Msg("Removed channel") + delete(n.nodes, nodeID) + n.connected.Store(nodeID, false) + + n.tracef(nodeID, "removed channel") + notifierNodeUpdateChans.Dec() + + return true } // IsConnected reports if a node is connected to headscale and has a // poll session open. -func (n *Notifier) IsConnected(machineKey key.MachinePublic) bool { - n.l.RLock() - defer n.l.RUnlock() +func (n *Notifier) IsConnected(nodeID types.NodeID) bool { + notifierWaitersForLock.WithLabelValues("lock", "conncheck").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "conncheck").Dec() - return n.connected[machineKey] + if val, ok := n.connected.Load(nodeID); ok { + return val + } + return false } -// TODO(kradalby): This returns a pointer and can be dangerous. -func (n *Notifier) ConnectedMap() map[key.MachinePublic]bool { +// IsLikelyConnected reports if a node is connected to headscale and has a +// poll session open, but doesn't lock, so might be wrong.
+func (n *Notifier) IsLikelyConnected(nodeID types.NodeID) bool { + if val, ok := n.connected.Load(nodeID); ok { + return val + } + return false +} + +func (n *Notifier) LikelyConnectedMap() *xsync.MapOf[types.NodeID, bool] { return n.connected } @@ -88,86 +150,288 @@ func (n *Notifier) NotifyAll(ctx context.Context, update types.StateUpdate) { func (n *Notifier) NotifyWithIgnore( ctx context.Context, update types.StateUpdate, - ignore ...string, + ignoreNodeIDs ...types.NodeID, ) { - log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify") - defer log.Trace(). - Caller(). - Interface("type", update.Type). - Msg("releasing lock, finished notifying") + notifierUpdateReceived.WithLabelValues(update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc() + n.b.addOrPassthrough(update) +} - n.l.RLock() - defer n.l.RUnlock() - - for key, c := range n.nodes { - if util.IsStringInSlice(ignore, key) { - continue - } +func (n *Notifier) NotifyByNodeID( + ctx context.Context, + update types.StateUpdate, + nodeID types.NodeID, +) { + start := time.Now() + notifierWaitersForLock.WithLabelValues("lock", "notify").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "notify").Dec() + notifierWaitForLock.WithLabelValues("notify").Observe(time.Since(start).Seconds()) + if c, ok := n.nodes[nodeID]; ok { select { case <-ctx.Done(): log.Error(). Err(ctx.Err()). - Str("mkey", key). - Any("origin", ctx.Value("origin")). - Any("hostname", ctx.Value("hostname")). + Uint64("node.id", nodeID.Uint64()). + Any("origin", types.NotifyOriginKey.Value(ctx)). + Any("origin-hostname", types.NotifyHostnameKey.Value(ctx)). 
Msgf("update not sent, context cancelled") + if debugHighCardinalityMetrics { + notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), types.NotifyOriginKey.Value(ctx), nodeID.String()).Inc() + } else { + notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc() + } return case c <- update: - log.Trace(). - Str("mkey", key). - Any("origin", ctx.Value("origin")). - Any("hostname", ctx.Value("hostname")). - Msgf("update successfully sent on chan") + n.tracef(nodeID, "update successfully sent on chan, origin: %s, origin-hostname: %s", ctx.Value("origin"), ctx.Value("hostname")) + if debugHighCardinalityMetrics { + notifierUpdateSent.WithLabelValues("ok", update.Type.String(), types.NotifyOriginKey.Value(ctx), nodeID.String()).Inc() + } else { + notifierUpdateSent.WithLabelValues("ok", update.Type.String(), types.NotifyOriginKey.Value(ctx)).Inc() + } } } } -func (n *Notifier) NotifyByMachineKey( - ctx context.Context, - update types.StateUpdate, - mKey key.MachinePublic, -) { - log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify") - defer log.Trace(). - Caller(). - Interface("type", update.Type). - Msg("releasing lock, finished notifying") +func (n *Notifier) sendAll(update types.StateUpdate) { + start := time.Now() + notifierWaitersForLock.WithLabelValues("lock", "send-all").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "send-all").Dec() + notifierWaitForLock.WithLabelValues("send-all").Observe(time.Since(start).Seconds()) - n.l.RLock() - defer n.l.RUnlock() - - if c, ok := n.nodes[mKey.String()]; ok { + for id, c := range n.nodes { + // Whenever an update is sent to all nodes, there is a chance that the node + // has disconnected and the goroutine that was supposed to consume the update + // has shut down the channel and is waiting for the lock held here in RemoveNode. 
+ // This means that there is potential for a deadlock which would stop all updates + // going out to clients. This timeout prevents that from happening by moving on to the + // next node if the context is cancelled. After sendAll releases the lock, the add/remove + // call will succeed and the update will go to the correct nodes on the next call. + ctx, cancel := context.WithTimeout(context.Background(), n.cfg.Tuning.NotifierSendTimeout) + defer cancel() select { case <-ctx.Done(): log.Error(). Err(ctx.Err()). - Str("mkey", mKey.String()). - Any("origin", ctx.Value("origin")). - Any("hostname", ctx.Value("hostname")). + Uint64("node.id", id.Uint64()). Msgf("update not sent, context cancelled") + if debugHighCardinalityMetrics { + notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), "send-all", id.String()).Inc() + } else { + notifierUpdateSent.WithLabelValues("cancelled", update.Type.String(), "send-all").Inc() + } return case c <- update: - log.Trace(). - Str("mkey", mKey.String()). - Any("origin", ctx.Value("origin")). - Any("hostname", ctx.Value("hostname")).
- Msgf("update successfully sent on chan") + if debugHighCardinalityMetrics { + notifierUpdateSent.WithLabelValues("ok", update.Type.String(), "send-all", id.String()).Inc() + } else { + notifierUpdateSent.WithLabelValues("ok", update.Type.String(), "send-all").Inc() + } } } } func (n *Notifier) String() string { - n.l.RLock() - defer n.l.RUnlock() + notifierWaitersForLock.WithLabelValues("lock", "string").Inc() + n.l.Lock() + defer n.l.Unlock() + notifierWaitersForLock.WithLabelValues("lock", "string").Dec() - str := []string{"Notifier, in map:\n"} + var b strings.Builder + fmt.Fprintf(&b, "chans (%d):\n", len(n.nodes)) - for k, v := range n.nodes { - str = append(str, fmt.Sprintf("\t%s: %v\n", k, v)) + var keys []types.NodeID + n.connected.Range(func(key types.NodeID, value bool) bool { + keys = append(keys, key) + return true + }) + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + + for _, key := range keys { + fmt.Fprintf(&b, "\t%d: %p\n", key, n.nodes[key]) } - return strings.Join(str, "") + b.WriteString("\n") + fmt.Fprintf(&b, "connected (%d):\n", len(n.nodes)) + + for _, key := range keys { + val, _ := n.connected.Load(key) + fmt.Fprintf(&b, "\t%d: %t\n", key, val) + } + + return b.String() +} + +type batcher struct { + tick *time.Ticker + + mu sync.Mutex + + cancelCh chan struct{} + + changedNodeIDs set.Slice[types.NodeID] + nodesChanged bool + patches map[types.NodeID]tailcfg.PeerChange + patchesChanged bool + + n *Notifier +} + +func newBatcher(batchTime time.Duration, n *Notifier) *batcher { + return &batcher{ + tick: time.NewTicker(batchTime), + cancelCh: make(chan struct{}), + patches: make(map[types.NodeID]tailcfg.PeerChange), + n: n, + } + +} + +func (b *batcher) close() { + b.cancelCh <- struct{}{} +} + +// addOrPassthrough adds the update to the batcher, if it is not a +// type that is currently batched, it will be sent immediately. 
+func (b *batcher) addOrPassthrough(update types.StateUpdate) { + notifierBatcherWaitersForLock.WithLabelValues("lock", "add").Inc() + b.mu.Lock() + defer b.mu.Unlock() + notifierBatcherWaitersForLock.WithLabelValues("lock", "add").Dec() + + switch update.Type { + case types.StatePeerChanged: + b.changedNodeIDs.Add(update.ChangeNodes...) + b.nodesChanged = true + notifierBatcherChanges.WithLabelValues().Set(float64(b.changedNodeIDs.Len())) + + case types.StatePeerChangedPatch: + for _, newPatch := range update.ChangePatches { + if curr, ok := b.patches[types.NodeID(newPatch.NodeID)]; ok { + overwritePatch(&curr, newPatch) + b.patches[types.NodeID(newPatch.NodeID)] = curr + } else { + b.patches[types.NodeID(newPatch.NodeID)] = *newPatch + } + } + b.patchesChanged = true + notifierBatcherPatches.WithLabelValues().Set(float64(len(b.patches))) + + default: + b.n.sendAll(update) + } +} + +// flush sends all the accumulated patches to all +// nodes in the notifier. +func (b *batcher) flush() { + notifierBatcherWaitersForLock.WithLabelValues("lock", "flush").Inc() + b.mu.Lock() + defer b.mu.Unlock() + notifierBatcherWaitersForLock.WithLabelValues("lock", "flush").Dec() + + if b.nodesChanged || b.patchesChanged { + var patches []*tailcfg.PeerChange + // If a node is getting a full update from a change + // node update, then the patch can be dropped. 
+ for nodeID, patch := range b.patches { + if b.changedNodeIDs.Contains(nodeID) { + delete(b.patches, nodeID) + } else { + patches = append(patches, &patch) + } + } + + changedNodes := b.changedNodeIDs.Slice().AsSlice() + sort.Slice(changedNodes, func(i, j int) bool { + return changedNodes[i] < changedNodes[j] + }) + + if b.changedNodeIDs.Slice().Len() > 0 { + update := types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: changedNodes, + } + + b.n.sendAll(update) + } + + if len(patches) > 0 { + patchUpdate := types.StateUpdate{ + Type: types.StatePeerChangedPatch, + ChangePatches: patches, + } + + b.n.sendAll(patchUpdate) + } + + b.changedNodeIDs = set.Slice[types.NodeID]{} + notifierBatcherChanges.WithLabelValues().Set(0) + b.nodesChanged = false + b.patches = make(map[types.NodeID]tailcfg.PeerChange, len(b.patches)) + notifierBatcherPatches.WithLabelValues().Set(0) + b.patchesChanged = false + } +} + +func (b *batcher) doWork() { + for { + select { + case <-b.cancelCh: + return + case <-b.tick.C: + b.flush() + } + } +} + +// overwritePatch takes the current patch and a newer patch +// and override any field that has changed +func overwritePatch(currPatch, newPatch *tailcfg.PeerChange) { + if newPatch.DERPRegion != 0 { + currPatch.DERPRegion = newPatch.DERPRegion + } + + if newPatch.Cap != 0 { + currPatch.Cap = newPatch.Cap + } + + if newPatch.CapMap != nil { + currPatch.CapMap = newPatch.CapMap + } + + if newPatch.Endpoints != nil { + currPatch.Endpoints = newPatch.Endpoints + } + + if newPatch.Key != nil { + currPatch.Key = newPatch.Key + } + + if newPatch.KeySignature != nil { + currPatch.KeySignature = newPatch.KeySignature + } + + if newPatch.DiscoKey != nil { + currPatch.DiscoKey = newPatch.DiscoKey + } + + if newPatch.Online != nil { + currPatch.Online = newPatch.Online + } + + if newPatch.LastSeen != nil { + currPatch.LastSeen = newPatch.LastSeen + } + + if newPatch.KeyExpiry != nil { + currPatch.KeyExpiry = newPatch.KeyExpiry + } } diff --git 
a/hscontrol/notifier/notifier_test.go b/hscontrol/notifier/notifier_test.go new file mode 100644 index 00000000..8841a46d --- /dev/null +++ b/hscontrol/notifier/notifier_test.go @@ -0,0 +1,249 @@ +package notifier + +import ( + "context" + "net/netip" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/juanfont/headscale/hscontrol/types" + "github.com/juanfont/headscale/hscontrol/util" + "tailscale.com/tailcfg" +) + +func TestBatcher(t *testing.T) { + tests := []struct { + name string + updates []types.StateUpdate + want []types.StateUpdate + }{ + { + name: "full-passthrough", + updates: []types.StateUpdate{ + { + Type: types.StateFullUpdate, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StateFullUpdate, + }, + }, + }, + { + name: "derp-passthrough", + updates: []types.StateUpdate{ + { + Type: types.StateDERPUpdated, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StateDERPUpdated, + }, + }, + }, + { + name: "single-node-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, + }, + }, + }, + }, + { + name: "merge-node-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, 4, + }, + }, + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, 3, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{ + 2, 3, 4, + }, + }, + }, + }, + { + name: "single-patch-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 5, + }, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 5, + }, + }, + }, + }, + }, + { + name: 
"merge-patch-to-same-node-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 5, + }, + }, + }, + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 6, + }, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 2, + DERPRegion: 6, + }, + }, + }, + }, + }, + { + name: "merge-patch-to-multiple-node-update", + updates: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 3, + Endpoints: []netip.AddrPort{ + netip.MustParseAddrPort("1.1.1.1:9090"), + }, + }, + }, + }, + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 3, + Endpoints: []netip.AddrPort{ + netip.MustParseAddrPort("1.1.1.1:9090"), + netip.MustParseAddrPort("2.2.2.2:8080"), + }, + }, + }, + }, + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 4, + DERPRegion: 6, + }, + }, + }, + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 4, + Cap: tailcfg.CapabilityVersion(54), + }, + }, + }, + }, + want: []types.StateUpdate{ + { + Type: types.StatePeerChangedPatch, + ChangePatches: []*tailcfg.PeerChange{ + { + NodeID: 3, + Endpoints: []netip.AddrPort{ + netip.MustParseAddrPort("1.1.1.1:9090"), + netip.MustParseAddrPort("2.2.2.2:8080"), + }, + }, + { + NodeID: 4, + DERPRegion: 6, + Cap: tailcfg.CapabilityVersion(54), + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := NewNotifier(&types.Config{ + Tuning: types.Tuning{ + // We will call flush manually for the tests, + // so do not run the worker. 
+ BatchChangeDelay: time.Hour, + }, + }) + + ch := make(chan types.StateUpdate, 30) + defer close(ch) + n.AddNode(1, ch) + defer n.RemoveNode(1, ch) + + for _, u := range tt.updates { + n.NotifyAll(context.Background(), u) + } + + n.b.flush() + + var got []types.StateUpdate + for len(ch) > 0 { + out := <-ch + got = append(got, out) + } + + if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" { + t.Errorf("batcher() unexpected result (-want +got):\n%s", diff) + } + }) + } +} diff --git a/hscontrol/oidc.go b/hscontrol/oidc.go index 318aadae..b728a6d0 100644 --- a/hscontrol/oidc.go +++ b/hscontrol/oidc.go @@ -58,12 +58,7 @@ func (h *Headscale) initOIDC() error { h.oidcProvider, err = oidc.NewProvider(context.Background(), h.cfg.OIDC.Issuer) if err != nil { - log.Error(). - Err(err). - Caller(). - Msgf("Could not retrieve OIDC Config: %s", err.Error()) - - return err + return fmt.Errorf("creating OIDC provider from issuer config: %w", err) } h.oauth2Config = &oauth2.Config{ @@ -514,12 +509,6 @@ func (h *Headscale) validateNodeForOIDCCallback( User: claims.Email, Verb: "Reauthenticated", }); err != nil { - log.Error(). - Str("func", "OIDCCallback"). - Str("type", "reauthenticate"). - Err(err). 
- Msg("Could not render OIDC callback template") - writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("Could not render OIDC callback template")) @@ -527,7 +516,7 @@ func (h *Headscale) validateNodeForOIDCCallback( util.LogErr(err, "Failed to write response") } - return nil, true, err + return nil, true, fmt.Errorf("rendering OIDC callback template: %w", err) } writer.Header().Set("Content-Type", "text/html; charset=utf-8") @@ -537,11 +526,8 @@ func (h *Headscale) validateNodeForOIDCCallback( util.LogErr(err, "Failed to write response") } - stateUpdate := types.StateUpdateExpire(node.ID, expiry) - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "oidc-expiry", "na") - h.nodeNotifier.NotifyWithIgnore(ctx, stateUpdate, node.MachineKey.String()) - } + ctx := types.NotifyCtx(context.Background(), "oidc-expiry", "na") + h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, expiry), node.ID) return nil, true, nil } @@ -582,10 +568,6 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback( if errors.Is(err, db.ErrUserNotFound) { user, err = h.db.CreateUser(userName) if err != nil { - log.Error(). - Err(err). - Caller(). - Msgf("could not create new user '%s'", userName) writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("could not create user")) @@ -593,14 +575,9 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback( util.LogErr(err, "Failed to write response") } - return nil, err + return nil, fmt.Errorf("creating new user: %w", err) } } else if err != nil { - log.Error(). - Caller(). - Err(err). - Str("user", userName). 
- Msg("could not find or create user") writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("could not find or create user")) @@ -608,7 +585,7 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback( util.LogErr(err, "Failed to write response") } - return nil, err + return nil, fmt.Errorf("find or create user: %w", err) } return user, nil @@ -620,12 +597,12 @@ func (h *Headscale) registerNodeForOIDCCallback( machineKey *key.MachinePublic, expiry time.Time, ) error { - addrs, err := h.ipAlloc.Next() + ipv4, ipv6, err := h.ipAlloc.Next() if err != nil { return err } - if err := h.db.DB.Transaction(func(tx *gorm.DB) error { + if err := h.db.Write(func(tx *gorm.DB) error { if _, err := db.RegisterNodeFromAuthCallback( // TODO(kradalby): find a better way to use the cache across modules tx, @@ -634,7 +611,7 @@ func (h *Headscale) registerNodeForOIDCCallback( user.Name, &expiry, util.RegisterMethodOIDC, - addrs, + ipv4, ipv6, ); err != nil { return err } @@ -664,12 +641,6 @@ func renderOIDCCallbackTemplate( User: claims.Email, Verb: "Authenticated", }); err != nil { - log.Error(). - Str("func", "OIDCCallback"). - Str("type", "authenticate"). - Err(err). 
- Msg("Could not render OIDC callback template") - writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.WriteHeader(http.StatusInternalServerError) _, werr := writer.Write([]byte("Could not render OIDC callback template")) @@ -677,7 +648,7 @@ func renderOIDCCallbackTemplate( util.LogErr(err, "Failed to write response") } - return nil, err + return nil, fmt.Errorf("rendering OIDC callback template: %w", err) } return &content, nil diff --git a/hscontrol/policy/acls.go b/hscontrol/policy/acls.go index 2ccc56b4..1196995d 100644 --- a/hscontrol/policy/acls.go +++ b/hscontrol/policy/acls.go @@ -36,6 +36,38 @@ const ( expectedTokenItems = 2 ) +var theInternetSet *netipx.IPSet + +// theInternet returns the IPSet for the Internet. +// https://www.youtube.com/watch?v=iDbyYGrswtg +func theInternet() *netipx.IPSet { + if theInternetSet != nil { + return theInternetSet + } + + var internetBuilder netipx.IPSetBuilder + internetBuilder.AddPrefix(netip.MustParsePrefix("2000::/3")) + internetBuilder.AddPrefix(netip.MustParsePrefix("0.0.0.0/0")) + + // Delete Private network addresses + // https://datatracker.ietf.org/doc/html/rfc1918 + internetBuilder.RemovePrefix(netip.MustParsePrefix("fc00::/7")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("10.0.0.0/8")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("172.16.0.0/12")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("192.168.0.0/16")) + + // Delete Tailscale networks + internetBuilder.RemovePrefix(netip.MustParsePrefix("fd7a:115c:a1e0::/48")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("100.64.0.0/10")) + + // Delete "cant find DHCP networks" + internetBuilder.RemovePrefix(netip.MustParsePrefix("fe80::/10")) // link-local + internetBuilder.RemovePrefix(netip.MustParsePrefix("169.254.0.0/16")) + + theInternetSet, _ := internetBuilder.IPSet() + return theInternetSet +} + // For some reason golang.org/x/net/internal/iana is an internal package.
const ( protocolICMP = 1 // Internet Control Message @@ -114,7 +146,7 @@ func LoadACLPolicyFromBytes(acl []byte, format string) (*ACLPolicy, error) { return &policy, nil } -func GenerateFilterAndSSHRules( +func GenerateFilterAndSSHRulesForTests( policy *ACLPolicy, node *types.Node, peers types.Nodes, @@ -124,40 +156,31 @@ func GenerateFilterAndSSHRules( return tailcfg.FilterAllowAll, &tailcfg.SSHPolicy{}, nil } - rules, err := policy.generateFilterRules(node, peers) + rules, err := policy.CompileFilterRules(append(peers, node)) if err != nil { return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err } log.Trace().Interface("ACL", rules).Str("node", node.GivenName).Msg("ACL rules") - var sshPolicy *tailcfg.SSHPolicy - sshRules, err := policy.generateSSHRules(node, peers) + sshPolicy, err := policy.CompileSSHPolicy(node, peers) if err != nil { return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err } - log.Trace(). - Interface("SSH", sshRules). - Str("node", node.GivenName). - Msg("SSH rules") - - if sshPolicy == nil { - sshPolicy = &tailcfg.SSHPolicy{} - } - sshPolicy.Rules = sshRules - return rules, sshPolicy, nil } -// generateFilterRules takes a set of nodes and an ACLPolicy and generates a +// CompileFilterRules takes a set of nodes and an ACLPolicy and generates a // set of Tailscale compatible FilterRules used to allow traffic on clients. -func (pol *ACLPolicy) generateFilterRules( - node *types.Node, - peers types.Nodes, +func (pol *ACLPolicy) CompileFilterRules( + nodes types.Nodes, ) ([]tailcfg.FilterRule, error) { + if pol == nil { + return tailcfg.FilterAllowAll, nil + } + rules := []tailcfg.FilterRule{} - nodes := append(peers, node) for index, acl := range pol.ACLs { if acl.Action != "accept" { @@ -168,23 +191,14 @@ func (pol *ACLPolicy) generateFilterRules( for srcIndex, src := range acl.Sources { srcs, err := pol.expandSource(src, nodes) if err != nil { - log.Error(). - Interface("src", src). - Int("ACL index", index). - Int("Src index", srcIndex). 
- Msgf("Error parsing ACL") - - return nil, err + return nil, fmt.Errorf("parsing policy, acl index: %d->%d: %w", index, srcIndex, err) } srcIPs = append(srcIPs, srcs...) } protocols, isWildcard, err := parseProtocol(acl.Protocol) if err != nil { - log.Error(). - Msgf("Error parsing ACL %d. protocol unknown %s", index, acl.Protocol) - - return nil, err + return nil, fmt.Errorf("parsing policy, protocol err: %w ", err) } destPorts := []tailcfg.NetPortRange{} @@ -239,28 +253,28 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F // record if the rule is actually relevant for the given node. dests := []tailcfg.NetPortRange{} + DEST_LOOP: for _, dest := range rule.DstPorts { expanded, err := util.ParseIPSet(dest.IP, nil) // Fail closed, if we cant parse it, then we should not allow // access. if err != nil { - continue + continue DEST_LOOP } - if node.IPAddresses.InIPSet(expanded) { + if node.InIPSet(expanded) { dests = append(dests, dest) + continue DEST_LOOP } // If the node exposes routes, ensure they are note removed // when the filters are reduced. if node.Hostinfo != nil { - // TODO(kradalby): Evaluate if we should only keep - // the routes if the route is enabled. This will - // require database access in this part of the code. 
if len(node.Hostinfo.RoutableIPs) > 0 { for _, routableIP := range node.Hostinfo.RoutableIPs { - if expanded.ContainsPrefix(routableIP) { + if expanded.OverlapsPrefix(routableIP) { dests = append(dests, dest) + continue DEST_LOOP } } } @@ -279,10 +293,14 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F return ret } -func (pol *ACLPolicy) generateSSHRules( +func (pol *ACLPolicy) CompileSSHPolicy( node *types.Node, peers types.Nodes, -) ([]*tailcfg.SSHRule, error) { +) (*tailcfg.SSHPolicy, error) { + if pol == nil { + return nil, nil + } + rules := []*tailcfg.SSHRule{} acceptAction := tailcfg.SSHAction{ @@ -320,7 +338,7 @@ func (pol *ACLPolicy) generateSSHRules( return nil, err } - if !node.IPAddresses.InIPSet(destSet) { + if !node.InIPSet(destSet) { continue } @@ -331,16 +349,12 @@ func (pol *ACLPolicy) generateSSHRules( case "check": checkAction, err := sshCheckAction(sshACL.CheckPeriod) if err != nil { - log.Error(). - Msgf("Error parsing SSH %d, check action with unparsable duration '%s'", index, sshACL.CheckPeriod) + return nil, fmt.Errorf("parsing SSH policy, parsing check duration, index: %d: %w", index, err) } else { action = *checkAction } default: - log.Error(). - Msgf("Error parsing SSH %d, unknown action '%s', skipping", index, sshACL.Action) - - continue + return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", sshACL.Action, index, err) } principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources)) @@ -352,10 +366,7 @@ func (pol *ACLPolicy) generateSSHRules( } else if isGroup(rawSrc) { users, err := pol.expandUsersFromGroup(rawSrc) if err != nil { - log.Error(). - Msgf("Error parsing SSH %d, Source %d", index, innerIndex) - - return nil, err + return nil, fmt.Errorf("parsing SSH policy, expanding user from group, index: %d->%d: %w", index, innerIndex, err) } for _, user := range users { @@ -369,10 +380,7 @@ func (pol *ACLPolicy) generateSSHRules( rawSrc, ) if err != nil { - log.Error(). 
- Msgf("Error parsing SSH %d, Source %d", index, innerIndex) - - return nil, err + return nil, fmt.Errorf("parsing SSH policy, expanding alias, index: %d->%d: %w", index, innerIndex, err) } for _, expandedSrc := range expandedSrcs.Prefixes() { principals = append(principals, &tailcfg.SSHPrincipal{ @@ -393,7 +401,9 @@ func (pol *ACLPolicy) generateSSHRules( }) } - return rules, nil + return &tailcfg.SSHPolicy{ + Rules: rules, + }, nil } func sshCheckAction(duration string) (*tailcfg.SSHAction, error) { @@ -502,7 +512,7 @@ func parseProtocol(protocol string) ([]int, bool, error) { default: protocolNumber, err := strconv.Atoi(protocol) if err != nil { - return nil, false, err + return nil, false, fmt.Errorf("parsing protocol number: %w", err) } needsWildcard := protocolNumber != protocolTCP && protocolNumber != protocolUDP && @@ -539,6 +549,7 @@ func (pol *ACLPolicy) expandSource( // - a host // - an ip // - a cidr +// - an autogroup // and transform these in IPAddresses. func (pol *ACLPolicy) ExpandAlias( nodes types.Nodes, @@ -564,6 +575,10 @@ func (pol *ACLPolicy) ExpandAlias( return pol.expandIPsFromTag(alias, nodes) } + if isAutoGroup(alias) { + return expandAutoGroup(alias) + } + // if alias is a user if ips, err := pol.expandIPsFromUser(alias, nodes); ips != nil { return ips, err @@ -766,7 +781,7 @@ func (pol *ACLPolicy) expandIPsFromGroup( for _, user := range users { filteredNodes := filterNodesByUser(nodes, user) for _, node := range filteredNodes { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } } @@ -782,7 +797,7 @@ func (pol *ACLPolicy) expandIPsFromTag( // check for forced tags for _, node := range nodes { if util.StringOrPrefixListContains(node.ForcedTags, alias) { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } } @@ -814,7 +829,7 @@ func (pol *ACLPolicy) expandIPsFromTag( } if util.StringOrPrefixListContains(node.Hostinfo.RequestTags, alias) { - node.IPAddresses.AppendToIPSet(&build) + 
node.AppendToIPSet(&build) } } } @@ -837,7 +852,7 @@ func (pol *ACLPolicy) expandIPsFromUser( } for _, node := range filteredNodes { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } return build.IPSet() @@ -855,7 +870,7 @@ func (pol *ACLPolicy) expandIPsFromSingleIP( build.Add(ip) for _, node := range matches { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } return build.IPSet() @@ -872,11 +887,11 @@ func (pol *ACLPolicy) expandIPsFromIPPrefix( // This is suboptimal and quite expensive, but if we only add the prefix, we will miss all the relevant IPv6 // addresses for the hosts that belong to tailscale. This doesnt really affect stuff like subnet routers. for _, node := range nodes { - for _, ip := range node.IPAddresses { + for _, ip := range node.IPs() { // log.Trace(). // Msgf("checking if node ip (%s) is part of prefix (%s): %v, is single ip prefix (%v), addr: %s", ip.String(), prefix.String(), prefix.Contains(ip), prefix.IsSingleIP(), prefix.Addr().String()) if prefix.Contains(ip) { - node.IPAddresses.AppendToIPSet(&build) + node.AppendToIPSet(&build) } } } @@ -884,6 +899,16 @@ func (pol *ACLPolicy) expandIPsFromIPPrefix( return build.IPSet() } +func expandAutoGroup(alias string) (*netipx.IPSet, error) { + switch { + case strings.HasPrefix(alias, "autogroup:internet"): + return theInternet(), nil + + default: + return nil, fmt.Errorf("unknown autogroup %q", alias) + } +} + func isWildcard(str string) bool { return str == "*" } @@ -896,6 +921,10 @@ func isTag(str string) bool { return strings.HasPrefix(str, "tag:") } +func isAutoGroup(str string) bool { + return strings.HasPrefix(str, "autogroup:") +} + // TagsOfNode will return the tags of the current node. // Invalid tags are tags added by a user on a node, and that user doesn't have authority to add this tag. // Valid tags are tags added by a user that is allowed in the ACL policy to add this tag. 
diff --git a/hscontrol/policy/acls_test.go b/hscontrol/policy/acls_test.go index ff18dd05..b0cafe10 100644 --- a/hscontrol/policy/acls_test.go +++ b/hscontrol/policy/acls_test.go @@ -16,6 +16,11 @@ import ( "tailscale.com/tailcfg" ) +var iap = func(ipStr string) *netip.Addr { + ip := netip.MustParseAddr(ipStr) + return &ip +} + func Test(t *testing.T) { check.TestingT(t) } @@ -385,15 +390,12 @@ acls: return } - rules, err := pol.generateFilterRules(&types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.100.100.100"), - }, - }, types.Nodes{ + rules, err := pol.CompileFilterRules(types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("200.200.200.200"), - }, + IPv4: iap("100.100.100.100"), + }, + &types.Node{ + IPv4: iap("200.200.200.200"), User: types.User{ Name: "testuser", }, @@ -530,7 +532,7 @@ func (s *Suite) TestRuleInvalidGeneration(c *check.C) { "example-host-2:80" ], "deny": [ - "exapmle-host-2:100" + "example-host-2:100" ], }, { @@ -546,7 +548,7 @@ func (s *Suite) TestRuleInvalidGeneration(c *check.C) { c.Assert(pol.ACLs, check.HasLen, 6) c.Assert(err, check.IsNil) - rules, err := pol.generateFilterRules(&types.Node{}, types.Nodes{}) + rules, err := pol.CompileFilterRules(types.Nodes{}) c.Assert(err, check.NotNil) c.Assert(rules, check.IsNil) } @@ -562,7 +564,7 @@ func (s *Suite) TestInvalidAction(c *check.C) { }, }, } - _, _, err := GenerateFilterAndSSHRules(pol, &types.Node{}, types.Nodes{}) + _, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}) c.Assert(errors.Is(err, ErrInvalidAction), check.Equals, true) } @@ -581,7 +583,7 @@ func (s *Suite) TestInvalidGroupInGroup(c *check.C) { }, }, } - _, _, err := GenerateFilterAndSSHRules(pol, &types.Node{}, types.Nodes{}) + _, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}) c.Assert(errors.Is(err, ErrInvalidGroup), check.Equals, true) } @@ -597,7 +599,7 @@ func (s *Suite) TestInvalidTagOwners(c *check.C) 
{ }, } - _, _, err := GenerateFilterAndSSHRules(pol, &types.Node{}, types.Nodes{}) + _, _, err := GenerateFilterAndSSHRulesForTests(pol, &types.Node{}, types.Nodes{}) c.Assert(errors.Is(err, ErrInvalidTag), check.Equals, true) } @@ -633,7 +635,7 @@ func Test_expandGroup(t *testing.T) { wantErr: false, }, { - name: "InexistantGroup", + name: "InexistentGroup", field: field{ pol: ACLPolicy{ Groups: Groups{ @@ -996,12 +998,10 @@ func Test_expandAlias(t *testing.T) { alias: "*", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, + IPv4: iap("100.64.0.1"), }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.78.84.227"), - }, + IPv4: iap("100.78.84.227"), }, }, }, @@ -1022,27 +1022,19 @@ func Test_expandAlias(t *testing.T) { alias: "group:accountant", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1063,27 +1055,19 @@ func Test_expandAlias(t *testing.T) { alias: "group:hr", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), 
User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1128,9 +1112,7 @@ func Test_expandAlias(t *testing.T) { alias: "10.0.0.1", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("10.0.0.1"), - }, + IPv4: iap("10.0.0.1"), User: types.User{Name: "mickael"}, }, }, @@ -1149,10 +1131,8 @@ func Test_expandAlias(t *testing.T) { alias: "10.0.0.1", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("10.0.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - }, + IPv4: iap("10.0.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), User: types.User{Name: "mickael"}, }, }, @@ -1171,10 +1151,8 @@ func Test_expandAlias(t *testing.T) { alias: "fd7a:115c:a1e0:ab12:4843:2222:6273:2222", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("10.0.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - }, + IPv4: iap("10.0.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), User: types.User{Name: "mickael"}, }, }, @@ -1240,9 +1218,7 @@ func Test_expandAlias(t *testing.T) { alias: "tag:hr-webserver", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1251,9 +1227,7 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1262,15 +1236,11 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: 
"marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, }, }, @@ -1294,27 +1264,19 @@ func Test_expandAlias(t *testing.T) { alias: "tag:hr-webserver", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1331,29 +1293,21 @@ func Test_expandAlias(t *testing.T) { alias: "tag:hr-webserver", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, ForcedTags: []string{"tag:hr-webserver"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, ForcedTags: []string{"tag:hr-webserver"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1374,16 +1328,12 @@ func Test_expandAlias(t *testing.T) { alias: "tag:hr-webserver", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, ForcedTags: 
[]string{"tag:hr-webserver"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1392,15 +1342,11 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "mickael"}, }, }, @@ -1419,9 +1365,7 @@ func Test_expandAlias(t *testing.T) { alias: "joe", nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1430,9 +1374,7 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1441,16 +1383,12 @@ func Test_expandAlias(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + IPv4: iap("100.64.0.3"), User: types.User{Name: "marc"}, Hostinfo: &tailcfg.Hostinfo{}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1498,9 +1436,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1509,9 +1445,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: 
types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1520,9 +1454,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1531,9 +1463,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, want: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, - User: types.User{Name: "joe"}, - Hostinfo: &tailcfg.Hostinfo{}, + IPv4: iap("100.64.0.4"), + User: types.User{Name: "joe"}, + Hostinfo: &tailcfg.Hostinfo{}, }, }, }, @@ -1550,9 +1482,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1561,9 +1491,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1572,9 +1500,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1583,9 +1509,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, want: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, - User: types.User{Name: "joe"}, - Hostinfo: &tailcfg.Hostinfo{}, + IPv4: iap("100.64.0.4"), + User: types.User{Name: "joe"}, + Hostinfo: &tailcfg.Hostinfo{}, }, }, }, @@ -1597,9 +1523,7 @@ func 
Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1608,17 +1532,13 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, ForcedTags: []string{"tag:accountant-webserver"}, Hostinfo: &tailcfg.Hostinfo{}, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1627,9 +1547,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, want: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, - User: types.User{Name: "joe"}, - Hostinfo: &tailcfg.Hostinfo{}, + IPv4: iap("100.64.0.4"), + User: types.User{Name: "joe"}, + Hostinfo: &tailcfg.Hostinfo{}, }, }, }, @@ -1641,9 +1561,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1652,9 +1570,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1663,9 +1579,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1674,9 +1588,7 @@ func 
Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, want: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1685,9 +1597,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + IPv4: iap("100.64.0.2"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{ OS: "centos", @@ -1696,9 +1606,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) { }, }, &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - }, + IPv4: iap("100.64.0.4"), User: types.User{Name: "joe"}, Hostinfo: &tailcfg.Hostinfo{}, }, @@ -1724,8 +1632,7 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { pol ACLPolicy } type args struct { - node *types.Node - peers types.Nodes + nodes types.Nodes } tests := []struct { name string @@ -1755,13 +1662,12 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { }, }, args: args{ - node: &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), + nodes: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), }, }, - peers: types.Nodes{}, }, want: []tailcfg.FilterRule{ { @@ -1800,19 +1706,15 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { }, }, args: args{ - node: &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), - }, - User: types.User{Name: "mickael"}, - }, - peers: types.Nodes{ + nodes: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - }, + IPv4: iap("100.64.0.1"), + IPv6: 
iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), + User: types.User{Name: "mickael"}, + }, + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), User: types.User{Name: "mickael"}, }, }, @@ -1846,9 +1748,8 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tt.field.pol.generateFilterRules( - tt.args.node, - tt.args.peers, + got, err := tt.field.pol.CompileFilterRules( + tt.args.nodes, ) if (err != nil) != tt.wantErr { t.Errorf("ACLgenerateFilterRules() error = %v, wantErr %v", err, tt.wantErr) @@ -1864,6 +1765,108 @@ func TestACLPolicy_generateFilterRules(t *testing.T) { } } +// tsExitNodeDest is the list of destination IP ranges that are allowed when +// you dump the filter list from a Tailscale node connected to Tailscale SaaS. +var tsExitNodeDest = []tailcfg.NetPortRange{ + { + IP: "0.0.0.0-9.255.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "11.0.0.0-100.63.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "100.128.0.0-169.253.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "169.255.0.0-172.15.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "172.32.0.0-192.167.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "192.169.0.0-255.255.255.255", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "2000::-3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + Ports: tailcfg.PortRangeAny, + }, +} + +// hsExitNodeDest is the list of destination IP ranges that are allowed when +// we use headscale "autogroup:internet" +var hsExitNodeDest = []tailcfg.NetPortRange{ + {IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: 
"96.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny}, + {IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny}, + {IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "200.0.0.0/5", Ports: 
tailcfg.PortRangeAny}, + {IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "2000::/3", Ports: tailcfg.PortRangeAny}, +} + +func TestTheInternet(t *testing.T) { + internetSet := theInternet() + + internetPrefs := internetSet.Prefixes() + + for i, _ := range internetPrefs { + if internetPrefs[i].String() != hsExitNodeDest[i].IP { + t.Errorf("prefix from internet set %q != hsExit list %q", internetPrefs[i].String(), hsExitNodeDest[i].IP) + } + } + + if len(internetPrefs) != len(hsExitNodeDest) { + t.Fatalf("expected same length of prefixes, internet: %d, hsExit: %d", len(internetPrefs), len(hsExitNodeDest)) + } +} + func TestReduceFilterRules(t *testing.T) { tests := []struct { name string @@ -1884,18 +1887,14 @@ func TestReduceFilterRules(t *testing.T) { }, }, node: &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), - }, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), User: types.User{Name: "mickael"}, }, peers: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - }, + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), User: types.User{Name: "mickael"}, }, }, @@ -1921,10 +1920,8 @@ func TestReduceFilterRules(t *testing.T) { }, }, node: &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0::1"), - }, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), User: types.User{Name: "user1"}, Hostinfo: &tailcfg.Hostinfo{ RoutableIPs: []netip.Prefix{ @@ -1934,10 +1931,8 @@ func TestReduceFilterRules(t *testing.T) { }, peers: types.Nodes{ &types.Node{ - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0::2"), - 
}, + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), User: types.User{Name: "user1"}, }, }, @@ -1976,16 +1971,473 @@ func TestReduceFilterRules(t *testing.T) { }, }, }, + { + name: "1786-reducing-breaks-exit-nodes-the-client", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "autogroup:internet:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + // "internal" exit node + &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{types.ExitRouteV4, types.ExitRouteV6}, + }, + }, + }, + want: []tailcfg.FilterRule{}, + }, + { + name: "1786-reducing-breaks-exit-nodes-the-exit", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "autogroup:internet:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{types.ExitRouteV4, types.ExitRouteV6}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: 
iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: hsExitNodeDest, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-the-example-from-issue", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "0.0.0.0/5:*", + "8.0.0.0/7:*", + "11.0.0.0/8:*", + "12.0.0.0/6:*", + "16.0.0.0/4:*", + "32.0.0.0/3:*", + "64.0.0.0/2:*", + "128.0.0.0/3:*", + "160.0.0.0/5:*", + "168.0.0.0/6:*", + "172.0.0.0/12:*", + "172.32.0.0/11:*", + "172.64.0.0/10:*", + "172.128.0.0/9:*", + "173.0.0.0/8:*", + "174.0.0.0/7:*", + "176.0.0.0/4:*", + "192.0.0.0/9:*", + "192.128.0.0/11:*", + "192.160.0.0/13:*", + "192.169.0.0/16:*", + "192.170.0.0/15:*", + "192.172.0.0/14:*", + "192.176.0.0/12:*", + "192.192.0.0/10:*", + "193.0.0.0/8:*", + "194.0.0.0/7:*", + "196.0.0.0/6:*", + "200.0.0.0/5:*", + "208.0.0.0/4:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{types.ExitRouteV4, types.ExitRouteV6}, + }, + }, + peers: 
types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::1/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::2/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::100/128", Ports: tailcfg.PortRangeAny}, + {IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "192.169.0.0/16", Ports: 
tailcfg.PortRangeAny}, + {IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-app-connector-like", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "8.0.0.0/8:*", + "16.0.0.0/8:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + 
DstPorts: []tailcfg.NetPortRange{ + { + IP: "8.0.0.0/8", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "16.0.0.0/8", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-app-connector-like2", + pol: ACLPolicy{ + Hosts: Hosts{ + // Exit node + "internal": netip.MustParsePrefix("100.64.0.100/32"), + }, + Groups: Groups{ + "group:team": {"user3", "user2", "user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "internal:*", + }, + }, + { + Action: "accept", + Sources: []string{"group:team"}, + Destinations: []string{ + "8.0.0.0/16:*", + "16.0.0.0/16:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "8.0.0.0/16", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "16.0.0.0/16", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, + { + name: "1817-reduce-breaks-32-mask", + pol: ACLPolicy{ + Hosts: Hosts{ + "vlan1": netip.MustParsePrefix("172.16.0.0/24"), + "dns1": netip.MustParsePrefix("172.16.0.21/32"), + 
}, + Groups: Groups{ + "group:access": {"user1"}, + }, + ACLs: []ACL{ + { + Action: "accept", + Sources: []string{"group:access"}, + Destinations: []string{ + "tag:access-servers:*", + "dns1:*", + }, + }, + }, + }, + node: &types.Node{ + IPv4: iap("100.64.0.100"), + IPv6: iap("fd7a:115c:a1e0::100"), + User: types.User{Name: "user100"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")}, + }, + ForcedTags: types.StringList{"tag:access-servers"}, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user1"}, + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "172.16.0.21/32", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - rules, _ := tt.pol.generateFilterRules( - tt.node, - tt.peers, + got, _ := tt.pol.CompileFilterRules( + append(tt.peers, tt.node), ) - got := ReduceFilterRules(tt.node, rules) + got = ReduceFilterRules(tt.node, got) if diff := cmp.Diff(tt.want, got); diff != "" { log.Trace().Interface("got", got).Msg("result") @@ -2152,26 +2604,20 @@ func Test_getFilteredByACLPeers(t *testing.T) { { name: "all hosts can talk to each other", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - 
IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2184,47 +2630,41 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 1, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - User: types.User{Name: "joe"}, + ID: 1, + IPv4: iap("100.64.0.1"), + User: types.User{Name: "joe"}, }, }, want: types.Nodes{ &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.3")}, - User: types.User{Name: "mickael"}, + ID: 3, + IPv4: iap("100.64.0.3"), + User: types.User{Name: "mickael"}, }, }, }, { name: "One host can talk to another, but not all hosts", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2237,42 +2677,36 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 1, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - User: types.User{Name: "joe"}, + ID: 1, + IPv4: iap("100.64.0.1"), + User: types.User{Name: "joe"}, }, }, want: types.Nodes{ &types.Node{ - ID: 2, - IPAddresses: 
types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, }, }, { name: "host cannot directly talk to destination, but return path is authorized", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2285,42 +2719,36 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, }, want: types.Nodes{ &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.3")}, - User: types.User{Name: "mickael"}, + ID: 3, + IPv4: iap("100.64.0.3"), + User: types.User{Name: "mickael"}, }, }, }, { name: "rules allows all hosts to reach one destination", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - 
IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2333,19 +2761,15 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, }, want: types.Nodes{ &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, }, @@ -2353,26 +2777,20 @@ func Test_getFilteredByACLPeers(t *testing.T) { { name: "rules allows all hosts to reach one destination, destination can reach all hosts", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2385,26 +2803,20 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, }, want: types.Nodes{ &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + 
ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2412,26 +2824,20 @@ func Test_getFilteredByACLPeers(t *testing.T) { { name: "rule allows all hosts to reach all destinations", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, @@ -2444,58 +2850,50 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ // current nodes - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, }, want: types.Nodes{ &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.3")}, - User: types.User{Name: "mickael"}, + ID: 3, + IPv4: iap("100.64.0.3"), + User: types.User{Name: "mickael"}, }, }, }, { name: "without rule all communications are forbidden", args: args{ - nodes: types.Nodes{ // list of all nodess in the database + nodes: types.Nodes{ // list of all nodes in the database &types.Node{ - ID: 1, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - }, + ID: 1, + IPv4: iap("100.64.0.1"), User: types.User{Name: "joe"}, }, &types.Node{ - ID: 2, - IPAddresses: types.NodeAddresses{ - 
netip.MustParseAddr("100.64.0.2"), - }, + ID: 2, + IPv4: iap("100.64.0.2"), User: types.User{Name: "marc"}, }, &types.Node{ - ID: 3, - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - }, + ID: 3, + IPv4: iap("100.64.0.3"), User: types.User{Name: "mickael"}, }, }, rules: []tailcfg.FilterRule{ // list of all ACLRules registered }, node: &types.Node{ // current nodes - ID: 2, - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - User: types.User{Name: "marc"}, + ID: 2, + IPv4: iap("100.64.0.2"), + User: types.User{Name: "marc"}, }, }, want: types.Nodes{}, @@ -2511,38 +2909,30 @@ func Test_getFilteredByACLPeers(t *testing.T) { &types.Node{ ID: 1, Hostname: "ts-head-upcrmb", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - netip.MustParseAddr("fd7a:115c:a1e0::3"), - }, - User: types.User{Name: "user1"}, + IPv4: iap("100.64.0.3"), + IPv6: iap("fd7a:115c:a1e0::3"), + User: types.User{Name: "user1"}, }, &types.Node{ ID: 2, Hostname: "ts-unstable-rlwpvr", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - netip.MustParseAddr("fd7a:115c:a1e0::4"), - }, - User: types.User{Name: "user1"}, + IPv4: iap("100.64.0.4"), + IPv6: iap("fd7a:115c:a1e0::4"), + User: types.User{Name: "user1"}, }, &types.Node{ ID: 3, Hostname: "ts-head-8w6paa", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0::1"), - }, - User: types.User{Name: "user2"}, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user2"}, }, &types.Node{ ID: 4, Hostname: "ts-unstable-lys2ib", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("fd7a:115c:a1e0::2"), - }, - User: types.User{Name: "user2"}, + IPv4: iap("100.64.0.2"), + IPv6: iap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, }, }, rules: []tailcfg.FilterRule{ // list of all ACLRules registered @@ -2562,31 +2952,25 @@ func 
Test_getFilteredByACLPeers(t *testing.T) { node: &types.Node{ // current nodes ID: 3, Hostname: "ts-head-8w6paa", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.1"), - netip.MustParseAddr("fd7a:115c:a1e0::1"), - }, - User: types.User{Name: "user2"}, + IPv4: iap("100.64.0.1"), + IPv6: iap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user2"}, }, }, want: types.Nodes{ &types.Node{ ID: 1, Hostname: "ts-head-upcrmb", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.3"), - netip.MustParseAddr("fd7a:115c:a1e0::3"), - }, - User: types.User{Name: "user1"}, + IPv4: iap("100.64.0.3"), + IPv6: iap("fd7a:115c:a1e0::3"), + User: types.User{Name: "user1"}, }, &types.Node{ ID: 2, Hostname: "ts-unstable-rlwpvr", - IPAddresses: types.NodeAddresses{ - netip.MustParseAddr("100.64.0.4"), - netip.MustParseAddr("fd7a:115c:a1e0::4"), - }, - User: types.User{Name: "user1"}, + IPv4: iap("100.64.0.4"), + IPv6: iap("fd7a:115c:a1e0::4"), + User: types.User{Name: "user1"}, }, }, }, @@ -2595,16 +2979,16 @@ func Test_getFilteredByACLPeers(t *testing.T) { args: args{ nodes: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "peer1", - User: types.User{Name: "mini"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "peer1", + User: types.User{Name: "mini"}, }, { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "peer2", - User: types.User{Name: "peer2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "peer2", + User: types.User{Name: "peer2"}, }, }, rules: []tailcfg.FilterRule{ @@ -2617,18 +3001,18 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ - ID: 0, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "mini", - User: types.User{Name: "mini"}, + ID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "mini", + User: types.User{Name: "mini"}, }, }, want: []*types.Node{ { - ID: 2, - IPAddresses: 
[]netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "peer2", - User: types.User{Name: "peer2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "peer2", + User: types.User{Name: "peer2"}, }, }, }, @@ -2637,22 +3021,22 @@ func Test_getFilteredByACLPeers(t *testing.T) { args: args{ nodes: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "user1-2", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, }, { - ID: 0, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1-1", - User: types.User{Name: "user1"}, + ID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, }, { - ID: 3, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")}, - Hostname: "user2-2", - User: types.User{Name: "user2"}, + ID: 3, + IPv4: iap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, }, }, rules: []tailcfg.FilterRule{ @@ -2686,30 +3070,30 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "user-2-1", - User: types.User{Name: "user2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, }, }, want: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "user1-2", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, }, { - ID: 0, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1-1", - User: types.User{Name: "user1"}, + ID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, }, { - ID: 3, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")}, - Hostname: "user2-2", - User: types.User{Name: "user2"}, + ID: 3, + IPv4: 
iap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, }, }, }, @@ -2718,22 +3102,22 @@ func Test_getFilteredByACLPeers(t *testing.T) { args: args{ nodes: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "user1-2", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, }, { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "user-2-1", - User: types.User{Name: "user2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, }, { - ID: 3, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")}, - Hostname: "user2-2", - User: types.User{Name: "user2"}, + ID: 3, + IPv4: iap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, }, }, rules: []tailcfg.FilterRule{ @@ -2767,30 +3151,30 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ - ID: 0, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1-1", - User: types.User{Name: "user1"}, + ID: 0, + IPv4: iap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, }, }, want: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "user1-2", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, }, { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, - Hostname: "user-2-1", - User: types.User{Name: "user2"}, + ID: 2, + IPv4: iap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, }, { - ID: 3, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")}, - Hostname: "user2-2", - User: types.User{Name: "user2"}, + ID: 3, + IPv4: iap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, }, }, }, @@ -2800,16 +3184,16 @@ func 
Test_getFilteredByACLPeers(t *testing.T) { args: args{ nodes: []*types.Node{ { - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.1"), + Hostname: "user1", + User: types.User{Name: "user1"}, }, { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "router", - User: types.User{Name: "router"}, + ID: 2, + IPv4: iap("100.64.0.2"), + Hostname: "router", + User: types.User{Name: "router"}, Routes: types.Routes{ types.Route{ NodeID: 2, @@ -2831,18 +3215,18 @@ func Test_getFilteredByACLPeers(t *testing.T) { }, }, node: &types.Node{ - ID: 1, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, - Hostname: "user1", - User: types.User{Name: "user1"}, + ID: 1, + IPv4: iap("100.64.0.1"), + Hostname: "user1", + User: types.User{Name: "user1"}, }, }, want: []*types.Node{ { - ID: 2, - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, - Hostname: "router", - User: types.User{Name: "router"}, + ID: 2, + IPv4: iap("100.64.0.2"), + Hostname: "router", + User: types.User{Name: "router"}, Routes: types.Routes{ types.Route{ NodeID: 2, @@ -2883,23 +3267,23 @@ func TestSSHRules(t *testing.T) { node types.Node peers types.Nodes pol ACLPolicy - want []*tailcfg.SSHRule + want *tailcfg.SSHPolicy }{ { name: "peers-can-connect", node: types.Node{ - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.99.42")}, - UserID: 0, + Hostname: "testnodes", + IPv4: iap("100.64.99.42"), + UserID: 0, User: types.User{ Name: "user1", }, }, peers: types.Nodes{ &types.Node{ - Hostname: "testnodes2", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 0, + Hostname: "testnodes2", + IPv4: iap("100.64.0.1"), + UserID: 0, User: types.User{ Name: "user1", }, @@ -2946,7 +3330,7 @@ func TestSSHRules(t *testing.T) { }, }, }, - want: []*tailcfg.SSHRule{ + want: &tailcfg.SSHPolicy{Rules: 
[]*tailcfg.SSHRule{ { Principals: []*tailcfg.SSHPrincipal{ { @@ -2991,23 +3375,23 @@ func TestSSHRules(t *testing.T) { }, Action: &tailcfg.SSHAction{Accept: true, AllowLocalPortForwarding: true}, }, - }, + }}, }, { name: "peers-cannot-connect", node: types.Node{ - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 0, + Hostname: "testnodes", + IPv4: iap("100.64.0.1"), + UserID: 0, User: types.User{ Name: "user1", }, }, peers: types.Nodes{ &types.Node{ - Hostname: "testnodes2", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.99.42")}, - UserID: 0, + Hostname: "testnodes2", + IPv4: iap("100.64.99.42"), + UserID: 0, User: types.User{ Name: "user1", }, @@ -3042,13 +3426,13 @@ func TestSSHRules(t *testing.T) { }, }, }, - want: []*tailcfg.SSHRule{}, + want: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tt.pol.generateSSHRules(&tt.node, tt.peers) + got, err := tt.pol.CompileSSHPolicy(&tt.node, tt.peers) assert.NoError(t, err) if diff := cmp.Diff(tt.want, got); diff != "" { @@ -3132,10 +3516,10 @@ func TestValidExpandTagOwnersInSources(t *testing.T) { } node := &types.Node{ - ID: 0, - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 0, + ID: 0, + Hostname: "testnodes", + IPv4: iap("100.64.0.1"), + UserID: 0, User: types.User{ Name: "user1", }, @@ -3155,7 +3539,7 @@ func TestValidExpandTagOwnersInSources(t *testing.T) { }, } - got, _, err := GenerateFilterAndSSHRules(pol, node, types.Nodes{}) + got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{}) assert.NoError(t, err) want := []tailcfg.FilterRule{ @@ -3184,10 +3568,10 @@ func TestInvalidTagValidUser(t *testing.T) { } node := &types.Node{ - ID: 1, - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 1, + ID: 1, + Hostname: "testnodes", + IPv4: 
iap("100.64.0.1"), + UserID: 1, User: types.User{ Name: "user1", }, @@ -3206,7 +3590,7 @@ func TestInvalidTagValidUser(t *testing.T) { }, } - got, _, err := GenerateFilterAndSSHRules(pol, node, types.Nodes{}) + got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{}) assert.NoError(t, err) want := []tailcfg.FilterRule{ @@ -3235,10 +3619,10 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) { } node := &types.Node{ - ID: 1, - Hostname: "testnodes", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 1, + ID: 1, + Hostname: "testnodes", + IPv4: iap("100.64.0.1"), + UserID: 1, User: types.User{ Name: "user1", }, @@ -3265,7 +3649,7 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) { // c.Assert(rules[0].DstPorts, check.HasLen, 1) // c.Assert(rules[0].DstPorts[0].IP, check.Equals, "100.64.0.1/32") - got, _, err := GenerateFilterAndSSHRules(pol, node, types.Nodes{}) + got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{}) assert.NoError(t, err) want := []tailcfg.FilterRule{ @@ -3296,10 +3680,10 @@ func TestValidTagInvalidUser(t *testing.T) { } node := &types.Node{ - ID: 1, - Hostname: "webserver", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, - UserID: 1, + ID: 1, + Hostname: "webserver", + IPv4: iap("100.64.0.1"), + UserID: 1, User: types.User{ Name: "user1", }, @@ -3313,10 +3697,10 @@ func TestValidTagInvalidUser(t *testing.T) { } nodes2 := &types.Node{ - ID: 2, - Hostname: "user", - IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, - UserID: 1, + ID: 2, + Hostname: "user", + IPv4: iap("100.64.0.2"), + UserID: 1, User: types.User{ Name: "user1", }, @@ -3335,7 +3719,7 @@ func TestValidTagInvalidUser(t *testing.T) { }, } - got, _, err := GenerateFilterAndSSHRules(pol, node, types.Nodes{nodes2}) + got, _, err := GenerateFilterAndSSHRulesForTests(pol, node, types.Nodes{nodes2}) assert.NoError(t, err) want := []tailcfg.FilterRule{ diff --git 
a/hscontrol/poll.go b/hscontrol/poll.go index 22dd78ff..d3c82117 100644 --- a/hscontrol/poll.go +++ b/hscontrol/poll.go @@ -1,9 +1,13 @@ package hscontrol import ( + "cmp" "context" "fmt" + "math/rand/v2" "net/http" + "net/netip" + "sort" "strings" "time" @@ -11,70 +15,130 @@ import ( "github.com/juanfont/headscale/hscontrol/mapper" "github.com/juanfont/headscale/hscontrol/types" "github.com/rs/zerolog/log" + "github.com/sasha-s/go-deadlock" xslices "golang.org/x/exp/slices" "gorm.io/gorm" - "tailscale.com/envknob" "tailscale.com/tailcfg" ) const ( - keepAliveInterval = 60 * time.Second + keepAliveInterval = 50 * time.Second ) type contextKey string const nodeNameContextKey = contextKey("nodeName") -type UpdateNode func() +type mapSession struct { + h *Headscale + req tailcfg.MapRequest + ctx context.Context + capVer tailcfg.CapabilityVersion + mapper *mapper.Mapper -func logPollFunc( - mapRequest tailcfg.MapRequest, - node *types.Node, -) (func(string), func(string), func(error, string)) { - return func(msg string) { - log.Trace(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("node", node.Hostname). - Msg(msg) - }, - func(msg string) { - log.Warn(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("node", node.Hostname). - Msg(msg) - }, - func(err error, msg string) { - log.Error(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("node", node.Hostname). - Err(err). 
- Msg(msg) - } + cancelChMu deadlock.Mutex + + ch chan types.StateUpdate + cancelCh chan struct{} + cancelChOpen bool + + keepAlive time.Duration + keepAliveTicker *time.Ticker + + node *types.Node + w http.ResponseWriter + + warnf func(string, ...any) + infof func(string, ...any) + tracef func(string, ...any) + errf func(error, string, ...any) } -// handlePoll ensures the node gets the appropriate updates from either -// polling or immediate responses. -// -//nolint:gocyclo -func (h *Headscale) handlePoll( - writer http.ResponseWriter, +func (h *Headscale) newMapSession( ctx context.Context, + req tailcfg.MapRequest, + w http.ResponseWriter, node *types.Node, - mapRequest tailcfg.MapRequest, -) { - logTrace, logWarn, logErr := logPollFunc(mapRequest, node) +) *mapSession { + warnf, infof, tracef, errf := logPollFunc(req, node) + + var updateChan chan types.StateUpdate + if req.Stream { + // Use a buffered channel in case a node is not fully ready + // to receive a message to make sure we dont block the entire + // notifier. 
+ updateChan = make(chan types.StateUpdate, h.cfg.Tuning.NodeMapSessionBufferedChanSize) + updateChan <- types.StateUpdate{ + Type: types.StateFullUpdate, + } + } + + ka := keepAliveInterval + (time.Duration(rand.IntN(9000)) * time.Millisecond) + + return &mapSession{ + h: h, + ctx: ctx, + req: req, + w: w, + node: node, + capVer: req.Version, + mapper: h.mapper, + + ch: updateChan, + cancelCh: make(chan struct{}), + cancelChOpen: true, + + keepAlive: ka, + keepAliveTicker: nil, + + // Loggers + warnf: warnf, + infof: infof, + tracef: tracef, + errf: errf, + } +} + +func (m *mapSession) close() { + m.cancelChMu.Lock() + defer m.cancelChMu.Unlock() + + if !m.cancelChOpen { + mapResponseClosed.WithLabelValues("chanclosed").Inc() + return + } + + m.tracef("mapSession (%p) sending message on cancel chan", m) + select { + case m.cancelCh <- struct{}{}: + mapResponseClosed.WithLabelValues("sent").Inc() + m.tracef("mapSession (%p) sent message on cancel chan", m) + case <-time.After(30 * time.Second): + mapResponseClosed.WithLabelValues("timeout").Inc() + m.tracef("mapSession (%p) timed out sending close message", m) + } +} + +func (m *mapSession) isStreaming() bool { + return m.req.Stream && !m.req.ReadOnly +} + +func (m *mapSession) isEndpointUpdate() bool { + return !m.req.Stream && !m.req.ReadOnly && m.req.OmitPeers +} + +func (m *mapSession) isReadOnlyUpdate() bool { + return !m.req.Stream && m.req.OmitPeers && m.req.ReadOnly +} + +func (m *mapSession) resetKeepAlive() { + m.keepAliveTicker.Reset(m.keepAlive) +} + +// serve handles non-streaming requests. +func (m *mapSession) serve() { + // TODO(kradalby): A set todos to harden: + // - func to tell the stream to die, readonly -> false, !stream && omitpeers -> false, true // This is the mechanism where the node gives us information about its // current configuration. @@ -84,473 +148,251 @@ func (h *Headscale) handlePoll( // breaking existing long-polling (Stream == true) connections. 
// In this case, the server can omit the entire response; the client // only checks the HTTP response status code. + // + // This is what Tailscale calls a Lite update, the client ignores + // the response and just wants a 200. + // !req.stream && !req.ReadOnly && req.OmitPeers + // // TODO(kradalby): remove ReadOnly when we only support capVer 68+ - if mapRequest.OmitPeers && !mapRequest.Stream && !mapRequest.ReadOnly { - log.Info(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("node", node.Hostname). - Int("cap_ver", int(mapRequest.Version)). - Msg("Received update") + if m.isEndpointUpdate() { + m.handleEndpointUpdate() - change := node.PeerChangeFromMapRequest(mapRequest) + return + } - online := h.nodeNotifier.IsConnected(node.MachineKey) - change.Online = &online + // ReadOnly is whether the client just wants to fetch the + // MapResponse, without updating their Endpoints. The + // Endpoints field will be ignored and LastSeen will not be + // updated and peers will not be notified of changes. + // + // The intended use is for clients to discover the DERP map at + // start-up before their first real endpoint update. + if m.isReadOnlyUpdate() { + m.handleReadOnlyRequest() - node.ApplyPeerChange(&change) + return + } - hostInfoChange := node.Hostinfo.Equal(mapRequest.Hostinfo) +} - logTracePeerChange(node.Hostname, hostInfoChange, &change) +// serveLongPoll ensures the node gets the appropriate updates from either +// polling or immediate responses. +// +//nolint:gocyclo +func (m *mapSession) serveLongPoll() { + // Clean up the session when the client disconnects + defer func() { + m.cancelChMu.Lock() + m.cancelChOpen = false + close(m.cancelCh) + m.cancelChMu.Unlock() - // Check if the Hostinfo of the node has changed. 
- // If it has changed, check if there has been a change tod - // the routable IPs of the host and update update them in - // the database. Then send a Changed update - // (containing the whole node object) to peers to inform about - // the route change. - // If the hostinfo has changed, but not the routes, just update - // hostinfo and let the function continue. - if !hostInfoChange { - oldRoutes := node.Hostinfo.RoutableIPs - newRoutes := mapRequest.Hostinfo.RoutableIPs - - oldServicesCount := len(node.Hostinfo.Services) - newServicesCount := len(mapRequest.Hostinfo.Services) - - node.Hostinfo = mapRequest.Hostinfo - - sendUpdate := false - - // Route changes come as part of Hostinfo, which means that - // when an update comes, the Node Route logic need to run. - // This will require a "change" in comparison to a "patch", - // which is more costly. - if !xslices.Equal(oldRoutes, newRoutes) { - var err error - sendUpdate, err = h.db.SaveNodeRoutes(node) - if err != nil { - logErr(err, "Error processing node routes") - http.Error(writer, "", http.StatusInternalServerError) - - return - } - - if h.ACLPolicy != nil { - // update routes with peer information - update, err := h.db.EnableAutoApprovedRoutes(h.ACLPolicy, node) - if err != nil { - logErr(err, "Error running auto approved routes") - } - - if update != nil { - sendUpdate = true - } - } - } - - // Services is mostly useful for discovery and not critical, - // except for peerapi, which is how nodes talk to eachother. - // If peerapi was not part of the initial mapresponse, we - // need to make sure its sent out later as it is needed for - // Taildrop. - // TODO(kradalby): Length comparison is a bit naive, replace. 
- if oldServicesCount != newServicesCount { - sendUpdate = true - } - - if sendUpdate { - if err := h.db.DB.Save(node).Error; err != nil { - logErr(err, "Failed to persist/update node in the database") - http.Error(writer, "", http.StatusInternalServerError) - - return - } - - // Send an update to all peers to propagate the new routes - // available. - stateUpdate := types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, - Message: "called from handlePoll -> update -> new hostinfo", - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-peers-hostinfochange", node.Hostname) - h.nodeNotifier.NotifyWithIgnore( - ctx, - stateUpdate, - node.MachineKey.String()) - } - - // Send an update to the node itself with to ensure it - // has an updated packetfilter allowing the new route - // if it is defined in the ACL. - selfUpdate := types.StateUpdate{ - Type: types.StateSelfUpdate, - ChangeNodes: types.Nodes{node}, - } - if selfUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-self-hostinfochange", node.Hostname) - h.nodeNotifier.NotifyByMachineKey( - ctx, - selfUpdate, - node.MachineKey) - } - - return - } + // only update node status if the node channel was removed. + // in principal, it will be removed, but the client rapidly + // reconnects, the channel might be of another connection. + // In that case, it is not closed and the node is still online. + if m.h.nodeNotifier.RemoveNode(m.node.ID, m.ch) { + // Failover the node's routes if any. + m.h.updateNodeOnlineStatus(false, m.node) + m.pollFailoverRoutes("node closing connection", m.node) } - if err := h.db.DB.Save(node).Error; err != nil { - logErr(err, "Failed to persist/update node in the database") - http.Error(writer, "", http.StatusInternalServerError) + m.infof("node has disconnected, mapSession: %p, chan: %p", m, m.ch) + }() + // From version 68, all streaming requests can be treated as read only. 
+ // TODO: Remove when we drop support for 1.48 + if m.capVer < 68 { + // Error has been handled/written to client in the func + // return + err := m.handleSaveNode() + if err != nil { + mapResponseWriteUpdatesInStream.WithLabelValues("error").Inc() + + m.close() return } - - stateUpdate := types.StateUpdate{ - Type: types.StatePeerChangedPatch, - ChangePatches: []*tailcfg.PeerChange{&change}, - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-peers-patch", node.Hostname) - h.nodeNotifier.NotifyWithIgnore( - ctx, - stateUpdate, - node.MachineKey.String()) - } - - writer.WriteHeader(http.StatusOK) - if f, ok := writer.(http.Flusher); ok { - f.Flush() - } - - return - } else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly { - // ReadOnly is whether the client just wants to fetch the - // MapResponse, without updating their Endpoints. The - // Endpoints field will be ignored and LastSeen will not be - // updated and peers will not be notified of changes. - // - // The intended use is for clients to discover the DERP map at - // start-up before their first real endpoint update. 
- } else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly { - h.handleLiteRequest(writer, node, mapRequest) - - return - } else if mapRequest.OmitPeers && mapRequest.Stream { - logErr(nil, "Ignoring request, don't know how to handle it") - - return - } - - change := node.PeerChangeFromMapRequest(mapRequest) - - // A stream is being set up, the node is Online - online := true - change.Online = &online - - node.ApplyPeerChange(&change) - - // Only save HostInfo if changed, update routes if changed - // TODO(kradalby): Remove when capver is over 68 - if !node.Hostinfo.Equal(mapRequest.Hostinfo) { - oldRoutes := node.Hostinfo.RoutableIPs - newRoutes := mapRequest.Hostinfo.RoutableIPs - - node.Hostinfo = mapRequest.Hostinfo - - if !xslices.Equal(oldRoutes, newRoutes) { - _, err := h.db.SaveNodeRoutes(node) - if err != nil { - logErr(err, "Error processing node routes") - http.Error(writer, "", http.StatusInternalServerError) - - return - } - } - } - - if err := h.db.DB.Save(node).Error; err != nil { - logErr(err, "Failed to persist/update node in the database") - http.Error(writer, "", http.StatusInternalServerError) - - return + mapResponseWriteUpdatesInStream.WithLabelValues("ok").Inc() } // Set up the client stream - h.pollNetMapStreamWG.Add(1) - defer h.pollNetMapStreamWG.Done() + m.h.pollNetMapStreamWG.Add(1) + defer m.h.pollNetMapStreamWG.Done() - // Use a buffered channel in case a node is not fully ready - // to receive a message to make sure we dont block the entire - // notifier. - // 12 is arbitrarily chosen. 
- chanSize := 3 - if size, ok := envknob.LookupInt("HEADSCALE_TUNING_POLL_QUEUE_SIZE"); ok { - chanSize = size - } - updateChan := make(chan types.StateUpdate, chanSize) - defer closeChanWithLog(updateChan, node.Hostname, "updateChan") + m.pollFailoverRoutes("node connected", m.node) - // Register the node's update channel - h.nodeNotifier.AddNode(node.MachineKey, updateChan) - defer h.nodeNotifier.RemoveNode(node.MachineKey) + // Upgrade the writer to a ResponseController + rc := http.NewResponseController(m.w) - // When a node connects to control, list the peers it has at - // that given point, further updates are kept in memory in - // the Mapper, which lives for the duration of the polling - // session. - peers, err := h.db.ListPeers(node) - if err != nil { - logErr(err, "Failed to list peers when opening poller") - http.Error(writer, "", http.StatusInternalServerError) + // Longpolling will break if there is a write timeout, + // so it needs to be disabled. + rc.SetWriteDeadline(time.Time{}) - return - } - - isConnected := h.nodeNotifier.ConnectedMap() - for _, peer := range peers { - online := isConnected[peer.MachineKey] - peer.IsOnline = &online - } - - mapp := mapper.NewMapper( - node, - peers, - h.DERPMap, - h.cfg.BaseDomain, - h.cfg.DNSConfig, - h.cfg.LogTail.Enabled, - h.cfg.RandomizeClientPort, - ) - - // update ACLRules with peer informations (to update server tags if necessary) - if h.ACLPolicy != nil { - // update routes with peer information - // This state update is ignored as it will be sent - // as part of the whole node - // TODO(kradalby): figure out if that is actually correct - _, err = h.db.EnableAutoApprovedRoutes(h.ACLPolicy, node) - if err != nil { - logErr(err, "Error running auto approved routes") - } - } - - logTrace("Sending initial map") - - mapResp, err := mapp.FullMapResponse(mapRequest, node, h.ACLPolicy) - if err != nil { - logErr(err, "Failed to create MapResponse") - http.Error(writer, "", http.StatusInternalServerError) - - 
return - } - - // Send the client an update to make sure we send an initial mapresponse - _, err = writer.Write(mapResp) - if err != nil { - logErr(err, "Could not write the map response") - - return - } - - if flusher, ok := writer.(http.Flusher); ok { - flusher.Flush() - } else { - return - } - - stateUpdate := types.StateUpdate{ - Type: types.StatePeerChanged, - ChangeNodes: types.Nodes{node}, - Message: "called from handlePoll -> new node added", - } - if stateUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-newnode-peers", node.Hostname) - h.nodeNotifier.NotifyWithIgnore( - ctx, - stateUpdate, - node.MachineKey.String()) - } - - if len(node.Routes) > 0 { - go h.pollFailoverRoutes(logErr, "new node", node) - } - - keepAliveTicker := time.NewTicker(keepAliveInterval) - - ctx, cancel := context.WithCancel(context.WithValue(ctx, nodeNameContextKey, node.Hostname)) + ctx, cancel := context.WithCancel(context.WithValue(m.ctx, nodeNameContextKey, m.node.Hostname)) defer cancel() + m.keepAliveTicker = time.NewTicker(m.keepAlive) + + m.h.nodeNotifier.AddNode(m.node.ID, m.ch) + go m.h.updateNodeOnlineStatus(true, m.node) + + m.infof("node has connected, mapSession: %p, chan: %p", m, m.ch) + + // Loop through updates and continuously send them to the + // client. 
for { - logTrace("Waiting for update on stream channel") + // consume channels with update, keep alives or "batch" blocking signals select { - case <-keepAliveTicker.C: - data, err := mapp.KeepAliveResponse(mapRequest, node) - if err != nil { - logErr(err, "Error generating the keep alive msg") + case <-m.cancelCh: + m.tracef("poll cancelled received") + mapResponseEnded.WithLabelValues("cancelled").Inc() + return - return - } - _, err = writer.Write(data) - if err != nil { - logErr(err, "Cannot write keep alive message") - - return - } - if flusher, ok := writer.(http.Flusher); ok { - flusher.Flush() - } else { - log.Error().Msg("Failed to create http flusher") + case <-ctx.Done(): + m.tracef("poll context done") + mapResponseEnded.WithLabelValues("done").Inc() + return + // Consume updates sent to node + case update, ok := <-m.ch: + if !ok { + m.tracef("update channel closed, streaming session is likely being replaced") return } - // This goroutine is not ideal, but we have a potential issue here - // where it blocks too long and that holds up updates. - // One alternative is to split these different channels into - // goroutines, but then you might have a problem without a lock - // if a keepalive is written at the same time as an update. - go h.updateNodeOnlineStatus(true, node) - - case update := <-updateChan: - logTrace("Received update") - now := time.Now() + m.tracef("received stream update: %s %s", update.Type.String(), update.Message) + mapResponseUpdateReceived.WithLabelValues(update.Type.String()).Inc() var data []byte var err error + var lastMessage string // Ensure the node object is updated, for example, there // might have been a hostinfo update in a sidechannel // which contains data needed to generate a map response. 
- node, err = h.db.GetNodeByMachineKey(node.MachineKey) + m.node, err = m.h.db.GetNodeByID(m.node.ID) if err != nil { - logErr(err, "Could not get machine from db") + m.errf(err, "Could not get machine from db") return } - startMapResp := time.Now() + updateType := "full" switch update.Type { case types.StateFullUpdate: - logTrace("Sending Full MapResponse") - - data, err = mapp.FullMapResponse(mapRequest, node, h.ACLPolicy) + m.tracef("Sending Full MapResponse") + data, err = m.mapper.FullMapResponse(m.req, m.node, m.h.ACLPolicy, fmt.Sprintf("from mapSession: %p, stream: %t", m, m.isStreaming())) case types.StatePeerChanged: - logTrace(fmt.Sprintf("Sending Changed MapResponse: %s", update.Message)) + changed := make(map[types.NodeID]bool, len(update.ChangeNodes)) - isConnectedMap := h.nodeNotifier.ConnectedMap() - for _, node := range update.ChangeNodes { - // If a node is not reported to be online, it might be - // because the value is outdated, check with the notifier. - // However, if it is set to Online, and not in the notifier, - // this might be because it has announced itself, but not - // reached the stage to actually create the notifier channel. 
- if node.IsOnline != nil && !*node.IsOnline { - isOnline := isConnectedMap[node.MachineKey] - node.IsOnline = &isOnline - } + for _, nodeID := range update.ChangeNodes { + changed[nodeID] = true } - data, err = mapp.PeerChangedResponse(mapRequest, node, update.ChangeNodes, h.ACLPolicy, update.Message) + lastMessage = update.Message + m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage)) + data, err = m.mapper.PeerChangedResponse(m.req, m.node, changed, update.ChangePatches, m.h.ACLPolicy, lastMessage) + updateType = "change" + case types.StatePeerChangedPatch: - logTrace("Sending PeerChangedPatch MapResponse") - data, err = mapp.PeerChangedPatchResponse(mapRequest, node, update.ChangePatches, h.ACLPolicy) + m.tracef(fmt.Sprintf("Sending Changed Patch MapResponse: %v", lastMessage)) + data, err = m.mapper.PeerChangedPatchResponse(m.req, m.node, update.ChangePatches, m.h.ACLPolicy) + updateType = "patch" case types.StatePeerRemoved: - logTrace("Sending PeerRemoved MapResponse") - data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed) - case types.StateSelfUpdate: - if len(update.ChangeNodes) == 1 { - logTrace("Sending SelfUpdate MapResponse") - node = update.ChangeNodes[0] - data, err = mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy, types.SelfUpdateIdentifier) - } else { - logWarn("SelfUpdate contained too many nodes, this is likely a bug in the code, please report.") + changed := make(map[types.NodeID]bool, len(update.Removed)) + + for _, nodeID := range update.Removed { + changed[nodeID] = false } + m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage)) + data, err = m.mapper.PeerChangedResponse(m.req, m.node, changed, update.ChangePatches, m.h.ACLPolicy, lastMessage) + updateType = "remove" + case types.StateSelfUpdate: + lastMessage = update.Message + m.tracef(fmt.Sprintf("Sending Changed MapResponse: %v", lastMessage)) + // create the map so an empty (self) update is sent + data, err = 
m.mapper.PeerChangedResponse(m.req, m.node, make(map[types.NodeID]bool), update.ChangePatches, m.h.ACLPolicy, lastMessage) + updateType = "remove" case types.StateDERPUpdated: - logTrace("Sending DERPUpdate MapResponse") - data, err = mapp.DERPMapResponse(mapRequest, node, update.DERPMap) + m.tracef("Sending DERPUpdate MapResponse") + data, err = m.mapper.DERPMapResponse(m.req, m.node, m.h.DERPMap) + updateType = "derp" } if err != nil { - logErr(err, "Could not get the create map update") + m.errf(err, "Could not get the create map update") return } - log.Trace().Str("node", node.Hostname).TimeDiff("timeSpent", time.Now(), startMapResp).Str("mkey", node.MachineKey.String()).Int("type", int(update.Type)).Msg("finished making map response") - // Only send update if there is change if data != nil { startWrite := time.Now() - _, err = writer.Write(data) + _, err = m.w.Write(data) if err != nil { - logErr(err, "Could not write the map response") - - updateRequestsSentToNode.WithLabelValues(node.User.Name, node.Hostname, "failed"). - Inc() - + mapResponseSent.WithLabelValues("error", updateType).Inc() + m.errf(err, "could not write the map response(%s), for mapSession: %p", update.Type.String(), m) return } - if flusher, ok := writer.(http.Flusher); ok { - flusher.Flush() - } else { - log.Error().Msg("Failed to create http flusher") - + err = rc.Flush() + if err != nil { + mapResponseSent.WithLabelValues("error", updateType).Inc() + m.errf(err, "flushing the map response to client, for mapSession: %p", m) return } - log.Trace().Str("node", node.Hostname).TimeDiff("timeSpent", time.Now(), startWrite).Str("mkey", node.MachineKey.String()).Int("type", int(update.Type)).Msg("finished writing mapresp to node") - log.Debug(). - Caller(). - Bool("readOnly", mapRequest.ReadOnly). - Bool("omitPeers", mapRequest.OmitPeers). - Bool("stream", mapRequest.Stream). - Str("node_key", node.NodeKey.ShortString()). - Str("machine_key", node.MachineKey.ShortString()). 
- Str("node", node.Hostname). - TimeDiff("timeSpent", time.Now(), now). - Msg("update sent") + log.Trace().Str("node", m.node.Hostname).TimeDiff("timeSpent", time.Now(), startWrite).Str("mkey", m.node.MachineKey.String()).Msg("finished writing mapresp to node") + + if debugHighCardinalityMetrics { + mapResponseLastSentSeconds.WithLabelValues(updateType, m.node.ID.String()).Set(float64(time.Now().Unix())) + } + mapResponseSent.WithLabelValues("ok", updateType).Inc() + m.tracef("update sent") + m.resetKeepAlive() } - case <-ctx.Done(): - logTrace("The client has closed the connection") + case <-m.keepAliveTicker.C: + data, err := m.mapper.KeepAliveResponse(m.req, m.node) + if err != nil { + m.errf(err, "Error generating the keep alive msg") + mapResponseSent.WithLabelValues("error", "keepalive").Inc() + return + } + _, err = m.w.Write(data) + if err != nil { + m.errf(err, "Cannot write keep alive message") + mapResponseSent.WithLabelValues("error", "keepalive").Inc() + return + } + err = rc.Flush() + if err != nil { + m.errf(err, "flushing keep alive to client, for mapSession: %p", m) + mapResponseSent.WithLabelValues("error", "keepalive").Inc() + return + } - go h.updateNodeOnlineStatus(false, node) - - // Failover the node's routes if any. - go h.pollFailoverRoutes(logErr, "node closing connection", node) - - // The connection has been closed, so we can stop polling. 
- return - - case <-h.shutdownChan: - logTrace("The long-poll handler is shutting down") - - return + if debugHighCardinalityMetrics { + mapResponseLastSentSeconds.WithLabelValues("keepalive", m.node.ID.String()).Set(float64(time.Now().Unix())) + } + mapResponseSent.WithLabelValues("ok", "keepalive").Inc() } } } -func (h *Headscale) pollFailoverRoutes(logErr func(error, string), where string, node *types.Node) { - update, err := db.Write(h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { - return db.EnsureFailoverRouteIsAvailable(tx, h.nodeNotifier.ConnectedMap(), node) +func (m *mapSession) pollFailoverRoutes(where string, node *types.Node) { + update, err := db.Write(m.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) { + return db.FailoverNodeRoutesIfNeccessary(tx, m.h.nodeNotifier.LikelyConnectedMap(), node) }) if err != nil { - logErr(err, fmt.Sprintf("failed to ensure failover routes, %s", where)) + m.errf(err, fmt.Sprintf("failed to ensure failover routes, %s", where)) return } - if update != nil && !update.Empty() && update.Valid() { + if update != nil && !update.Empty() { ctx := types.NotifyCtx(context.Background(), fmt.Sprintf("poll-%s-routes-ensurefailover", strings.ReplaceAll(where, " ", "-")), node.Hostname) - h.nodeNotifier.NotifyWithIgnore(ctx, *update, node.MachineKey.String()) + m.h.nodeNotifier.NotifyWithIgnore(ctx, *update, node.ID) } } @@ -558,82 +400,229 @@ func (h *Headscale) pollFailoverRoutes(logErr func(error, string), where string, // about change in their online/offline status. // It takes a StateUpdateType of either StatePeerOnlineChanged or StatePeerOfflineChanged. func (h *Headscale) updateNodeOnlineStatus(online bool, node *types.Node) { - now := time.Now() + change := &tailcfg.PeerChange{ + NodeID: tailcfg.NodeID(node.ID), + Online: &online, + } - node.LastSeen = &now + if !online { + now := time.Now() - statusUpdate := types.StateUpdate{ + // lastSeen is only relevant if the node is disconnected. 
+ node.LastSeen = &now + change.LastSeen = &now + + err := h.db.Write(func(tx *gorm.DB) error { + return db.SetLastSeen(tx, node.ID, *node.LastSeen) + }) + if err != nil { + log.Error().Err(err).Msg("Cannot update node LastSeen") + + return + } + } + + ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-onlinestatus", node.Hostname) + h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{ Type: types.StatePeerChangedPatch, ChangePatches: []*tailcfg.PeerChange{ - { - NodeID: tailcfg.NodeID(node.ID), - Online: &online, - LastSeen: &now, - }, + change, }, - } - if statusUpdate.Valid() { - ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-onlinestatus", node.Hostname) - h.nodeNotifier.NotifyWithIgnore(ctx, statusUpdate, node.MachineKey.String()) - } + }, node.ID) +} - err := h.db.DB.Transaction(func(tx *gorm.DB) error { - return db.UpdateLastSeen(tx, node.ID, *node.LastSeen) - }) - if err != nil { - log.Error().Err(err).Msg("Cannot update node LastSeen") +func (m *mapSession) handleEndpointUpdate() { + m.tracef("received endpoint update") + change := m.node.PeerChangeFromMapRequest(m.req) + + online := m.h.nodeNotifier.IsLikelyConnected(m.node.ID) + change.Online = &online + + m.node.ApplyPeerChange(&change) + + sendUpdate, routesChanged := hostInfoChanged(m.node.Hostinfo, m.req.Hostinfo) + + // The node might not set NetInfo if it has not changed and if + // the full HostInfo object is overrwritten, the information is lost. + // If there is no NetInfo, keep the previous one. + // From 1.66 the client only sends it if changed: + // https://github.com/tailscale/tailscale/commit/e1011f138737286ecf5123ff887a7a5800d129a2 + // TODO(kradalby): evaulate if we need better comparing of hostinfo + // before we take the changes. 
+ if m.req.Hostinfo.NetInfo == nil { + m.req.Hostinfo.NetInfo = m.node.Hostinfo.NetInfo + } + m.node.Hostinfo = m.req.Hostinfo + + logTracePeerChange(m.node.Hostname, sendUpdate, &change) + + // If there is no changes and nothing to save, + // return early. + if peerChangeEmpty(change) && !sendUpdate { + mapResponseEndpointUpdates.WithLabelValues("noop").Inc() return } -} -func closeChanWithLog[C chan []byte | chan struct{} | chan types.StateUpdate](channel C, node, name string) { - log.Trace(). - Str("handler", "PollNetMap"). - Str("node", node). - Str("channel", "Done"). - Msg(fmt.Sprintf("Closing %s channel", name)) + // Check if the Hostinfo of the node has changed. + // If it has changed, check if there has been a change to + // the routable IPs of the host and update update them in + // the database. Then send a Changed update + // (containing the whole node object) to peers to inform about + // the route change. + // If the hostinfo has changed, but not the routes, just update + // hostinfo and let the function continue. 
+ if routesChanged { + var err error + _, err = m.h.db.SaveNodeRoutes(m.node) + if err != nil { + m.errf(err, "Error processing node routes") + http.Error(m.w, "", http.StatusInternalServerError) + mapResponseEndpointUpdates.WithLabelValues("error").Inc() - close(channel) -} + return + } -func (h *Headscale) handleLiteRequest( - writer http.ResponseWriter, - node *types.Node, - mapRequest tailcfg.MapRequest, -) { - logTrace, _, logErr := logPollFunc(mapRequest, node) + if m.h.ACLPolicy != nil { + // update routes with peer information + err := m.h.db.EnableAutoApprovedRoutes(m.h.ACLPolicy, m.node) + if err != nil { + m.errf(err, "Error running auto approved routes") + mapResponseEndpointUpdates.WithLabelValues("error").Inc() + } + } - mapp := mapper.NewMapper( - node, - types.Nodes{}, - h.DERPMap, - h.cfg.BaseDomain, - h.cfg.DNSConfig, - h.cfg.LogTail.Enabled, - h.cfg.RandomizeClientPort, - ) + // Send an update to the node itself with to ensure it + // has an updated packetfilter allowing the new route + // if it is defined in the ACL. 
+ ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-self-hostinfochange", m.node.Hostname) + m.h.nodeNotifier.NotifyByNodeID( + ctx, + types.StateUpdate{ + Type: types.StateSelfUpdate, + ChangeNodes: []types.NodeID{m.node.ID}, + }, + m.node.ID) + } - logTrace("Client asked for a lite update, responding without peers") - - mapResp, err := mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy) - if err != nil { - logErr(err, "Failed to create MapResponse") - http.Error(writer, "", http.StatusInternalServerError) + if err := m.h.db.DB.Save(m.node).Error; err != nil { + m.errf(err, "Failed to persist/update node in the database") + http.Error(m.w, "", http.StatusInternalServerError) + mapResponseEndpointUpdates.WithLabelValues("error").Inc() return } - writer.Header().Set("Content-Type", "application/json; charset=utf-8") - writer.WriteHeader(http.StatusOK) - _, err = writer.Write(mapResp) - if err != nil { - logErr(err, "Failed to write response") + ctx := types.NotifyCtx(context.Background(), "poll-nodeupdate-peers-patch", m.node.Hostname) + m.h.nodeNotifier.NotifyWithIgnore( + ctx, + types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{m.node.ID}, + Message: "called from handlePoll -> update", + }, + m.node.ID) + + m.w.WriteHeader(http.StatusOK) + mapResponseEndpointUpdates.WithLabelValues("ok").Inc() + + return +} + +// handleSaveNode saves node updates in the maprequest _streaming_ +// path and is mostly the same code as in handleEndpointUpdate. +// It is not attempted to be deduplicated since it will go away +// when we stop supporting older than 68 which removes updates +// when the node is streaming. 
+func (m *mapSession) handleSaveNode() error { + m.tracef("saving node update from stream session") + + change := m.node.PeerChangeFromMapRequest(m.req) + + // A stream is being set up, the node is Online + online := true + change.Online = &online + + m.node.ApplyPeerChange(&change) + + sendUpdate, routesChanged := hostInfoChanged(m.node.Hostinfo, m.req.Hostinfo) + m.node.Hostinfo = m.req.Hostinfo + + // If there is no changes and nothing to save, + // return early. + if peerChangeEmpty(change) || !sendUpdate { + return nil } + + // Check if the Hostinfo of the node has changed. + // If it has changed, check if there has been a change to + // the routable IPs of the host and update update them in + // the database. Then send a Changed update + // (containing the whole node object) to peers to inform about + // the route change. + // If the hostinfo has changed, but not the routes, just update + // hostinfo and let the function continue. + if routesChanged { + var err error + _, err = m.h.db.SaveNodeRoutes(m.node) + if err != nil { + return err + } + + if m.h.ACLPolicy != nil { + // update routes with peer information + err := m.h.db.EnableAutoApprovedRoutes(m.h.ACLPolicy, m.node) + if err != nil { + return err + } + } + } + + if err := m.h.db.DB.Save(m.node).Error; err != nil { + return err + } + + ctx := types.NotifyCtx(context.Background(), "pre-68-update-while-stream", m.node.Hostname) + m.h.nodeNotifier.NotifyWithIgnore( + ctx, + types.StateUpdate{ + Type: types.StatePeerChanged, + ChangeNodes: []types.NodeID{m.node.ID}, + Message: "called from handlePoll -> pre-68-update-while-stream", + }, + m.node.ID) + + return nil +} + +func (m *mapSession) handleReadOnlyRequest() { + m.tracef("Client asked for a lite update, responding without peers") + + mapResp, err := m.mapper.ReadOnlyMapResponse(m.req, m.node, m.h.ACLPolicy) + if err != nil { + m.errf(err, "Failed to create MapResponse") + http.Error(m.w, "", http.StatusInternalServerError) + 
mapResponseReadOnly.WithLabelValues("error").Inc() + return + } + + m.w.Header().Set("Content-Type", "application/json; charset=utf-8") + m.w.WriteHeader(http.StatusOK) + _, err = m.w.Write(mapResp) + if err != nil { + m.errf(err, "Failed to write response") + mapResponseReadOnly.WithLabelValues("error").Inc() + return + } + + m.w.WriteHeader(http.StatusOK) + mapResponseReadOnly.WithLabelValues("ok").Inc() + + return } func logTracePeerChange(hostname string, hostinfoChange bool, change *tailcfg.PeerChange) { - trace := log.Trace().Str("node_id", change.NodeID.String()).Str("hostname", hostname) + trace := log.Trace().Uint64("node.id", uint64(change.NodeID)).Str("hostname", hostname) if change.Key != nil { trace = trace.Str("node_key", change.Key.ShortString()) @@ -666,3 +655,114 @@ func logTracePeerChange(hostname string, hostinfoChange bool, change *tailcfg.Pe trace.Time("last_seen", *change.LastSeen).Msg("PeerChange received") } + +func peerChangeEmpty(chng tailcfg.PeerChange) bool { + return chng.Key == nil && + chng.DiscoKey == nil && + chng.Online == nil && + chng.Endpoints == nil && + chng.DERPRegion == 0 && + chng.LastSeen == nil && + chng.KeyExpiry == nil +} + +func logPollFunc( + mapRequest tailcfg.MapRequest, + node *types.Node, +) (func(string, ...any), func(string, ...any), func(string, ...any), func(error, string, ...any)) { + return func(msg string, a ...any) { + log.Warn(). + Caller(). + Bool("readOnly", mapRequest.ReadOnly). + Bool("omitPeers", mapRequest.OmitPeers). + Bool("stream", mapRequest.Stream). + Uint64("node.id", node.ID.Uint64()). + Str("node", node.Hostname). + Msgf(msg, a...) + }, + func(msg string, a ...any) { + log.Info(). + Caller(). + Bool("readOnly", mapRequest.ReadOnly). + Bool("omitPeers", mapRequest.OmitPeers). + Bool("stream", mapRequest.Stream). + Uint64("node.id", node.ID.Uint64()). + Str("node", node.Hostname). + Msgf(msg, a...) + }, + func(msg string, a ...any) { + log.Trace(). + Caller(). 
+ Bool("readOnly", mapRequest.ReadOnly). + Bool("omitPeers", mapRequest.OmitPeers). + Bool("stream", mapRequest.Stream). + Uint64("node.id", node.ID.Uint64()). + Str("node", node.Hostname). + Msgf(msg, a...) + }, + func(err error, msg string, a ...any) { + log.Error(). + Caller(). + Bool("readOnly", mapRequest.ReadOnly). + Bool("omitPeers", mapRequest.OmitPeers). + Bool("stream", mapRequest.Stream). + Uint64("node.id", node.ID.Uint64()). + Str("node", node.Hostname). + Err(err). + Msgf(msg, a...) + } +} + +// hostInfoChanged reports if hostInfo has changed in two ways, +// - first bool reports if an update needs to be sent to nodes +// - second reports if there has been changes to routes +// the caller can then use this info to save and update nodes +// and routes as needed. +func hostInfoChanged(old, new *tailcfg.Hostinfo) (bool, bool) { + if old.Equal(new) { + return false, false + } + + // Routes + oldRoutes := old.RoutableIPs + newRoutes := new.RoutableIPs + + sort.Slice(oldRoutes, func(i, j int) bool { + return comparePrefix(oldRoutes[i], oldRoutes[j]) > 0 + }) + sort.Slice(newRoutes, func(i, j int) bool { + return comparePrefix(newRoutes[i], newRoutes[j]) > 0 + }) + + if !xslices.Equal(oldRoutes, newRoutes) { + return true, true + } + + // Services is mostly useful for discovery and not critical, + // except for peerapi, which is how nodes talk to eachother. + // If peerapi was not part of the initial mapresponse, we + // need to make sure its sent out later as it is needed for + // Taildrop. + // TODO(kradalby): Length comparison is a bit naive, replace. + if len(old.Services) != len(new.Services) { + return true, false + } + + return false, false +} + +// TODO(kradalby): Remove after go 1.23, will be in stdlib. +// Compare returns an integer comparing two prefixes. +// The result will be 0 if p == p2, -1 if p < p2, and +1 if p > p2. 
+// Prefixes sort first by validity (invalid before valid), then +// address family (IPv4 before IPv6), then prefix length, then +// address. +func comparePrefix(p, p2 netip.Prefix) int { + if c := cmp.Compare(p.Addr().BitLen(), p2.Addr().BitLen()); c != 0 { + return c + } + if c := cmp.Compare(p.Bits(), p2.Bits()); c != 0 { + return c + } + return p.Addr().Compare(p2.Addr()) +} diff --git a/hscontrol/poll_noise.go b/hscontrol/poll_noise.go deleted file mode 100644 index 53b1d47e..00000000 --- a/hscontrol/poll_noise.go +++ /dev/null @@ -1,96 +0,0 @@ -package hscontrol - -import ( - "encoding/json" - "errors" - "io" - "net/http" - - "github.com/rs/zerolog/log" - "gorm.io/gorm" - "tailscale.com/tailcfg" - "tailscale.com/types/key" -) - -const ( - MinimumCapVersion tailcfg.CapabilityVersion = 58 -) - -// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol -// -// This is the busiest endpoint, as it keeps the HTTP long poll that updates -// the clients when something in the network changes. -// -// The clients POST stuff like HostInfo and their Endpoints here, but -// only after their first request (marked with the ReadOnly field). -// -// At this moment the updates are sent in a quite horrendous way, but they kinda work. -func (ns *noiseServer) NoisePollNetMapHandler( - writer http.ResponseWriter, - req *http.Request, -) { - log.Trace(). - Str("handler", "NoisePollNetMap"). - Msg("PollNetMapHandler called") - - log.Trace(). - Any("headers", req.Header). - Caller(). - Msg("Headers") - - body, _ := io.ReadAll(req.Body) - - mapRequest := tailcfg.MapRequest{} - if err := json.Unmarshal(body, &mapRequest); err != nil { - log.Error(). - Caller(). - Err(err). - Msg("Cannot parse MapRequest") - http.Error(writer, "Internal error", http.StatusInternalServerError) - - return - } - - // Reject unsupported versions - if mapRequest.Version < MinimumCapVersion { - log.Info(). - Caller(). - Int("min_version", int(MinimumCapVersion)). 
- Int("client_version", int(mapRequest.Version)). - Msg("unsupported client connected") - http.Error(writer, "Internal error", http.StatusBadRequest) - - return - } - - ns.nodeKey = mapRequest.NodeKey - - node, err := ns.headscale.db.GetNodeByAnyKey( - ns.conn.Peer(), - mapRequest.NodeKey, - key.NodePublic{}, - ) - if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - log.Warn(). - Str("handler", "NoisePollNetMap"). - Msgf("Ignoring request, cannot find node with key %s", mapRequest.NodeKey.String()) - http.Error(writer, "Internal error", http.StatusNotFound) - - return - } - log.Error(). - Str("handler", "NoisePollNetMap"). - Msgf("Failed to fetch node from the database with node key: %s", mapRequest.NodeKey.String()) - http.Error(writer, "Internal error", http.StatusInternalServerError) - - return - } - log.Debug(). - Str("handler", "NoisePollNetMap"). - Str("node", node.Hostname). - Int("cap_ver", int(mapRequest.Version)). - Msg("A node sending a MapRequest with Noise protocol") - - ns.headscale.handlePoll(writer, req.Context(), node, mapRequest) -} diff --git a/hscontrol/types/common.go b/hscontrol/types/common.go index ceeceea0..35f5e5e4 100644 --- a/hscontrol/types/common.go +++ b/hscontrol/types/common.go @@ -10,6 +10,7 @@ import ( "time" "tailscale.com/tailcfg" + "tailscale.com/util/ctxkey" ) const ( @@ -90,6 +91,25 @@ func (i StringList) Value() (driver.Value, error) { type StateUpdateType int +func (su StateUpdateType) String() string { + switch su { + case StateFullUpdate: + return "StateFullUpdate" + case StatePeerChanged: + return "StatePeerChanged" + case StatePeerChangedPatch: + return "StatePeerChangedPatch" + case StatePeerRemoved: + return "StatePeerRemoved" + case StateSelfUpdate: + return "StateSelfUpdate" + case StateDERPUpdated: + return "StateDERPUpdated" + } + + return "unknown state update type" +} + const ( StateFullUpdate StateUpdateType = iota // StatePeerChanged is used for updates that needs @@ -118,7 +138,7 @@ type 
StateUpdate struct { // ChangeNodes must be set when Type is StatePeerAdded // and StatePeerChanged and contains the full node // object for added nodes. - ChangeNodes Nodes + ChangeNodes []NodeID // ChangePatches must be set when Type is StatePeerChangedPatch // and contains a populated PeerChange object. @@ -127,7 +147,7 @@ type StateUpdate struct { // Removed must be set when Type is StatePeerRemoved and // contain a list of the nodes that has been removed from // the network. - Removed []tailcfg.NodeID + Removed []NodeID // DERPMap must be set when Type is StateDERPUpdated and // contain the new DERP Map. @@ -138,39 +158,6 @@ type StateUpdate struct { Message string } -// Valid reports if a StateUpdate is correctly filled and -// panics if the mandatory fields for a type is not -// filled. -// Reports true if valid. -func (su *StateUpdate) Valid() bool { - switch su.Type { - case StatePeerChanged: - if su.ChangeNodes == nil { - panic("Mandatory field ChangeNodes is not set on StatePeerChanged update") - } - case StatePeerChangedPatch: - if su.ChangePatches == nil { - panic("Mandatory field ChangePatches is not set on StatePeerChangedPatch update") - } - case StatePeerRemoved: - if su.Removed == nil { - panic("Mandatory field Removed is not set on StatePeerRemove update") - } - case StateSelfUpdate: - if su.ChangeNodes == nil || len(su.ChangeNodes) != 1 { - panic( - "Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node", - ) - } - case StateDERPUpdated: - if su.DERPMap == nil { - panic("Mandatory field DERPMap is not set on StateDERPUpdated update") - } - } - - return true -} - // Empty reports if there are any updates in the StateUpdate. 
func (su *StateUpdate) Empty() bool { switch su.Type { @@ -185,22 +172,26 @@ func (su *StateUpdate) Empty() bool { return false } -func StateUpdateExpire(nodeID uint64, expiry time.Time) StateUpdate { +func StateUpdateExpire(nodeID NodeID, expiry time.Time) StateUpdate { return StateUpdate{ Type: StatePeerChangedPatch, ChangePatches: []*tailcfg.PeerChange{ { - NodeID: tailcfg.NodeID(nodeID), + NodeID: nodeID.NodeID(), KeyExpiry: &expiry, }, }, } } +var ( + NotifyOriginKey = ctxkey.New("notify.origin", "") + NotifyHostnameKey = ctxkey.New("notify.hostname", "") +) + func NotifyCtx(ctx context.Context, origin, hostname string) context.Context { - ctx2, _ := context.WithTimeout( - context.WithValue(context.WithValue(ctx, "hostname", hostname), "origin", origin), - 3*time.Second, - ) + ctx2, _ := context.WithTimeout(ctx, 3*time.Second) + ctx2 = NotifyOriginKey.WithValue(ctx2, origin) + ctx2 = NotifyHostnameKey.WithValue(ctx2, hostname) return ctx2 } diff --git a/hscontrol/types/config.go b/hscontrol/types/config.go index 5f66b70b..9c02772c 100644 --- a/hscontrol/types/config.go +++ b/hscontrol/types/config.go @@ -31,6 +31,13 @@ var errOidcMutuallyExclusive = errors.New( "oidc_client_secret and oidc_client_secret_path are mutually exclusive", ) +type IPAllocationStrategy string + +const ( + IPAllocationStrategySequential IPAllocationStrategy = "sequential" + IPAllocationStrategyRandom IPAllocationStrategy = "random" +) + // Config contains the initial Headscale configuration. 
type Config struct { ServerURL string @@ -39,9 +46,9 @@ type Config struct { GRPCAddr string GRPCAllowInsecure bool EphemeralNodeInactivityTimeout time.Duration - NodeUpdateCheckInterval time.Duration PrefixV4 *netip.Prefix PrefixV6 *netip.Prefix + IPAllocation IPAllocationStrategy NoisePrivateKeyPath string BaseDomain string Log LogConfig @@ -69,6 +76,8 @@ type Config struct { CLI CLIConfig ACL ACLConfig + + Tuning Tuning } type SqliteConfig struct { @@ -163,6 +172,12 @@ type LogConfig struct { Level zerolog.Level } +type Tuning struct { + NotifierSendTimeout time.Duration + BatchChangeDelay time.Duration + NodeMapSessionBufferedChanSize int +} + func LoadConfig(path string, isFile bool) error { if isFile { viper.SetConfigFile(path) @@ -220,7 +235,11 @@ func LoadConfig(path string, isFile bool) error { viper.SetDefault("ephemeral_node_inactivity_timeout", "120s") - viper.SetDefault("node_update_check_interval", "10s") + viper.SetDefault("tuning.notifier_send_timeout", "800ms") + viper.SetDefault("tuning.batch_change_delay", "800ms") + viper.SetDefault("tuning.node_mapsession_buffered_chan_size", 30) + + viper.SetDefault("prefixes.allocation", string(IPAllocationStrategySequential)) if IsCLIConfigured() { return nil @@ -272,15 +291,6 @@ func LoadConfig(path string, isFile bool) error { ) } - maxNodeUpdateCheckInterval, _ := time.ParseDuration("60s") - if viper.GetDuration("node_update_check_interval") > maxNodeUpdateCheckInterval { - errorText += fmt.Sprintf( - "Fatal config error: node_update_check_interval (%s) is set too high, must be less than %s", - viper.GetString("node_update_check_interval"), - maxNodeUpdateCheckInterval, - ) - } - if errorText != "" { // nolint return errors.New(strings.TrimSuffix(errorText, "\n")) @@ -577,18 +587,16 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) { return nil, "" } -func Prefixes() (*netip.Prefix, *netip.Prefix, error) { +func PrefixV4() (*netip.Prefix, error) { prefixV4Str := viper.GetString("prefixes.v4") - 
prefixV6Str := viper.GetString("prefixes.v6") + + if prefixV4Str == "" { + return nil, nil + } prefixV4, err := netip.ParsePrefix(prefixV4Str) if err != nil { - return nil, nil, err - } - - prefixV6, err := netip.ParsePrefix(prefixV6Str) - if err != nil { - return nil, nil, err + return nil, fmt.Errorf("parsing IPv4 prefix from config: %w", err) } builder := netipx.IPSetBuilder{} @@ -601,13 +609,33 @@ func Prefixes() (*netip.Prefix, *netip.Prefix, error) { prefixV4Str, tsaddr.CGNATRange()) } + return &prefixV4, nil +} + +func PrefixV6() (*netip.Prefix, error) { + prefixV6Str := viper.GetString("prefixes.v6") + + if prefixV6Str == "" { + return nil, nil + } + + prefixV6, err := netip.ParsePrefix(prefixV6Str) + if err != nil { + return nil, fmt.Errorf("parsing IPv6 prefix from config: %w", err) + } + + builder := netipx.IPSetBuilder{} + builder.AddPrefix(tsaddr.CGNATRange()) + builder.AddPrefix(tsaddr.TailscaleULARange()) + ipSet, _ := builder.IPSet() + if !ipSet.ContainsPrefix(prefixV6) { log.Warn(). Msgf("Prefix %s is not in the %s range. 
This is an unsupported configuration.", prefixV6Str, tsaddr.TailscaleULARange()) } - return &prefixV4, &prefixV6, nil + return &prefixV6, nil } func GetHeadscaleConfig() (*Config, error) { @@ -622,14 +650,37 @@ func GetHeadscaleConfig() (*Config, error) { }, nil } - prefix4, prefix6, err := Prefixes() + logConfig := GetLogConfig() + zerolog.SetGlobalLevel(logConfig.Level) + + prefix4, err := PrefixV4() if err != nil { return nil, err } + prefix6, err := PrefixV6() + if err != nil { + return nil, err + } + + if prefix4 == nil && prefix6 == nil { + return nil, fmt.Errorf("no IPv4 or IPv6 prefix configured, minimum one prefix is required") + } + + allocStr := viper.GetString("prefixes.allocation") + var alloc IPAllocationStrategy + switch allocStr { + case string(IPAllocationStrategySequential): + alloc = IPAllocationStrategySequential + case string(IPAllocationStrategyRandom): + alloc = IPAllocationStrategyRandom + default: + return nil, fmt.Errorf("config error, prefixes.allocation is set to %s, which is not a valid strategy, allowed options: %s, %s", allocStr, IPAllocationStrategySequential, IPAllocationStrategyRandom) + } + dnsConfig, baseDomain := GetDNSConfig() derpConfig := GetDERPConfig() - logConfig := GetLogTailConfig() + logTailConfig := GetLogTailConfig() randomizeClientPort := viper.GetBool("randomize_client_port") oidcClientSecret := viper.GetString("oidc.client_secret") @@ -653,8 +704,9 @@ func GetHeadscaleConfig() (*Config, error) { GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"), DisableUpdateCheck: viper.GetBool("disable_check_updates"), - PrefixV4: prefix4, - PrefixV6: prefix6, + PrefixV4: prefix4, + PrefixV6: prefix6, + IPAllocation: IPAllocationStrategy(alloc), NoisePrivateKeyPath: util.AbsolutePathFromConfigPath( viper.GetString("noise.private_key_path"), @@ -667,10 +719,6 @@ func GetHeadscaleConfig() (*Config, error) { "ephemeral_node_inactivity_timeout", ), - NodeUpdateCheckInterval: viper.GetDuration( - "node_update_check_interval", - 
), - Database: GetDatabaseConfig(), TLS: GetTLSConfig(), @@ -714,7 +762,7 @@ func GetHeadscaleConfig() (*Config, error) { UseExpiryFromToken: viper.GetBool("oidc.use_expiry_from_token"), }, - LogTail: logConfig, + LogTail: logTailConfig, RandomizeClientPort: randomizeClientPort, ACL: GetACLConfig(), @@ -726,7 +774,14 @@ func GetHeadscaleConfig() (*Config, error) { Insecure: viper.GetBool("cli.insecure"), }, - Log: GetLogConfig(), + Log: logConfig, + + // TODO(kradalby): Document these settings when more stable + Tuning: Tuning{ + NotifierSendTimeout: viper.GetDuration("tuning.notifier_send_timeout"), + BatchChangeDelay: viper.GetDuration("tuning.batch_change_delay"), + NodeMapSessionBufferedChanSize: viper.GetInt("tuning.node_mapsession_buffered_chan_size"), + }, }, nil } diff --git a/hscontrol/types/const.go b/hscontrol/types/const.go index e718eb2e..019c14b6 100644 --- a/hscontrol/types/const.go +++ b/hscontrol/types/const.go @@ -3,7 +3,7 @@ package types import "time" const ( - HTTPReadTimeout = 30 * time.Second + HTTPTimeout = 30 * time.Second HTTPShutdownTimeout = 3 * time.Second TLSALPN01ChallengeType = "TLS-ALPN-01" HTTP01ChallengeType = "HTTP-01" diff --git a/hscontrol/types/node.go b/hscontrol/types/node.go index 69004bfd..3ccadc38 100644 --- a/hscontrol/types/node.go +++ b/hscontrol/types/node.go @@ -1,18 +1,18 @@ package types import ( - "database/sql/driver" + "database/sql" "encoding/json" "errors" "fmt" "net/netip" - "sort" + "strconv" "strings" "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/policy/matcher" - "github.com/rs/zerolog/log" + "github.com/juanfont/headscale/hscontrol/util" "go4.org/netipx" "google.golang.org/protobuf/types/known/timestamppb" "gorm.io/gorm" @@ -27,9 +27,29 @@ var ( ErrNodeUserHasNoName = errors.New("node user has no name") ) +type NodeID uint64 + +// type NodeConnectedMap *xsync.MapOf[NodeID, bool] + +func (id NodeID) StableID() tailcfg.StableNodeID { + return 
tailcfg.StableNodeID(strconv.FormatUint(uint64(id), util.Base10)) +} + +func (id NodeID) NodeID() tailcfg.NodeID { + return tailcfg.NodeID(id) +} + +func (id NodeID) Uint64() uint64 { + return uint64(id) +} + +func (id NodeID) String() string { + return strconv.FormatUint(id.Uint64(), util.Base10) +} + // Node is a Headscale client. type Node struct { - ID uint64 `gorm:"primary_key"` + ID NodeID `gorm:"primary_key"` // MachineKeyDatabaseField is the string representation of MachineKey // it is _only_ used for reading and writing the key to the @@ -66,7 +86,19 @@ type Node struct { HostinfoDatabaseField string `gorm:"column:host_info"` Hostinfo *tailcfg.Hostinfo `gorm:"-"` - IPAddresses NodeAddresses + // IPv4DatabaseField is the string representation of v4 address, + // it is _only_ used for reading and writing the key to the + // database and should not be used. + // Use V4 instead. + IPv4DatabaseField sql.NullString `gorm:"column:ipv4"` + IPv4 *netip.Addr `gorm:"-"` + + // IPv6DatabaseField is the string representation of v4 address, + // it is _only_ used for reading and writing the key to the + // database and should not be used. + // Use V6 instead. + IPv6DatabaseField sql.NullString `gorm:"column:ipv6"` + IPv6 *netip.Addr `gorm:"-"` // Hostname represents the name given by the Tailscale // client during registration @@ -80,20 +112,20 @@ type Node struct { // parts of headscale. GivenName string `gorm:"type:varchar(63);unique_index"` UserID uint - User User `gorm:"foreignKey:UserID"` + User User `gorm:"constraint:OnDelete:CASCADE;"` RegisterMethod string ForcedTags StringList // TODO(kradalby): This seems like irrelevant information? 
- AuthKeyID uint - AuthKey *PreAuthKey + AuthKeyID *uint `sql:"DEFAULT:NULL"` + AuthKey *PreAuthKey `gorm:"constraint:OnDelete:SET NULL;"` LastSeen *time.Time Expiry *time.Time - Routes []Route + Routes []Route `gorm:"constraint:OnDelete:CASCADE;"` CreatedAt time.Time UpdatedAt time.Time @@ -106,34 +138,41 @@ type ( Nodes []*Node ) -type NodeAddresses []netip.Addr - -func (na NodeAddresses) Sort() { - sort.Slice(na, func(index1, index2 int) bool { - if na[index1].Is4() && na[index2].Is6() { - return true - } - if na[index1].Is6() && na[index2].Is4() { - return false - } - - return na[index1].Compare(na[index2]) < 0 - }) -} - -func (na NodeAddresses) StringSlice() []string { - na.Sort() - strSlice := make([]string, 0, len(na)) - for _, addr := range na { - strSlice = append(strSlice, addr.String()) +// IsExpired returns whether the node registration has expired. +func (node Node) IsExpired() bool { + // If Expiry is not set, the client has not indicated that + // it wants an expiry time, it is therefor considered + // to mean "not expired" + if node.Expiry == nil || node.Expiry.IsZero() { + return false } - return strSlice + return time.Since(*node.Expiry) > 0 } -func (na NodeAddresses) Prefixes() []netip.Prefix { +// IsEphemeral returns if the node is registered as an Ephemeral node. 
+// https://tailscale.com/kb/1111/ephemeral-nodes/ +func (node *Node) IsEphemeral() bool { + return node.AuthKey != nil && node.AuthKey.Ephemeral +} + +func (node *Node) IPs() []netip.Addr { + var ret []netip.Addr + + if node.IPv4 != nil { + ret = append(ret, *node.IPv4) + } + + if node.IPv6 != nil { + ret = append(ret, *node.IPv6) + } + + return ret +} + +func (node *Node) Prefixes() []netip.Prefix { addrs := []netip.Prefix{} - for _, nodeAddress := range na { + for _, nodeAddress := range node.IPs() { ip := netip.PrefixFrom(nodeAddress, nodeAddress.BitLen()) addrs = append(addrs, ip) } @@ -141,8 +180,22 @@ func (na NodeAddresses) Prefixes() []netip.Prefix { return addrs } -func (na NodeAddresses) InIPSet(set *netipx.IPSet) bool { - for _, nodeAddr := range na { +func (node *Node) IPsAsString() []string { + var ret []string + + if node.IPv4 != nil { + ret = append(ret, node.IPv4.String()) + } + + if node.IPv6 != nil { + ret = append(ret, node.IPv6.String()) + } + + return ret +} + +func (node *Node) InIPSet(set *netipx.IPSet) bool { + for _, nodeAddr := range node.IPs() { if set.Contains(nodeAddr) { return true } @@ -153,62 +206,15 @@ func (na NodeAddresses) InIPSet(set *netipx.IPSet) bool { // AppendToIPSet adds the individual ips in NodeAddresses to a // given netipx.IPSetBuilder. 
-func (na NodeAddresses) AppendToIPSet(build *netipx.IPSetBuilder) { - for _, ip := range na { +func (node *Node) AppendToIPSet(build *netipx.IPSetBuilder) { + for _, ip := range node.IPs() { build.Add(ip) } } -func (na *NodeAddresses) Scan(destination interface{}) error { - switch value := destination.(type) { - case string: - addresses := strings.Split(value, ",") - *na = (*na)[:0] - for _, addr := range addresses { - if len(addr) < 1 { - continue - } - parsed, err := netip.ParseAddr(addr) - if err != nil { - return err - } - *na = append(*na, parsed) - } - - return nil - - default: - return fmt.Errorf("%w: unexpected data type %T", ErrNodeAddressesInvalid, destination) - } -} - -// Value return json value, implement driver.Valuer interface. -func (na NodeAddresses) Value() (driver.Value, error) { - addresses := strings.Join(na.StringSlice(), ",") - - return addresses, nil -} - -// IsExpired returns whether the node registration has expired. -func (node Node) IsExpired() bool { - // If Expiry is not set, the client has not indicated that - // it wants an expiry time, it is therefor considered - // to mean "not expired" - if node.Expiry == nil || node.Expiry.IsZero() { - return false - } - - return time.Now().UTC().After(*node.Expiry) -} - -// IsEphemeral returns if the node is registered as an Ephemeral node. -// https://tailscale.com/kb/1111/ephemeral-nodes/ -func (node *Node) IsEphemeral() bool { - return node.AuthKey != nil && node.AuthKey.Ephemeral -} - func (node *Node) CanAccess(filter []tailcfg.FilterRule, node2 *Node) bool { - allowedIPs := append([]netip.Addr{}, node2.IPAddresses...) 
+ src := node.IPs() + allowedIPs := node2.IPs() for _, route := range node2.Routes { if route.Enabled { @@ -220,7 +226,7 @@ func (node *Node) CanAccess(filter []tailcfg.FilterRule, node2 *Node) bool { // TODO(kradalby): Cache or pregen this matcher := matcher.MatchFromFilterRule(rule) - if !matcher.SrcsContainsIPs([]netip.Addr(node.IPAddresses)) { + if !matcher.SrcsContainsIPs(src) { continue } @@ -233,13 +239,16 @@ func (node *Node) CanAccess(filter []tailcfg.FilterRule, node2 *Node) bool { } func (nodes Nodes) FilterByIP(ip netip.Addr) Nodes { - found := make(Nodes, 0) + var found Nodes for _, node := range nodes { - for _, mIP := range node.IPAddresses { - if ip == mIP { - found = append(found, node) - } + if node.IPv4 != nil && ip == *node.IPv4 { + found = append(found, node) + continue + } + + if node.IPv6 != nil && ip == *node.IPv6 { + found = append(found, node) } } @@ -264,10 +273,22 @@ func (node *Node) BeforeSave(tx *gorm.DB) error { hi, err := json.Marshal(node.Hostinfo) if err != nil { - return fmt.Errorf("failed to marshal Hostinfo to store in db: %w", err) + return fmt.Errorf("marshalling Hostinfo to store in db: %w", err) } node.HostinfoDatabaseField = string(hi) + if node.IPv4 != nil { + node.IPv4DatabaseField.String, node.IPv4DatabaseField.Valid = node.IPv4.String(), true + } else { + node.IPv4DatabaseField.String, node.IPv4DatabaseField.Valid = "", false + } + + if node.IPv6 != nil { + node.IPv6DatabaseField.String, node.IPv6DatabaseField.Valid = node.IPv6.String(), true + } else { + node.IPv6DatabaseField.String, node.IPv6DatabaseField.Valid = "", false + } + return nil } @@ -279,27 +300,31 @@ func (node *Node) BeforeSave(tx *gorm.DB) error { func (node *Node) AfterFind(tx *gorm.DB) error { var machineKey key.MachinePublic if err := machineKey.UnmarshalText([]byte(node.MachineKeyDatabaseField)); err != nil { - return fmt.Errorf("failed to unmarshal machine key from db: %w", err) + return fmt.Errorf("unmarshalling machine key from db: %w", err) } 
node.MachineKey = machineKey var nodeKey key.NodePublic if err := nodeKey.UnmarshalText([]byte(node.NodeKeyDatabaseField)); err != nil { - return fmt.Errorf("failed to unmarshal node key from db: %w", err) + return fmt.Errorf("unmarshalling node key from db: %w", err) } node.NodeKey = nodeKey - var discoKey key.DiscoPublic - if err := discoKey.UnmarshalText([]byte(node.DiscoKeyDatabaseField)); err != nil { - return fmt.Errorf("failed to unmarshal disco key from db: %w", err) + // DiscoKey might be empty if a node has not sent it to headscale. + // This means that this might fail if the disco key is empty. + if node.DiscoKeyDatabaseField != "" { + var discoKey key.DiscoPublic + if err := discoKey.UnmarshalText([]byte(node.DiscoKeyDatabaseField)); err != nil { + return fmt.Errorf("unmarshalling disco key from db: %w", err) + } + node.DiscoKey = discoKey } - node.DiscoKey = discoKey endpoints := make([]netip.AddrPort, len(node.EndpointsDatabaseField)) for idx, ep := range node.EndpointsDatabaseField { addrPort, err := netip.ParseAddrPort(ep) if err != nil { - return fmt.Errorf("failed to parse endpoint from db: %w", err) + return fmt.Errorf("parsing endpoint from db: %w", err) } endpoints[idx] = addrPort @@ -308,23 +333,41 @@ func (node *Node) AfterFind(tx *gorm.DB) error { var hi tailcfg.Hostinfo if err := json.Unmarshal([]byte(node.HostinfoDatabaseField), &hi); err != nil { - log.Trace().Err(err).Msgf("Hostinfo content: %s", node.HostinfoDatabaseField) - - return fmt.Errorf("failed to unmarshal Hostinfo from db: %w", err) + return fmt.Errorf("unmarshalling hostinfo from database: %w", err) } node.Hostinfo = &hi + if node.IPv4DatabaseField.Valid { + ip, err := netip.ParseAddr(node.IPv4DatabaseField.String) + if err != nil { + return fmt.Errorf("parsing IPv4 from database: %w", err) + } + + node.IPv4 = &ip + } + + if node.IPv6DatabaseField.Valid { + ip, err := netip.ParseAddr(node.IPv6DatabaseField.String) + if err != nil { + return fmt.Errorf("parsing IPv6 from 
database: %w", err) + } + + node.IPv6 = &ip + } + return nil } func (node *Node) Proto() *v1.Node { nodeProto := &v1.Node{ - Id: node.ID, + Id: uint64(node.ID), MachineKey: node.MachineKey.String(), - NodeKey: node.NodeKey.String(), - DiscoKey: node.DiscoKey.String(), - IpAddresses: node.IPAddresses.StringSlice(), + NodeKey: node.NodeKey.String(), + DiscoKey: node.DiscoKey.String(), + + // TODO(kradalby): replace list with v4, v6 field? + IpAddresses: node.IPsAsString(), Name: node.Hostname, GivenName: node.GivenName, User: node.User.Proto(), @@ -486,8 +529,8 @@ func (nodes Nodes) String() string { return fmt.Sprintf("[ %s ](%d)", strings.Join(temp, ", "), len(temp)) } -func (nodes Nodes) IDMap() map[uint64]*Node { - ret := map[uint64]*Node{} +func (nodes Nodes) IDMap() map[NodeID]*Node { + ret := map[NodeID]*Node{} for _, node := range nodes { ret[node.ID] = node diff --git a/hscontrol/types/node_test.go b/hscontrol/types/node_test.go index 712a839e..157be89e 100644 --- a/hscontrol/types/node_test.go +++ b/hscontrol/types/node_test.go @@ -12,6 +12,10 @@ import ( ) func Test_NodeCanAccess(t *testing.T) { + iap := func(ipStr string) *netip.Addr { + ip := netip.MustParseAddr(ipStr) + return &ip + } tests := []struct { name string node1 Node @@ -22,10 +26,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "no-rules", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + IPv4: iap("10.0.0.1"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + IPv4: iap("10.0.0.2"), }, rules: []tailcfg.FilterRule{}, want: false, @@ -33,10 +37,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "wildcard", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + IPv4: iap("10.0.0.1"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + IPv4: iap("10.0.0.2"), }, rules: []tailcfg.FilterRule{ { @@ -54,10 +58,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "other-cant-access-src", 
node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, + IPv4: iap("100.64.0.1"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, + IPv4: iap("100.64.0.3"), }, rules: []tailcfg.FilterRule{ { @@ -72,10 +76,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "dest-cant-access-src", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, + IPv4: iap("100.64.0.3"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, + IPv4: iap("100.64.0.2"), }, rules: []tailcfg.FilterRule{ { @@ -90,10 +94,10 @@ func Test_NodeCanAccess(t *testing.T) { { name: "src-can-access-dest", node1: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, + IPv4: iap("100.64.0.2"), }, node2: Node{ - IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, + IPv4: iap("100.64.0.3"), }, rules: []tailcfg.FilterRule{ { @@ -118,32 +122,6 @@ func Test_NodeCanAccess(t *testing.T) { } } -func TestNodeAddressesOrder(t *testing.T) { - machineAddresses := NodeAddresses{ - netip.MustParseAddr("2001:db8::2"), - netip.MustParseAddr("100.64.0.2"), - netip.MustParseAddr("2001:db8::1"), - netip.MustParseAddr("100.64.0.1"), - } - - strSlice := machineAddresses.StringSlice() - expected := []string{ - "100.64.0.1", - "100.64.0.2", - "2001:db8::1", - "2001:db8::2", - } - - if len(strSlice) != len(expected) { - t.Fatalf("unexpected slice length: got %v, want %v", len(strSlice), len(expected)) - } - for i, addr := range strSlice { - if addr != expected[i] { - t.Errorf("unexpected address at index %v: got %v, want %v", i, addr, expected[i]) - } - } -} - func TestNodeFQDN(t *testing.T) { tests := []struct { name string diff --git a/hscontrol/types/preauth_key.go b/hscontrol/types/preauth_key.go index 0d8c9cff..8b02569a 100644 --- a/hscontrol/types/preauth_key.go +++ b/hscontrol/types/preauth_key.go @@ -14,11 +14,11 @@ type PreAuthKey struct { ID uint64 `gorm:"primary_key"` Key string UserID uint - 
User User + User User `gorm:"constraint:OnDelete:CASCADE;"` Reusable bool - Ephemeral bool `gorm:"default:false"` - Used bool `gorm:"default:false"` - ACLTags []PreAuthKeyACLTag + Ephemeral bool `gorm:"default:false"` + Used bool `gorm:"default:false"` + ACLTags []PreAuthKeyACLTag `gorm:"constraint:OnDelete:CASCADE;"` CreatedAt *time.Time Expiration *time.Time diff --git a/hscontrol/util/dns.go b/hscontrol/util/dns.go index c6bd2b69..ab3c90b7 100644 --- a/hscontrol/util/dns.go +++ b/hscontrol/util/dns.go @@ -103,33 +103,7 @@ func CheckForFQDNRules(name string) error { // From the netmask we can find out the wildcard bits (the bits that are not set in the netmask). // This allows us to then calculate the subnets included in the subsequent class block and generate the entries. -func GenerateMagicDNSRootDomains(ipPrefixes []netip.Prefix) []dnsname.FQDN { - fqdns := make([]dnsname.FQDN, 0, len(ipPrefixes)) - for _, ipPrefix := range ipPrefixes { - var generateDNSRoot func(netip.Prefix) []dnsname.FQDN - switch ipPrefix.Addr().BitLen() { - case ipv4AddressLength: - generateDNSRoot = generateIPv4DNSRootDomain - - case ipv6AddressLength: - generateDNSRoot = generateIPv6DNSRootDomain - - default: - panic( - fmt.Sprintf( - "unsupported IP version with address length %d", - ipPrefix.Addr().BitLen(), - ), - ) - } - - fqdns = append(fqdns, generateDNSRoot(ipPrefix)...) - } - - return fqdns -} - -func generateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { +func GenerateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { // Conversion to the std lib net.IPnet, a bit easier to operate netRange := netipx.PrefixIPNet(ipPrefix) maskBits, _ := netRange.Mask.Size() @@ -165,7 +139,27 @@ func generateIPv4DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { return fqdns } -func generateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { +// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`. 
+// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS +// server (listening in 100.100.100.100 udp/53) should be used for. +// +// Tailscale.com includes in the list: +// - the `BaseDomain` of the user +// - the reverse DNS entry for IPv6 (0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa., see below more on IPv6) +// - the reverse DNS entries for the IPv4 subnets covered by the user's `IPPrefix`. +// In the public SaaS this is [64-127].100.in-addr.arpa. +// +// The main purpose of this function is then generating the list of IPv4 entries. For the 100.64.0.0/10, this +// is clear, and could be hardcoded. But we are allowing any range as `IPPrefix`, so we need to find out the +// subnets when we have 172.16.0.0/16 (i.e., [0-255].16.172.in-addr.arpa.), or any other subnet. +// +// How IN-ADDR.ARPA domains work is defined in RFC1035 (section 3.5). Tailscale.com seems to adhere to this, +// and do not make use of RFC2317 ("Classless IN-ADDR.ARPA delegation") - hence generating the entries for the next +// class block only. + +// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask). +// This allows us to then calculate the subnets included in the subsequent class block and generate the entries. 
+func GenerateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN { const nibbleLen = 4 maskBits, _ := netipx.PrefixIPNet(ipPrefix).Mask.Size() diff --git a/hscontrol/util/dns_test.go b/hscontrol/util/dns_test.go index 9d9b08b3..2559cae6 100644 --- a/hscontrol/util/dns_test.go +++ b/hscontrol/util/dns_test.go @@ -148,10 +148,7 @@ func TestCheckForFQDNRules(t *testing.T) { } func TestMagicDNSRootDomains100(t *testing.T) { - prefixes := []netip.Prefix{ - netip.MustParsePrefix("100.64.0.0/10"), - } - domains := GenerateMagicDNSRootDomains(prefixes) + domains := GenerateIPv4DNSRootDomain(netip.MustParsePrefix("100.64.0.0/10")) found := false for _, domain := range domains { @@ -185,10 +182,7 @@ func TestMagicDNSRootDomains100(t *testing.T) { } func TestMagicDNSRootDomains172(t *testing.T) { - prefixes := []netip.Prefix{ - netip.MustParsePrefix("172.16.0.0/16"), - } - domains := GenerateMagicDNSRootDomains(prefixes) + domains := GenerateIPv4DNSRootDomain(netip.MustParsePrefix("172.16.0.0/16")) found := false for _, domain := range domains { @@ -213,20 +207,14 @@ func TestMagicDNSRootDomains172(t *testing.T) { // Happens when netmask is a multiple of 4 bits (sounds likely). 
func TestMagicDNSRootDomainsIPv6Single(t *testing.T) { - prefixes := []netip.Prefix{ - netip.MustParsePrefix("fd7a:115c:a1e0::/48"), - } - domains := GenerateMagicDNSRootDomains(prefixes) + domains := GenerateIPv6DNSRootDomain(netip.MustParsePrefix("fd7a:115c:a1e0::/48")) assert.Len(t, domains, 1) assert.Equal(t, "0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.", domains[0].WithTrailingDot()) } func TestMagicDNSRootDomainsIPv6SingleMultiple(t *testing.T) { - prefixes := []netip.Prefix{ - netip.MustParsePrefix("fd7a:115c:a1e0::/50"), - } - domains := GenerateMagicDNSRootDomains(prefixes) + domains := GenerateIPv6DNSRootDomain(netip.MustParsePrefix("fd7a:115c:a1e0::/50")) yieldsRoot := func(dom string) bool { for _, candidate := range domains { diff --git a/integration/acl_test.go b/integration/acl_test.go index 517e2dfb..9d763965 100644 --- a/integration/acl_test.go +++ b/integration/acl_test.go @@ -51,7 +51,7 @@ func aclScenario( clientsPerUser int, ) *Scenario { t.Helper() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) spec := map[string]int{ @@ -264,7 +264,7 @@ func TestACLHostsInNetMapTable(t *testing.T) { for name, testCase := range tests { t.Run(name, func(t *testing.T) { - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) spec := testCase.users diff --git a/integration/auth_oidc_test.go b/integration/auth_oidc_test.go index 36e74a8d..d24bf452 100644 --- a/integration/auth_oidc_test.go +++ b/integration/auth_oidc_test.go @@ -42,7 +42,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) { IntegrationSkip(t) t.Parallel() - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) scenario := AuthOIDCScenario{ @@ -83,7 +83,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) 
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -100,7 +100,7 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) { shortAccessTTL := 5 * time.Minute - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) baseScenario.pool.MaxWait = 5 * time.Minute @@ -142,7 +142,7 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() diff --git a/integration/auth_web_flow_test.go b/integration/auth_web_flow_test.go index aa589fac..8e121ca0 100644 --- a/integration/auth_web_flow_test.go +++ b/integration/auth_web_flow_test.go @@ -26,7 +26,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) { IntegrationSkip(t) t.Parallel() - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) if err != nil { t.Fatalf("failed to create scenario: %s", err) } @@ -53,7 +53,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -67,7 +67,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { IntegrationSkip(t) t.Parallel() - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) scenario := AuthWebFlowScenario{ @@ -92,7 +92,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() diff --git a/integration/cli_test.go 
b/integration/cli_test.go index af7b073b..57edf58e 100644 --- a/integration/cli_test.go +++ b/integration/cli_test.go @@ -32,7 +32,7 @@ func TestUserCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -112,7 +112,7 @@ func TestPreAuthKeyCommand(t *testing.T) { user := "preauthkeyspace" count := 3 - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -254,7 +254,7 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) { user := "pre-auth-key-without-exp-user" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -317,7 +317,7 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { user := "pre-auth-key-reus-ephm-user" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -388,13 +388,108 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) { assert.Len(t, listedPreAuthKeys, 3) } +func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + user1 := "user1" + user2 := "user2" + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + spec := map[string]int{ + user1: 1, + user2: 0, + } + + err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak")) + assertNoErr(t, err) + + headscale, err := scenario.Headscale() + assertNoErr(t, err) + + var user2Key v1.PreAuthKey + + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "preauthkeys", + "--user", + user2, + "create", + "--reusable", + "--expiration", + "24h", + "--output", + "json", + "--tags", + "tag:test1,tag:test2", + }, + &user2Key, + ) + assertNoErr(t, err) + + allClients, err := 
scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + assert.Len(t, allClients, 1) + + client := allClients[0] + + // Log out from user1 + err = client.Logout() + assertNoErr(t, err) + + err = scenario.WaitForTailscaleLogout() + assertNoErr(t, err) + + status, err := client.Status() + assertNoErr(t, err) + if status.BackendState == "Starting" || status.BackendState == "Running" { + t.Fatalf("expected node to be logged out, backend state: %s", status.BackendState) + } + + err = client.Login(headscale.GetEndpoint(), user2Key.GetKey()) + assertNoErr(t, err) + + status, err = client.Status() + assertNoErr(t, err) + if status.BackendState != "Running" { + t.Fatalf("expected node to be logged in, backend state: %s", status.BackendState) + } + + if status.Self.UserID.String() != "userid:2" { + t.Fatalf("expected node to be logged in as userid:2, got: %s", status.Self.UserID.String()) + } + + var listNodes []v1.Node + err = executeAndUnmarshal( + headscale, + []string{ + "headscale", + "nodes", + "list", + "--output", + "json", + }, + &listNodes, + ) + assert.Nil(t, err) + assert.Len(t, listNodes, 1) + + assert.Equal(t, "user2", listNodes[0].User.Name) +} + func TestApiKeyCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() count := 5 - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -562,7 +657,7 @@ func TestNodeTagCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -695,7 +790,7 @@ func TestNodeAdvertiseTagNoACLCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -745,7 +840,7 @@ func TestNodeAdvertiseTagWithACLCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + 
scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -808,7 +903,7 @@ func TestNodeCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -1049,7 +1144,7 @@ func TestNodeExpireCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -1176,7 +1271,7 @@ func TestNodeRenameCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -1343,7 +1438,7 @@ func TestNodeMoveCommand(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() diff --git a/integration/embedded_derp_test.go b/integration/embedded_derp_test.go index e4f76ec4..39a9acca 100644 --- a/integration/embedded_derp_test.go +++ b/integration/embedded_derp_test.go @@ -23,7 +23,7 @@ func TestDERPServerScenario(t *testing.T) { IntegrationSkip(t) // t.Parallel() - baseScenario, err := NewScenario() + baseScenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) scenario := EmbeddedDERPServerScenario{ @@ -37,25 +37,11 @@ func TestDERPServerScenario(t *testing.T) { // "user1": len(MustTestVersions), } - headscaleConfig := map[string]string{ - "HEADSCALE_DERP_URLS": "", - "HEADSCALE_DERP_SERVER_ENABLED": "true", - "HEADSCALE_DERP_SERVER_REGION_ID": "999", - "HEADSCALE_DERP_SERVER_REGION_CODE": "headscale", - "HEADSCALE_DERP_SERVER_REGION_NAME": "Headscale Embedded DERP", - "HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR": "0.0.0.0:3478", - "HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH": "/tmp/derp.key", - - // Envknob for enabling DERP debug logs - 
"DERP_DEBUG_LOGS": "true", - "DERP_PROBER_DEBUG_LOGS": "true", - } - err = scenario.CreateHeadscaleEnv( spec, - hsic.WithConfigEnv(headscaleConfig), hsic.WithTestName("derpserver"), hsic.WithExtraPorts([]string{"3478/udp"}), + hsic.WithEmbeddedDERPServerOnly(), hsic.WithTLS(), hsic.WithHostnameAsServerURL(), ) diff --git a/integration/general_test.go b/integration/general_test.go index 9aae26fc..245e8f09 100644 --- a/integration/general_test.go +++ b/integration/general_test.go @@ -1,6 +1,7 @@ package integration import ( + "context" "encoding/json" "fmt" "net/netip" @@ -9,11 +10,13 @@ import ( "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" + "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/integration/hsic" "github.com/juanfont/headscale/integration/tsic" "github.com/rs/zerolog/log" "github.com/samber/lo" "github.com/stretchr/testify/assert" + "golang.org/x/sync/errgroup" "tailscale.com/client/tailscale/apitype" "tailscale.com/types/key" ) @@ -22,7 +25,7 @@ func TestPingAllByIP(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -33,26 +36,13 @@ func TestPingAllByIP(t *testing.T) { "user2": len(MustTestVersions), } - headscaleConfig := map[string]string{ - "HEADSCALE_DERP_URLS": "", - "HEADSCALE_DERP_SERVER_ENABLED": "true", - "HEADSCALE_DERP_SERVER_REGION_ID": "999", - "HEADSCALE_DERP_SERVER_REGION_CODE": "headscale", - "HEADSCALE_DERP_SERVER_REGION_NAME": "Headscale Embedded DERP", - "HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR": "0.0.0.0:3478", - "HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH": "/tmp/derp.key", - - // Envknob for enabling DERP debug logs - "DERP_DEBUG_LOGS": "true", - "DERP_PROBER_DEBUG_LOGS": "true", - } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"), - hsic.WithConfigEnv(headscaleConfig), + hsic.WithEmbeddedDERPServerOnly(), 
hsic.WithTLS(), hsic.WithHostnameAsServerURL(), + hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom), ) assertNoErrHeadscaleEnv(t, err) @@ -65,7 +55,7 @@ func TestPingAllByIP(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -79,7 +69,7 @@ func TestPingAllByIPPublicDERP(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -103,7 +93,7 @@ func TestPingAllByIPPublicDERP(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -117,7 +107,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -135,7 +125,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) clientIPs := make(map[TailscaleClient][]netip.Addr) for _, client := range allClients { @@ -176,7 +166,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allClients, err = scenario.ListTailscaleClients() assertNoErrListClients(t, err) @@ -228,7 +218,7 @@ func TestEphemeral(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -311,7 +301,7 @@ func 
TestPingAllByHostname(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -329,7 +319,7 @@ func TestPingAllByHostname(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allHostnames, err := scenario.ListTailscaleClientsFQDNs() assertNoErrListFQDN(t, err) @@ -347,20 +337,20 @@ func TestTaildrop(t *testing.T) { IntegrationSkip(t) t.Parallel() - retry := func(times int, sleepInverval time.Duration, doWork func() error) error { + retry := func(times int, sleepInterval time.Duration, doWork func() error) error { var err error for attempts := 0; attempts < times; attempts++ { err = doWork() if err == nil { return nil } - time.Sleep(sleepInverval) + time.Sleep(sleepInterval) } return err } - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -521,7 +511,7 @@ func TestResolveMagicDNS(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -539,7 +529,7 @@ func TestResolveMagicDNS(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) // Poor mans cache _, err = scenario.ListTailscaleClientsFQDNs() @@ -589,7 +579,7 @@ func TestExpireNode(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -609,7 +599,7 @@ func TestExpireNode(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, 
index int) string { return x.String() @@ -711,11 +701,11 @@ func TestExpireNode(t *testing.T) { } } -func TestNodeOnlineLastSeenStatus(t *testing.T) { +func TestNodeOnlineStatus(t *testing.T) { IntegrationSkip(t) t.Parallel() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -723,7 +713,7 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { "user1": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("onlinelastseen")) + err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("online")) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() @@ -735,7 +725,7 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { err = scenario.WaitForTailscaleSync() assertNoErrSync(t, err) - assertClientsState(t, allClients) + // assertClientsState(t, allClients) allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { return x.String() @@ -755,8 +745,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { headscale, err := scenario.Headscale() assertNoErr(t, err) - keepAliveInterval := 60 * time.Second - // Duration is chosen arbitrarily, 10m is reported in #1561 testDuration := 12 * time.Minute start := time.Now() @@ -780,11 +768,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { err = json.Unmarshal([]byte(result), &nodes) assertNoErr(t, err) - now := time.Now() - - // Threshold with some leeway - lastSeenThreshold := now.Add(-keepAliveInterval - (10 * time.Second)) - // Verify that headscale reports the nodes as online for _, node := range nodes { // All nodes should be online @@ -795,18 +778,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { node.GetName(), time.Since(start), ) - - lastSeen := node.GetLastSeen().AsTime() - // All nodes should have been last seen between now and the keepAliveInterval - assert.Truef( - t, - lastSeen.After(lastSeenThreshold), - "node (%s) lastSeen (%v) was 
not %s after the threshold (%v)", - node.GetName(), - lastSeen, - keepAliveInterval, - lastSeenThreshold, - ) } // Verify that all nodes report all nodes to be online @@ -824,7 +795,7 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { continue } - // All peers of this nodess are reporting to be + // All peers of this node are reporting to be // connected to the control server assert.Truef( t, @@ -834,15 +805,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { client.Hostname(), time.Since(start), ) - - // from docs: last seen to tailcontrol; only present if offline - // assert.Nilf( - // t, - // peerStatus.LastSeen, - // "expected node %s to not have LastSeen set, got %s", - // peerStatus.HostName, - // peerStatus.LastSeen, - // ) } } @@ -850,3 +812,89 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) { time.Sleep(time.Second) } } + +// TestPingAllByIPManyUpDown is a variant of the PingAll +// test which will take the tailscale node up and down +// three times ensuring they are able to re-establish connectivity. +func TestPingAllByIPManyUpDown(t *testing.T) { + IntegrationSkip(t) + t.Parallel() + + scenario, err := NewScenario(dockertestMaxWait()) + assertNoErr(t, err) + defer scenario.Shutdown() + + // TODO(kradalby): it does not look like the user thing works, only second + // get created? maybe only when many? 
+ spec := map[string]int{ + "user1": len(MustTestVersions), + "user2": len(MustTestVersions), + } + + err = scenario.CreateHeadscaleEnv(spec, + []tsic.Option{}, + hsic.WithTestName("pingallbyipmany"), + hsic.WithEmbeddedDERPServerOnly(), + hsic.WithTLS(), + hsic.WithHostnameAsServerURL(), + ) + assertNoErrHeadscaleEnv(t, err) + + allClients, err := scenario.ListTailscaleClients() + assertNoErrListClients(t, err) + + allIps, err := scenario.ListTailscaleClientsIPs() + assertNoErrListClientIPs(t, err) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + // assertClientsState(t, allClients) + + allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string { + return x.String() + }) + + success := pingAllHelper(t, allClients, allAddrs) + t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) + + wg, _ := errgroup.WithContext(context.Background()) + + for run := range 3 { + t.Logf("Starting DownUpPing run %d", run+1) + + for _, client := range allClients { + c := client + wg.Go(func() error { + t.Logf("taking down %q", c.Hostname()) + return c.Down() + }) + } + + if err := wg.Wait(); err != nil { + t.Fatalf("failed to take down all nodes: %s", err) + } + + time.Sleep(5 * time.Second) + + for _, client := range allClients { + c := client + wg.Go(func() error { + t.Logf("bringing up %q", c.Hostname()) + return c.Up() + }) + } + + if err := wg.Wait(); err != nil { + t.Fatalf("failed to take down all nodes: %s", err) + } + + time.Sleep(5 * time.Second) + + err = scenario.WaitForTailscaleSync() + assertNoErrSync(t, err) + + success := pingAllHelper(t, allClients, allAddrs) + t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps)) + } +} diff --git a/integration/hsic/config.go b/integration/hsic/config.go index 606718c7..7953799e 100644 --- a/integration/hsic/config.go +++ b/integration/hsic/config.go @@ -1,5 +1,7 @@ package hsic +import "github.com/juanfont/headscale/hscontrol/types" + // const ( // 
defaultEphemeralNodeInactivityTimeout = time.Second * 30 // defaultNodeUpdateCheckInterval = time.Second * 10 @@ -71,7 +73,6 @@ database: type: sqlite3 sqlite.path: /tmp/integration_test_db.sqlite3 ephemeral_node_inactivity_timeout: 30m -node_update_check_interval: 10s prefixes: v6: fd7a:115c:a1e0::/48 v4: 100.64.0.0/10 @@ -114,7 +115,6 @@ func DefaultConfigEnv() map[string]string { "HEADSCALE_DATABASE_TYPE": "sqlite", "HEADSCALE_DATABASE_SQLITE_PATH": "/tmp/integration_test_db.sqlite3", "HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "30m", - "HEADSCALE_NODE_UPDATE_CHECK_INTERVAL": "10s", "HEADSCALE_PREFIXES_V4": "100.64.0.0/10", "HEADSCALE_PREFIXES_V6": "fd7a:115c:a1e0::/48", "HEADSCALE_DNS_CONFIG_BASE_DOMAIN": "headscale.net", @@ -124,10 +124,14 @@ func DefaultConfigEnv() map[string]string { "HEADSCALE_PRIVATE_KEY_PATH": "/tmp/private.key", "HEADSCALE_NOISE_PRIVATE_KEY_PATH": "/tmp/noise_private.key", "HEADSCALE_LISTEN_ADDR": "0.0.0.0:8080", - "HEADSCALE_METRICS_LISTEN_ADDR": "127.0.0.1:9090", + "HEADSCALE_METRICS_LISTEN_ADDR": "0.0.0.0:9090", "HEADSCALE_SERVER_URL": "http://headscale:8080", "HEADSCALE_DERP_URLS": "https://controlplane.tailscale.com/derpmap/default", "HEADSCALE_DERP_AUTO_UPDATE_ENABLED": "false", "HEADSCALE_DERP_UPDATE_FREQUENCY": "1m", + + // a bunch of tests (ACL/Policy) rely on predictable IP alloc, + // so ensure the sequential alloc is used by default. 
+ "HEADSCALE_PREFIXES_ALLOCATION": string(types.IPAllocationStrategySequential), } } diff --git a/integration/hsic/hsic.go b/integration/hsic/hsic.go index b61827ac..5b55a0a8 100644 --- a/integration/hsic/hsic.go +++ b/integration/hsic/hsic.go @@ -11,6 +11,7 @@ import ( "encoding/pem" "errors" "fmt" + "io" "log" "math/big" "net" @@ -18,12 +19,14 @@ import ( "net/url" "os" "path" + "strconv" "strings" "time" "github.com/davecgh/go-spew/spew" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/policy" + "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/integration/dockertestutil" "github.com/juanfont/headscale/integration/integrationutil" @@ -173,6 +176,41 @@ func WithPostgres() Option { } } +// WithIPAllocationStrategy sets the tests IP Allocation strategy. +func WithIPAllocationStrategy(strat types.IPAllocationStrategy) Option { + return func(hsic *HeadscaleInContainer) { + hsic.env["HEADSCALE_PREFIXES_ALLOCATION"] = string(strat) + } +} + +// WithEmbeddedDERPServerOnly configures Headscale to start +// and only use the embedded DERP server. +// It requires WithTLS and WithHostnameAsServerURL to be +// set. +func WithEmbeddedDERPServerOnly() Option { + return func(hsic *HeadscaleInContainer) { + hsic.env["HEADSCALE_DERP_URLS"] = "" + hsic.env["HEADSCALE_DERP_SERVER_ENABLED"] = "true" + hsic.env["HEADSCALE_DERP_SERVER_REGION_ID"] = "999" + hsic.env["HEADSCALE_DERP_SERVER_REGION_CODE"] = "headscale" + hsic.env["HEADSCALE_DERP_SERVER_REGION_NAME"] = "Headscale Embedded DERP" + hsic.env["HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR"] = "0.0.0.0:3478" + hsic.env["HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH"] = "/tmp/derp.key" + + // Envknob for enabling DERP debug logs + hsic.env["DERP_DEBUG_LOGS"] = "true" + hsic.env["DERP_PROBER_DEBUG_LOGS"] = "true" + } +} + +// WithTuning allows changing the tuning settings easily. 
+func WithTuning(batchTimeout time.Duration, mapSessionChanSize int) Option { + return func(hsic *HeadscaleInContainer) { + hsic.env["HEADSCALE_TUNING_BATCH_CHANGE_DELAY"] = batchTimeout.String() + hsic.env["HEADSCALE_TUNING_NODE_MAPSESSION_BUFFERED_CHAN_SIZE"] = strconv.Itoa(mapSessionChanSize) + } +} + // New returns a new HeadscaleInContainer instance. func New( pool *dockertest.Pool, @@ -248,9 +286,13 @@ func New( } env := []string{ - "HEADSCALE_PROFILING_ENABLED=1", - "HEADSCALE_PROFILING_PATH=/tmp/profile", + "HEADSCALE_DEBUG_PROFILING_ENABLED=1", + "HEADSCALE_DEBUG_PROFILING_PATH=/tmp/profile", "HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH=/tmp/mapresponses", + "HEADSCALE_DEBUG_DEADLOCK=1", + "HEADSCALE_DEBUG_DEADLOCK_TIMEOUT=5s", + "HEADSCALE_DEBUG_HIGH_CARDINALITY_METRICS=1", + "HEADSCALE_DEBUG_DUMP_CONFIG=1", } for key, value := range hsic.env { env = append(env, fmt.Sprintf("%s=%s", key, value)) @@ -260,7 +302,7 @@ func New( runOptions := &dockertest.RunOptions{ Name: hsic.hostname, - ExposedPorts: append([]string{portProto}, hsic.extraPorts...), + ExposedPorts: append([]string{portProto, "9090/tcp"}, hsic.extraPorts...), Networks: []*dockertest.Network{network}, // Cmd: []string{"headscale", "serve"}, // TODO(kradalby): Get rid of this hack, we currently need to give us some @@ -359,6 +401,14 @@ func (t *HeadscaleInContainer) Shutdown() error { ) } + err = t.SaveMetrics(fmt.Sprintf("/tmp/control/%s_metrics.txt", t.hostname)) + if err != nil { + log.Printf( + "Failed to save metrics from control: %s", + err, + ) + } + // Send a interrupt signal to the "headscale" process inside the container // allowing it to shut down gracefully and flush the profile to disk. // The container will live for a bit longer due to the sleep at the end. 
@@ -411,6 +461,25 @@ func (t *HeadscaleInContainer) SaveLog(path string) error { return dockertestutil.SaveLog(t.pool, t.container, path) } +func (t *HeadscaleInContainer) SaveMetrics(savePath string) error { + resp, err := http.Get(fmt.Sprintf("http://%s:9090/metrics", t.hostname)) + if err != nil { + return fmt.Errorf("getting metrics: %w", err) + } + defer resp.Body.Close() + out, err := os.Create(savePath) + if err != nil { + return fmt.Errorf("creating file for metrics: %w", err) + } + defer out.Close() + _, err = io.Copy(out, resp.Body) + if err != nil { + return fmt.Errorf("copy response to file: %w", err) + } + + return nil +} + func (t *HeadscaleInContainer) SaveProfile(savePath string) error { tarFile, err := t.FetchPath("/tmp/profile") if err != nil { @@ -682,7 +751,7 @@ func createCertificate(hostname string) ([]byte, []byte, error) { Locality: []string{"Leiden"}, }, NotBefore: time.Now(), - NotAfter: time.Now().Add(60 * time.Minute), + NotAfter: time.Now().Add(60 * time.Hour), IsCA: true, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, diff --git a/integration/route_test.go b/integration/route_test.go index 75296fd5..48b6c07f 100644 --- a/integration/route_test.go +++ b/integration/route_test.go @@ -28,7 +28,7 @@ func TestEnablingRoutes(t *testing.T) { user := "enable-routing" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErrf(t, "failed to create scenario: %s", err) defer scenario.Shutdown() @@ -212,7 +212,11 @@ func TestEnablingRoutes(t *testing.T) { if route.GetId() == routeToBeDisabled.GetId() { assert.Equal(t, false, route.GetEnabled()) - assert.Equal(t, false, route.GetIsPrimary()) + + // since this is the only route of this cidr, + // it will not failover, and remain Primary + // until something can replace it. 
+ assert.Equal(t, true, route.GetIsPrimary()) } else { assert.Equal(t, true, route.GetEnabled()) assert.Equal(t, true, route.GetIsPrimary()) @@ -246,7 +250,7 @@ func TestHASubnetRouterFailover(t *testing.T) { user := "enable-routing" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErrf(t, "failed to create scenario: %s", err) defer scenario.Shutdown() @@ -291,6 +295,7 @@ func TestHASubnetRouterFailover(t *testing.T) { client := allClients[2] + t.Logf("Advertise route from r1 (%s) and r2 (%s), making it HA, n1 is primary", subRouter1.Hostname(), subRouter2.Hostname()) // advertise HA route on node 1 and 2 // ID 1 will be primary // ID 2 will be secondary @@ -384,12 +389,12 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is primary assert.Equal(t, true, enablingRoutes[0].GetAdvertised()) assert.Equal(t, true, enablingRoutes[0].GetEnabled()) - assert.Equal(t, true, enablingRoutes[0].GetIsPrimary()) + assert.Equal(t, true, enablingRoutes[0].GetIsPrimary(), "both subnet routers are up, expected r1 to be primary") // Node 2 is not primary assert.Equal(t, true, enablingRoutes[1].GetAdvertised()) assert.Equal(t, true, enablingRoutes[1].GetEnabled()) - assert.Equal(t, false, enablingRoutes[1].GetIsPrimary()) + assert.Equal(t, false, enablingRoutes[1].GetIsPrimary(), "both subnet routers are up, expected r2 to be non-primary") // Verify that the client has routes from the primary machine srs1, err := subRouter1.Status() @@ -401,6 +406,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus := clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus := clientStatus.Peer[srs2.Self.PublicKey] + assert.True(t, srs1PeerStatus.Online, "r1 up, r2 up") + assert.True(t, srs2PeerStatus.Online, "r1 up, r2 up") + assertNotNil(t, srs1PeerStatus.PrimaryRoutes) assert.Nil(t, srs2PeerStatus.PrimaryRoutes) @@ -411,7 +419,8 @@ func TestHASubnetRouterFailover(t *testing.T) { ) // Take down the current primary - t.Logf("taking 
down subnet router 1 (%s)", subRouter1.Hostname()) + t.Logf("taking down subnet router r1 (%s)", subRouter1.Hostname()) + t.Logf("expecting r2 (%s) to take over as primary", subRouter2.Hostname()) err = subRouter1.Down() assertNoErr(t, err) @@ -435,15 +444,12 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is not primary assert.Equal(t, true, routesAfterMove[0].GetAdvertised()) assert.Equal(t, true, routesAfterMove[0].GetEnabled()) - assert.Equal(t, false, routesAfterMove[0].GetIsPrimary()) + assert.Equal(t, false, routesAfterMove[0].GetIsPrimary(), "r1 is down, expected r2 to be primary") // Node 2 is primary assert.Equal(t, true, routesAfterMove[1].GetAdvertised()) assert.Equal(t, true, routesAfterMove[1].GetEnabled()) - assert.Equal(t, true, routesAfterMove[1].GetIsPrimary()) - - // TODO(kradalby): Check client status - // Route is expected to be on SR2 + assert.Equal(t, true, routesAfterMove[1].GetIsPrimary(), "r1 is down, expected r2 to be primary") srs2, err = subRouter2.Status() @@ -453,6 +459,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey] + assert.False(t, srs1PeerStatus.Online, "r1 down, r2 down") + assert.True(t, srs2PeerStatus.Online, "r1 down, r2 up") + assert.Nil(t, srs1PeerStatus.PrimaryRoutes) assertNotNil(t, srs2PeerStatus.PrimaryRoutes) @@ -465,7 +474,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // Take down subnet router 2, leaving none available - t.Logf("taking down subnet router 2 (%s)", subRouter2.Hostname()) + t.Logf("taking down subnet router r2 (%s)", subRouter2.Hostname()) + t.Logf("expecting r2 (%s) to remain primary, no other available", subRouter2.Hostname()) err = subRouter2.Down() assertNoErr(t, err) @@ -489,14 +499,14 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is not primary assert.Equal(t, true, routesAfterBothDown[0].GetAdvertised()) assert.Equal(t, true, 
routesAfterBothDown[0].GetEnabled()) - assert.Equal(t, false, routesAfterBothDown[0].GetIsPrimary()) + assert.Equal(t, false, routesAfterBothDown[0].GetIsPrimary(), "r1 and r2 is down, expected r2 to _still_ be primary") // Node 2 is primary // if the node goes down, but no other suitable route is // available, keep the last known good route. assert.Equal(t, true, routesAfterBothDown[1].GetAdvertised()) assert.Equal(t, true, routesAfterBothDown[1].GetEnabled()) - assert.Equal(t, true, routesAfterBothDown[1].GetIsPrimary()) + assert.Equal(t, true, routesAfterBothDown[1].GetIsPrimary(), "r1 and r2 is down, expected r2 to _still_ be primary") // TODO(kradalby): Check client status // Both are expected to be down @@ -508,6 +518,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey] + assert.False(t, srs1PeerStatus.Online, "r1 down, r2 down") + assert.False(t, srs2PeerStatus.Online, "r1 down, r2 down") + assert.Nil(t, srs1PeerStatus.PrimaryRoutes) assertNotNil(t, srs2PeerStatus.PrimaryRoutes) @@ -520,7 +533,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // Bring up subnet router 1, making the route available from there. 
- t.Logf("bringing up subnet router 1 (%s)", subRouter1.Hostname()) + t.Logf("bringing up subnet router r1 (%s)", subRouter1.Hostname()) + t.Logf("expecting r1 (%s) to take over as primary (only one online)", subRouter1.Hostname()) err = subRouter1.Up() assertNoErr(t, err) @@ -544,12 +558,12 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is primary assert.Equal(t, true, routesAfter1Up[0].GetAdvertised()) assert.Equal(t, true, routesAfter1Up[0].GetEnabled()) - assert.Equal(t, true, routesAfter1Up[0].GetIsPrimary()) + assert.Equal(t, true, routesAfter1Up[0].GetIsPrimary(), "r1 is back up, expected r1 to become be primary") // Node 2 is not primary assert.Equal(t, true, routesAfter1Up[1].GetAdvertised()) assert.Equal(t, true, routesAfter1Up[1].GetEnabled()) - assert.Equal(t, false, routesAfter1Up[1].GetIsPrimary()) + assert.Equal(t, false, routesAfter1Up[1].GetIsPrimary(), "r1 is back up, expected r1 to become be primary") // Verify that the route is announced from subnet router 1 clientStatus, err = client.Status() @@ -558,6 +572,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey] + assert.True(t, srs1PeerStatus.Online, "r1 is back up, r2 down") + assert.False(t, srs2PeerStatus.Online, "r1 is back up, r2 down") + assert.NotNil(t, srs1PeerStatus.PrimaryRoutes) assert.Nil(t, srs2PeerStatus.PrimaryRoutes) @@ -570,7 +587,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // Bring up subnet router 2, should result in no change. 
- t.Logf("bringing up subnet router 2 (%s)", subRouter2.Hostname()) + t.Logf("bringing up subnet router r2 (%s)", subRouter2.Hostname()) + t.Logf("both online, expecting r1 (%s) to still be primary (no flapping)", subRouter1.Hostname()) err = subRouter2.Up() assertNoErr(t, err) @@ -594,12 +612,12 @@ func TestHASubnetRouterFailover(t *testing.T) { // Node 1 is not primary assert.Equal(t, true, routesAfter2Up[0].GetAdvertised()) assert.Equal(t, true, routesAfter2Up[0].GetEnabled()) - assert.Equal(t, true, routesAfter2Up[0].GetIsPrimary()) + assert.Equal(t, true, routesAfter2Up[0].GetIsPrimary(), "r1 and r2 is back up, expected r1 to _still_ be primary") // Node 2 is primary assert.Equal(t, true, routesAfter2Up[1].GetAdvertised()) assert.Equal(t, true, routesAfter2Up[1].GetEnabled()) - assert.Equal(t, false, routesAfter2Up[1].GetIsPrimary()) + assert.Equal(t, false, routesAfter2Up[1].GetIsPrimary(), "r1 and r2 is back up, expected r1 to _still_ be primary") // Verify that the route is announced from subnet router 1 clientStatus, err = client.Status() @@ -608,6 +626,9 @@ func TestHASubnetRouterFailover(t *testing.T) { srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey] srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey] + assert.True(t, srs1PeerStatus.Online, "r1 up, r2 up") + assert.True(t, srs2PeerStatus.Online, "r1 up, r2 up") + assert.NotNil(t, srs1PeerStatus.PrimaryRoutes) assert.Nil(t, srs2PeerStatus.PrimaryRoutes) @@ -620,7 +641,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // Disable the route of subnet router 1, making it failover to 2 - t.Logf("disabling route in subnet router 1 (%s)", subRouter1.Hostname()) + t.Logf("disabling route in subnet router r1 (%s)", subRouter1.Hostname()) + t.Logf("expecting route to failover to r2 (%s), which is still available", subRouter2.Hostname()) _, err = headscale.Execute( []string{ "headscale", @@ -648,7 +670,7 @@ func TestHASubnetRouterFailover(t *testing.T) { assertNoErr(t, err) assert.Len(t, 
routesAfterDisabling1, 2) - t.Logf("routes after disabling1 %#v", routesAfterDisabling1) + t.Logf("routes after disabling r1 %#v", routesAfterDisabling1) // Node 1 is not primary assert.Equal(t, true, routesAfterDisabling1[0].GetAdvertised()) @@ -680,6 +702,7 @@ func TestHASubnetRouterFailover(t *testing.T) { // enable the route of subnet router 1, no change expected t.Logf("enabling route in subnet router 1 (%s)", subRouter1.Hostname()) + t.Logf("both online, expecting r2 (%s) to still be primary (no flapping)", subRouter2.Hostname()) _, err = headscale.Execute( []string{ "headscale", @@ -736,7 +759,8 @@ func TestHASubnetRouterFailover(t *testing.T) { } // delete the route of subnet router 2, failover to one expected - t.Logf("deleting route in subnet router 2 (%s)", subRouter2.Hostname()) + t.Logf("deleting route in subnet router r2 (%s)", subRouter2.Hostname()) + t.Logf("expecting route to failover to r1 (%s)", subRouter1.Hostname()) _, err = headscale.Execute( []string{ "headscale", @@ -764,7 +788,7 @@ func TestHASubnetRouterFailover(t *testing.T) { assertNoErr(t, err) assert.Len(t, routesAfterDeleting2, 1) - t.Logf("routes after deleting2 %#v", routesAfterDeleting2) + t.Logf("routes after deleting r2 %#v", routesAfterDeleting2) // Node 1 is primary assert.Equal(t, true, routesAfterDeleting2[0].GetAdvertised()) @@ -798,7 +822,7 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) { user := "enable-disable-routing" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErrf(t, "failed to create scenario: %s", err) defer scenario.Shutdown() @@ -942,7 +966,7 @@ func TestSubnetRouteACL(t *testing.T) { user := "subnet-route-acl" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErrf(t, "failed to create scenario: %s", err) defer scenario.Shutdown() diff --git a/integration/scenario.go b/integration/scenario.go index a2c63e6f..bd004247 100644 --- a/integration/scenario.go +++ 
b/integration/scenario.go @@ -8,6 +8,7 @@ import ( "os" "sort" "sync" + "time" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/util" @@ -50,6 +51,11 @@ var ( tailscaleVersions2021 = map[string]bool{ "head": true, "unstable": true, + "1.66": true, // CapVer: not checked + "1.64": true, // CapVer: not checked + "1.62": true, // CapVer: not checked + "1.60": true, // CapVer: not checked + "1.58": true, // CapVer: not checked "1.56": true, // CapVer: 82 "1.54": true, // CapVer: 79 "1.52": true, // CapVer: 79 @@ -139,7 +145,7 @@ type Scenario struct { // NewScenario creates a test Scenario which can be used to bootstraps a ControlServer with // a set of Users and TailscaleClients. -func NewScenario() (*Scenario, error) { +func NewScenario(maxWait time.Duration) (*Scenario, error) { hash, err := util.GenerateRandomStringDNSSafe(scenarioHashLength) if err != nil { return nil, err @@ -150,7 +156,7 @@ func NewScenario() (*Scenario, error) { return nil, fmt.Errorf("could not connect to docker: %w", err) } - pool.MaxWait = dockertestMaxWait() + pool.MaxWait = maxWait networkName := fmt.Sprintf("hs-%s", hash) if overrideNetworkName := os.Getenv("HEADSCALE_TEST_NETWORK_NAME"); overrideNetworkName != "" { @@ -420,8 +426,10 @@ func (s *Scenario) WaitForTailscaleSync() error { if err != nil { for _, user := range s.users { for _, client := range user.Clients { - peers, _ := client.PrettyPeers() - log.Println(peers) + peers, allOnline, _ := client.FailingPeersAsString() + if !allOnline { + log.Println(peers) + } } } } @@ -447,7 +455,7 @@ func (s *Scenario) WaitForTailscaleSyncWithPeerCount(peerCount int) error { return nil } -// CreateHeadscaleEnv is a conventient method returning a complete Headcale +// CreateHeadscaleEnv is a convenient method returning a complete Headscale // test environment with nodes of all versions, joined to the server with X // users. 
func (s *Scenario) CreateHeadscaleEnv( @@ -508,7 +516,7 @@ func (s *Scenario) GetIPs(user string) ([]netip.Addr, error) { return ips, fmt.Errorf("failed to get ips: %w", errNoUserAvailable) } -// GetIPs returns all TailscaleClients associated with a User in a Scenario. +// GetClients returns all TailscaleClients associated with a User in a Scenario. func (s *Scenario) GetClients(user string) ([]TailscaleClient, error) { var clients []TailscaleClient if ns, ok := s.users[user]; ok { @@ -584,7 +592,7 @@ func (s *Scenario) ListTailscaleClientsIPs(users ...string) ([]netip.Addr, error return allIps, nil } -// ListTailscaleClientsIPs returns a list of FQDN based on Users +// ListTailscaleClientsFQDNs returns a list of FQDN based on Users // passed as parameters. func (s *Scenario) ListTailscaleClientsFQDNs(users ...string) ([]string, error) { allFQDNs := make([]string, 0) diff --git a/integration/scenario_test.go b/integration/scenario_test.go index cc9810a4..ea941ed7 100644 --- a/integration/scenario_test.go +++ b/integration/scenario_test.go @@ -33,7 +33,7 @@ func TestHeadscale(t *testing.T) { user := "test-space" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -78,7 +78,7 @@ func TestCreateTailscale(t *testing.T) { user := "only-create-containers" - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() @@ -114,7 +114,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) { count := 1 - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) defer scenario.Shutdown() diff --git a/integration/ssh_test.go b/integration/ssh_test.go index 587190e4..6d053b0d 100644 --- a/integration/ssh_test.go +++ b/integration/ssh_test.go @@ -44,7 +44,7 @@ var retry = func(times int, sleepInterval time.Duration, func sshScenario(t *testing.T, policy *policy.ACLPolicy, 
clientsPerUser int) *Scenario { t.Helper() - scenario, err := NewScenario() + scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) spec := map[string]int{ diff --git a/integration/tailscale.go b/integration/tailscale.go index 9d6796bd..2ea3faa9 100644 --- a/integration/tailscale.go +++ b/integration/tailscale.go @@ -27,7 +27,7 @@ type TailscaleClient interface { Down() error IPs() ([]netip.Addr, error) FQDN() (string, error) - Status() (*ipnstate.Status, error) + Status(...bool) (*ipnstate.Status, error) Netmap() (*netmap.NetworkMap, error) Netcheck() (*netcheck.Report, error) WaitForNeedsLogin() error @@ -36,5 +36,8 @@ type TailscaleClient interface { Ping(hostnameOrIP string, opts ...tsic.PingOption) error Curl(url string, opts ...tsic.CurlOption) (string, error) ID() string - PrettyPeers() (string, error) + + // FailingPeersAsString returns a formatted-ish multi-line-string of peers in the client + // and a bool indicating if the clients online count and peer count is equal. + FailingPeersAsString() (string, bool, error) } diff --git a/integration/tsic/tsic.go b/integration/tsic/tsic.go index 320ae0d5..0e3c91f8 100644 --- a/integration/tsic/tsic.go +++ b/integration/tsic/tsic.go @@ -9,6 +9,7 @@ import ( "log" "net/netip" "net/url" + "os" "strconv" "strings" "time" @@ -503,7 +504,7 @@ func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) { } // Status returns the ipnstate.Status of the Tailscale instance. 
-func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) { +func (t *TailscaleInContainer) Status(save ...bool) (*ipnstate.Status, error) { command := []string{ "tailscale", "status", @@ -521,60 +522,70 @@ func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) { return nil, fmt.Errorf("failed to unmarshal tailscale status: %w", err) } + err = os.WriteFile(fmt.Sprintf("/tmp/control/%s_status.json", t.hostname), []byte(result), 0o755) + if err != nil { + return nil, fmt.Errorf("status netmap to /tmp/control: %w", err) + } + return &status, err } // Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance. // Only works with Tailscale 1.56 and newer. // Panics if version is lower then minimum. -// func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { -// if !util.TailscaleVersionNewerOrEqual("1.56", t.version) { -// panic(fmt.Sprintf("tsic.Netmap() called with unsupported version: %s", t.version)) -// } +func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { + if !util.TailscaleVersionNewerOrEqual("1.56", t.version) { + panic(fmt.Sprintf("tsic.Netmap() called with unsupported version: %s", t.version)) + } -// command := []string{ -// "tailscale", -// "debug", -// "netmap", -// } + command := []string{ + "tailscale", + "debug", + "netmap", + } -// result, stderr, err := t.Execute(command) -// if err != nil { -// fmt.Printf("stderr: %s\n", stderr) -// return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err) -// } + result, stderr, err := t.Execute(command) + if err != nil { + fmt.Printf("stderr: %s\n", stderr) + return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err) + } -// var nm netmap.NetworkMap -// err = json.Unmarshal([]byte(result), &nm) -// if err != nil { -// return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err) -// } + var nm netmap.NetworkMap + err = json.Unmarshal([]byte(result), &nm) + if err != nil { + return 
nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err) + } -// return &nm, err -// } + err = os.WriteFile(fmt.Sprintf("/tmp/control/%s_netmap.json", t.hostname), []byte(result), 0o755) + if err != nil { + return nil, fmt.Errorf("saving netmap to /tmp/control: %w", err) + } + + return &nm, err +} // Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance. // This implementation is based on getting the netmap from `tailscale debug watch-ipn` // as there seem to be some weirdness omitting endpoint and DERP info if we use // Patch updates. // This implementation works on all supported versions. -func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { - // watch-ipn will only give an update if something is happening, - // since we send keep alives, the worst case for this should be - // 1 minute, but set a slightly more conservative time. - ctx, _ := context.WithTimeout(context.Background(), 3*time.Minute) +// func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) { +// // watch-ipn will only give an update if something is happening, +// // since we send keep alives, the worst case for this should be +// // 1 minute, but set a slightly more conservative time. +// ctx, _ := context.WithTimeout(context.Background(), 3*time.Minute) - notify, err := t.watchIPN(ctx) - if err != nil { - return nil, err - } +// notify, err := t.watchIPN(ctx) +// if err != nil { +// return nil, err +// } - if notify.NetMap == nil { - return nil, fmt.Errorf("no netmap present in ipn.Notify") - } +// if notify.NetMap == nil { +// return nil, fmt.Errorf("no netmap present in ipn.Notify") +// } - return notify.NetMap, nil -} +// return notify.NetMap, nil +// } // watchIPN watches `tailscale debug watch-ipn` for a ipn.Notify object until // it gets one that has a netmap.NetworkMap. 
@@ -680,15 +691,18 @@ func (t *TailscaleInContainer) FQDN() (string, error) { return status.Self.DNSName, nil } -// PrettyPeers returns a formatted-ish table of peers in the client. -func (t *TailscaleInContainer) PrettyPeers() (string, error) { +// FailingPeersAsString returns a formatted-ish multi-line-string of peers in the client +// and a bool indicating if the clients online count and peer count is equal. +func (t *TailscaleInContainer) FailingPeersAsString() (string, bool, error) { status, err := t.Status() if err != nil { - return "", fmt.Errorf("failed to get FQDN: %w", err) + return "", false, fmt.Errorf("failed to get FQDN: %w", err) } - str := fmt.Sprintf("Peers of %s\n", t.hostname) - str += "Hostname\tOnline\tLastSeen\n" + var b strings.Builder + + fmt.Fprintf(&b, "Peers of %s\n", t.hostname) + fmt.Fprint(&b, "Hostname\tOnline\tLastSeen\n") peerCount := len(status.Peers()) onlineCount := 0 @@ -700,12 +714,12 @@ func (t *TailscaleInContainer) PrettyPeers() (string, error) { onlineCount++ } - str += fmt.Sprintf("%s\t%t\t%s\n", peer.HostName, peer.Online, peer.LastSeen) + fmt.Fprintf(&b, "%s\t%t\t%s\n", peer.HostName, peer.Online, peer.LastSeen) } - str += fmt.Sprintf("Peer Count: %d, Online Count: %d\n\n", peerCount, onlineCount) + fmt.Fprintf(&b, "Peer Count: %d, Online Count: %d\n\n", peerCount, onlineCount) - return str, nil + return b.String(), peerCount == onlineCount, nil } // WaitForNeedsLogin blocks until the Tailscale (tailscaled) instance has diff --git a/integration/utils.go b/integration/utils.go index b9e25be6..840dbc4c 100644 --- a/integration/utils.go +++ b/integration/utils.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/integration/tsic" "github.com/stretchr/testify/assert" ) @@ -154,11 +155,11 @@ func assertClientsState(t *testing.T, clients []TailscaleClient) { func assertValidNetmap(t *testing.T, client TailscaleClient) { t.Helper() - // if 
!util.TailscaleVersionNewerOrEqual("1.56", client.Version()) { - // t.Logf("%q has version %q, skipping netmap check...", client.Hostname(), client.Version()) + if !util.TailscaleVersionNewerOrEqual("1.56", client.Version()) { + t.Logf("%q has version %q, skipping netmap check...", client.Hostname(), client.Version()) - // return - // } + return + } t.Logf("Checking netmap of %q", client.Hostname()) @@ -175,7 +176,11 @@ func assertValidNetmap(t *testing.T, client TailscaleClient) { assert.NotEmptyf(t, netmap.SelfNode.AllowedIPs(), "%q does not have any allowed IPs", client.Hostname()) assert.NotEmptyf(t, netmap.SelfNode.Addresses(), "%q does not have any addresses", client.Hostname()) - assert.Truef(t, *netmap.SelfNode.Online(), "%q is not online", client.Hostname()) + if netmap.SelfNode.Online() != nil { + assert.Truef(t, *netmap.SelfNode.Online(), "%q is not online", client.Hostname()) + } else { + t.Errorf("Online should not be nil for %s", client.Hostname()) + } assert.Falsef(t, netmap.SelfNode.Key().IsZero(), "%q does not have a valid NodeKey", client.Hostname()) assert.Falsef(t, netmap.SelfNode.Machine().IsZero(), "%q does not have a valid MachineKey", client.Hostname()) @@ -213,7 +218,7 @@ func assertValidNetmap(t *testing.T, client TailscaleClient) { // This test is not suitable for ACL/partial connection tests. func assertValidStatus(t *testing.T, client TailscaleClient) { t.Helper() - status, err := client.Status() + status, err := client.Status(true) if err != nil { t.Fatalf("getting status for %q: %s", client.Hostname(), err) } @@ -326,7 +331,7 @@ func dockertestMaxWait() time.Duration { // return timeout // } -// pingAllNegativeHelper is intended to have 1 or more nodes timeing out from the ping, +// pingAllNegativeHelper is intended to have 1 or more nodes timing out from the ping, // it counts failures instead of successes. 
// func pingAllNegativeHelper(t *testing.T, clients []TailscaleClient, addrs []string) int { // t.Helper() diff --git a/proto/headscale/v1/headscale.proto b/proto/headscale/v1/headscale.proto index f8cc596f..1ccc7029 100644 --- a/proto/headscale/v1/headscale.proto +++ b/proto/headscale/v1/headscale.proto @@ -123,6 +123,13 @@ service HeadscaleService { post: "/api/v1/node/{node_id}/user" }; } + + rpc BackfillNodeIPs(BackfillNodeIPsRequest) returns (BackfillNodeIPsResponse) { + option (google.api.http) = { + post: "/api/v1/node/backfillips" + }; + } + // --- Node end --- // --- Route start --- diff --git a/proto/headscale/v1/node.proto b/proto/headscale/v1/node.proto index a9551530..26fe73c7 100644 --- a/proto/headscale/v1/node.proto +++ b/proto/headscale/v1/node.proto @@ -126,3 +126,11 @@ message DebugCreateNodeRequest { message DebugCreateNodeResponse { Node node = 1; } + +message BackfillNodeIPsRequest { + bool confirmed = 1; +} + +message BackfillNodeIPsResponse { + repeated string changes = 1; +}